content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def sign2(x):
    """Return the sign of ``x`` as 1 (for x > 0) or -1 (for x <= 0).

    NOTE(review): zero maps to -1, matching the original behavior.
    """
    return 1 if x > 0 else -1
import pathlib
def check_file_exist_by_pathlib(path: str = ""):
    """Return True if ``path`` exists on disk, else False.

    :param path: Filesystem path to test.
    :return: Boolean indicating whether the path exists.
    """
    return pathlib.Path(path).exists()
def merge_boxes(box1, box2):
    """Return the smallest axis-aligned box enclosing both input boxes.

    Each box is ((x_min, x_max), (y_min, y_max), (z_min, z_max)).
    """
    return tuple(
        (min(lo1, lo2), max(hi1, hi2))
        for (lo1, hi1), (lo2, hi2) in zip(box1, box2)
    )
def safe_get(dct, *keys, **kwargs):
    """Return the dict value for the given ordered keys.

    Args:
        dct (dict): the dictionary that will be consulted.
        *keys: positional arguments which contains the key path to the
            wanted value.
        **kwargs:
            default -> If any key is missing, default will be returned.

    Examples:
        >>> my_dict = {"a": {"b": {"c": "my_value"}}}
        >>> safe_get(my_dict, "a", "b", "c")
        'my_value'
        >>> safe_get(my_dict, "a", "z")
        >>> safe_get(my_dict, "a", "z", default="my_other_value")
        'my_other_value'
        >>> safe_get(my_dict, "a", "b", "c", "d", default="fallback")
        'fallback'
        >>> my_other_dict = {"a": ["first"]}
        >>> safe_get(my_other_dict, "a", 0)
        'first'
        >>> safe_get(my_other_dict, "a", 1)
        >>> safe_get(my_other_dict, "a", 1, default="second")
        'second'

    Returns:
        Any: the dictionary value for the given ordered keys.
    """
    default_value = kwargs.get("default")
    for key in keys:
        try:
            dct = dct[key]
        # TypeError covers indexing into a non-container leaf (e.g. asking
        # for one key too many); previously that raised instead of
        # returning the default as documented.
        except (KeyError, IndexError, TypeError):
            return default_value
    return dct
def check_samples(sids, proband, mother, father):
    """Check that the proband, mother and father all exist in the VCF.

    Args:
        sids (list): List of sample ids in VCF
        proband (str): ID of proband in VCF
        mother (str): ID of mother in VCF
        father (str): ID of father in VCF

    Returns:
        bool: True if all three samples exist in the VCF
    """
    return all(sample in sids for sample in (proband, mother, father))
def full_name(user) -> str:
    """Full name of a user (first name, plus last name when present)."""
    last = user.last_name
    if last:
        return f"{user.first_name} {last}"
    return f"{user.first_name}"
import inspect
def invalid_args(func, argdict):
    """
    Given a function and a dictionary of arguments, return the set of
    arguments from ``argdict`` that aren't accepted by ``func``.

    A function that takes ``**kwargs`` accepts everything, so the empty
    set is returned in that case.
    """
    spec = inspect.getfullargspec(func)
    if spec.varkw:
        return set()  # All accepted via **kwargs
    # Fix: keyword-only parameters (after a bare ``*``) are valid call
    # arguments too; they were previously ignored and reported invalid.
    return set(argdict) - set(spec.args) - set(spec.kwonlyargs)
def lemmatize_word(lm, word):
    """Lemmatize ``word`` with the nltk lemmatizer ``lm``.

    The part of speech is unknown, so the word is lemmatized both as a
    noun and as a verb; the candidate that differs from the input word is
    returned (verb result is the fallback when the noun form is
    unchanged). Not guaranteed correct, but good enough for our purposes.
    """
    as_noun = lm.lemmatize(word, 'n')
    as_verb = lm.lemmatize(word, 'v')
    return as_verb if as_noun == word else as_noun
def dict_deep_merge(lhs, rhs):
    """Deep-merge dictionary ``rhs`` into dictionary ``lhs`` (in place).

    Values from ``rhs`` win, except that nested dicts present on both
    sides are merged recursively. Returns the mutated ``lhs``.
    """
    for key, value in rhs.items():
        both_are_dicts = (
            key in lhs and isinstance(lhs[key], dict) and isinstance(value, dict)
        )
        lhs[key] = dict_deep_merge(lhs[key], value) if both_are_dicts else value
    return lhs
def _DuplicateName(values):
"""Returns the 'mojom_name' of the first entry in |values| whose 'mojom_name'
has already been encountered. If there are no duplicates, returns None."""
names = set()
for value in values:
if value.mojom_name in names:
return value.mojom_name
names.add(value.mojom_name)
return None | c20216c26136132326d9b2eff594dee025a89896 | 126,012 |
def can_put(t, i, j, v):
    """Tell whether digit ``v`` can legally be placed at (i, j) in grid ``t``."""
    # The row and the column must not already contain v.
    if any(t[i][x] == v or t[x][j] == v for x in range(9)):
        return False
    # Neither may the enclosing 3x3 sub-grid.
    block_i = 3 * (i // 3)
    block_j = 3 * (j // 3)
    return all(
        t[x][y] != v
        for x in range(block_i, block_i + 3)
        for y in range(block_j, block_j + 3)
    )
def find_index_common_oxygen(iz, site_1: int, site_2: int) -> int:
    """
    Finds a common oxygen, if it exists, between two T sites.

    :param iz: imperfect MAZE-sim (or subclass) containing T sites
    :param site_1: index of T site 1
    :param site_2: index of T site 2
    :return: index of the shared oxygen atom
    """
    # Refresh the neighbor list before querying it.
    iz.update_nl()
    neighbors_1 = iz.neighbor_list.get_neighbors(site_1)[0]
    neighbors_2 = iz.neighbor_list.get_neighbors(site_2)[0]
    for index in neighbors_1:
        if index in neighbors_2 and iz[index].symbol == 'O':
            return index
    assert False, 'No middle oxygen found!!'
def _unencapsulate_facts(facts, check_version=False):
"""Extract node facts and version from final fact format."""
assert 'nodes' in facts, 'No nodes present in parsed facts file(s)'
return facts['nodes'], facts.get('version') | 5c56b63d721428588c7df47986bf2df9016a07dd | 126,017 |
def manhattan_distance(node1, node2):
    """Return the Manhattan (L1) distance between two 2-D nodes."""
    dx = node1[0] - node2[0]
    dy = node1[1] - node2[1]
    return abs(dx) + abs(dy)
def normalize_first_digit(array):
    """Normalize the observed distribution of first significant digits.

    Divides each entry by the sum of the array values.

    Parameters
    ----------
    array : array of int
        Array of observed data.

    Returns
    -------
    array of float
        Observed data normalized so the entries sum to 1.
    """
    total = sum(array)
    return array / total
import struct
def prefix_size(data: bytes, size: int) -> bytes:
    """
    Prefix a byte string with a 4-byte big-endian size field.
    """
    return struct.pack('!L', size) + data
def add_events_to_model(model):
    """
    Convert model events into variables to be evaluated in the solver step.

    Args:
        model (pybamm.lithium_ion.BaseModel): the PyBaMM model to solve.

    Returns:
        pybamm.lithium_ion.BaseModel: the same model, with one variable
        per event (named "Event: <event name>").
    """
    model.variables.update(
        {"Event: " + event.name: event.expression for event in model.events}
    )
    return model
import inspect
def is_instance_or_subclass(thing, klass):
    """
    Check whether ``thing`` is an instance of, or is itself a subclass of,
    ``klass`` (or a tuple of classes).
    """
    if isinstance(thing, klass):
        return True
    return inspect.isclass(thing) and issubclass(thing, klass)
def salary_columns(html_text, context='current'):
    """Extract salary column names from a Hoopshype HTML fragment.

    @param **html_text** (*str*): String of the HTML response from SALARY_URL
    @param **context** (*str*): 'current' signifies that current and future
        salaries are being pulled (6 columns); otherwise historical values
        are returned (2 columns).

    Returns:
        **html_text** (*str*): Truncated string of the HTML response with
            the consumed column cells removed.
        **column_list** (*list*): List of column names for salary
            information.
    """
    num_columns = 6 if context == 'current' else 2
    column_list = []
    for _ in range(num_columns):
        start_ind = html_text.find('>') + 1
        end_ind = html_text.find('</td>')
        column_list.append(html_text[start_ind:end_ind])
        # Drop everything through the closing </td> tag (5 chars long).
        html_text = html_text[end_ind + 5:]
    return html_text, column_list
def injectlocals(l, skip=['self', 'args', 'kwargs'], **kwargs):
    """Update a dictionary with another, skipping specified keys.

    If ``l`` carries its own 'kwargs' entry, that dict is folded in first;
    then every item of ``l`` whose key is not in ``skip`` is copied over.
    """
    if 'kwargs' in l:
        kwargs.update(l['kwargs'])
    extras = {key: value for key, value in l.items() if key not in skip}
    kwargs.update(extras)
    return kwargs
def trace_play(play, strategy0, strategy1, score0, score1, dice, goal, say, feral_hogs):
    """Wraps the user's play function and
    (1) ensures that strategy0 and strategy1 are called exactly once per turn
    (2) records the entire game, returning the result as a list of dictionaries,
    each with keys "s0_start", "s1_start", "who", "num_dice", "dice_values"
    Returns (s0, s1, trace) where s0, s1 are the return values from play and trace
    is the trace as specified above.
    This might seem a bit overcomplicated but it will also used to create the game
    traces for the fuzz test (when run against the staff solution).
    """
    game_trace = []  # one dict per recorded turn, in play order
    def mod_strategy(who, my_score, opponent_score):
        # Wrapper around the real strategies; records each new turn and
        # de-duplicates repeated calls within the same turn.
        if game_trace:
            prev_total_score = game_trace[-1]["s0_start"] + game_trace[-1]["s1_start"]
            if prev_total_score == my_score + opponent_score:
                # game is still on last turn since the total number of points
                # goes up every turn -- return the cached choice rather than
                # invoking the student strategy a second time
                return game_trace[-1]["num_dice"]
        # Dispatch to the correct player's strategy exactly once per turn.
        current_num_dice = (strategy0, strategy1)[who](my_score, opponent_score)
        current_turn = {
            # Translate (my, opponent) scores back into absolute player order.
            "s0_start" : [my_score, opponent_score][who],
            "s1_start" : [my_score, opponent_score][1 - who],
            "who" : who,
            "num_dice" : current_num_dice,
            "dice_values" : [] # no dice rolled yet
        }
        game_trace.append(current_turn)
        return current_num_dice
    def mod_dice():
        # Wrapper around the dice: appends every roll to the current turn.
        roll = dice()
        if not game_trace:
            raise RuntimeError("roll_dice called before either strategy function")
        game_trace[-1]["dice_values"].append(roll)
        return roll
    s0, s1 = play(
        lambda a, b: mod_strategy(0, a, b),
        lambda a, b: mod_strategy(1, a, b),
        score0,
        score1,
        dice=mod_dice,
        goal=goal,
        say=say,
        feral_hogs=feral_hogs)
    return s0, s1, game_trace
def _dict_to_list(chrdict):
""" Convert a dictionary to an array of tuples """
output = []
for chromosome, values in chrdict.items():
for value in values:
output.append((chromosome, ) + value)
return output | cb97da872d48f3261d7692f8c3755eef4164cdc3 | 126,044 |
from datetime import datetime
def create_result_map(event):
    """Extract the attributes and contact id from the customer call to be
    stored in DynamoDB.

    Args:
        event (dict): A dictionary with the events sent from Amazon Connect.

    Returns:
        result_map (dict): the contact attributes plus 'guid' (contact id)
        and 'dttm' (current timestamp) keys.
    """
    contact_data = event["Details"]["ContactData"]
    result_map = contact_data["Attributes"]  # NOTE: mutated in place
    result_map["guid"] = contact_data["ContactId"]
    result_map["dttm"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return result_map
import re
def get_sde_id(pattern, string, n=1):
    """Extract an id from an sde string.

    Returns group ``n`` of the first match of ``pattern`` in ``string``,
    or the whole string when there is no match.
    """
    match = re.search(pattern, string)
    return string if match is None else match.group(n)
def _ToDict(taxonomy, subject_id, levels, result=None):
    """Converts taxonomy to dict of lists. Recursive.
    The dict is of the form:
    {subject_id: [child_subject, ...],
     ...
    }
    The root node is explicitly represented as 'root', rather than None.
    Each child subject is a dict of the form:
    {i: id, n: name, l: isLeaf}
    Args:
      taxonomy: An instance of SubjectTaxonomy.
      subject_id: The ID of the subject to convert to JSON.
      levels: The number of levels to convert.
      result: The result dict to use for output. If None (default) a new one
        is created and returned.
    Returns:
      The dict that represents the accumulated result.
    """
    result = result or {}
    subjects = taxonomy.GetChildSubjects(subject_id)
    if subjects:
        subjects_as_dict = [
            dict(i=s.subject_id, n=s.name, l=taxonomy.IsLeafSubject(s.subject_id))
            for s in subjects]
        if subject_id:
            result[subject_id] = subjects_as_dict
        else:
            # A falsy subject_id denotes the root node, keyed as 'root'.
            result['root'] = subjects_as_dict
    # Recurse, if needed.
    levels -= 1
    if levels and subjects:
        for subject in subjects:
            _ToDict(taxonomy, subject.subject_id, levels, result)
    return result
def file_to_str(filename):
    """Return the contents of ``filename``, lower-cased, as a string.

    NOTE: the result is lower-cased -- the previous docstring omitted
    this, which is misleading for callers.
    """
    with open(filename) as f:
        return f.read().lower()
def get_lomb_lambda(lomb_model):
    """Get the regularization parameter of a fitted Lomb-Scargle model."""
    first_fit = lomb_model['freq_fits'][0]
    return first_fit['lambda']
def fetch_subscriptions(weboob_proxy, backend):
    """
    Fetch subscriptions from a given backend.

    :param weboob_proxy: An instance of ``WeboobProxy`` class.
    :param backend: A valid built backend to use.
    :return: The list of fetched subscriptions (Weboob objects), with
        each id rewritten to the fully qualified form ``id@backend_name``.
    """
    subscriptions = list(backend.iter_subscription())
    for subscription in subscriptions:
        subscription.id = weboob_proxy._ensure_fully_qualified_id(
            subscription.id,
            backend
        )
    return subscriptions
from typing import Union
import pathlib
import sqlite3
def load_db(path: Union[str, pathlib.PurePath]) -> sqlite3.Connection:
    """
    Load a SQLite database from ``path``.

    Args:
        path: String or :class:`pathlib.PurePath` of an existing file.

    Returns:
        :class:`sqlite3.Connection`

    Raises:
        FileNotFoundError: if ``path`` is not an existing regular file.
    """
    db_file = pathlib.Path(path)
    if not db_file.is_file():
        raise FileNotFoundError(f"Not a file/file not found: {db_file}")
    return sqlite3.connect(str(db_file.resolve()))
import math
def vect(magnitude, angle=0):
    """Return a vector of the given magnitude and direction in component form."""
    x_component = magnitude * math.cos(angle)
    y_component = magnitude * math.sin(angle)
    return [x_component, y_component]
def _contains(a, e):
    """Checks for the existence of an element in a set.

    Args:
      a: A set, as returned by `sets.make()`.
      e: The element to look for.

    Returns:
      True if the element exists in the set, False if the element does not.
    """
    # NOTE(review): presumably `sets.make()` stores members as keys of the
    # private `_values` mapping, making membership a single hash lookup --
    # confirm against the sets implementation.
    return e in a._values
def is_square_form(num: int) -> bool:
    """
    Determines if num is in the form 1_2_3_4_5_6_7_8_9
    (where each _ stands for any single digit).

    >>> is_square_form(1)
    False
    >>> is_square_form(112233445566778899)
    True
    >>> is_square_form(123456789012345678)
    False
    >>> is_square_form(99)
    False
    """
    digit = 9
    while num > 0:
        if num % 10 != digit:
            return False
        num //= 100  # skip the wildcard digit between anchors
        digit -= 1
    # Fix: all nine anchor digits 9..1 must actually have been consumed.
    # Previously, short inputs such as 99 (and 0) fell out of the loop
    # early and were wrongly reported True.
    return digit == 0
def line_with_unit_test_header(line):
    """Check if the given string represents a unit test coverage header."""
    has_prefix = line.startswith("Name ")
    has_suffix = line.endswith("Stmts Miss Cover Missing")
    return has_prefix and has_suffix
import zlib
import dill
def _dumps(obj):
    """
    Serialize and compress an object.

    ``dill`` (a pickle superset) handles objects the stdlib pickle cannot
    (e.g. lambdas, nested functions); the pickled bytes are then
    zlib-compressed.
    """
    return zlib.compress(dill.dumps(obj))
from typing import Type
from typing import Any
from typing import Set
def _get_subclass_methods(cls: Type[Any]) -> Set[str]:
"""Return the set of method names defined (only) on a subclass."""
all_methods = set(dir(cls))
base_methods = (dir(base()) for base in cls.__bases__)
return all_methods.difference(*base_methods) | c1a6f2b9acac1ee9f819587fc50051d9daf0f3b0 | 126,076 |
def get_role(action):
    """Maps action from input to a role (promote or demote).

    Returns None for any unrecognized action, mirroring the original
    fall-through behavior.
    """
    return {"demote": 'user', "promote": 'admin'}.get(action)
def top_hat_gust(reduced_time: float) -> float:
    """
    A canonical example gust: unit velocity on the interval [5, 10],
    zero elsewhere.

    Args:
        reduced_time (float)

    Returns:
        gust_velocity (float)
    """
    return 1 if 5 <= reduced_time <= 10 else 0
def _antnums_to_bl(antnums):
"""
Convert tuple of antenna numbers to baseline integer.
A baseline integer is the two antenna numbers + 100
directly (i.e. string) concatenated. Ex: (1, 2) -->
101 + 102 --> 101102.
Parameters
----------
antnums : tuple
tuple containing integer antenna numbers for a baseline.
Ex. (ant1, ant2)
Returns
-------
bl : <i6 integer
baseline integer
"""
# get antennas
ant1 = antnums[0] + 100
ant2 = antnums[1] + 100
# form bl
bl = int(ant1*1e3 + ant2)
return bl | 1b9dc4a442b94fefb7c4eec9148b190b791e2b02 | 126,088 |
def changes_dict_to_set_attribute(metakey, changes_dict, end=";"):
    """Convert a dictionary of changes to set_attribute instructions."""
    instructions = [
        "set_attribute({!r}, {!r}, {!r}, old={!r})".format(metakey, key, value, old)
        for key, (value, old) in changes_dict.items()
    ]
    return "\n".join(instructions) + end
import functools
def capture_init(init):
    """capture_init.

    Decorate `__init__` with this, and you can then recover the *args
    and **kwargs passed to it in `self._init_args_kwargs`.
    """
    @functools.wraps(init)
    def wrapped_init(self, *args, **kwargs):
        # Stash the call arguments before delegating to the real __init__.
        self._init_args_kwargs = (args, kwargs)
        init(self, *args, **kwargs)
    return wrapped_init
def is_colinear(p, q, r, epsilon):
    """
    Returns True if points p, q, r are colinear, otherwise False.

    pqr is considered colinear if the (absolute) area of triangle pqr is
    less than the error tolerance epsilon.

    Fix: the shoelace formula yields a *signed* area, negative for
    clockwise-ordered points -- so every clockwise triangle, however
    large, previously compared below epsilon and was reported colinear.
    """
    # Signed area of triangle pqr (shoelace formula).
    signed_area = ((p.x * (q.y - r.y)) + (q.x * (r.y - p.y)) + (r.x * (p.y - q.y))) / 2
    return abs(signed_area) < epsilon
from pathlib import Path
def replace_extension(infile):
    """Replace the (up to two) trailing extensions of a file with .fastq.gz."""
    old_ext = "".join(Path(str(infile)).suffixes[-2:])
    return str(infile).replace(old_ext, ".fastq.gz")
import torch
def self_critical_loss(log_probs, lengths, sampling_reward, greedy_reward):
    """
    Self-critical sequence training loss for RL.

    log_probs: [batch_size, time_steps]
    lengths: [batch_size], containing the lengths of the predicted polygons
    sampling_reward: [batch_size]
    greedy_reward: [batch_size]

    Fix: the per-sample slice previously read ``log_probs[:lengths[i]]``,
    which sliced the *batch* dimension (first ``lengths[i]`` samples)
    instead of sample ``i``'s first ``lengths[i]`` time steps.
    """
    reward = sampling_reward - greedy_reward
    loss = 0
    for i in torch.arange(reward.size(0), dtype=torch.long, device=reward.device):
        per_step = -1 * log_probs[i, :lengths[i]] * reward[i]
        # Expectation per polygon
        loss += torch.mean(per_step)
    # Mean across the batch
    return loss / reward.size(0)
def get_cutoff_frequencies(values, cutoffs):
    """Count how many values fall into each bin defined by ``cutoffs``.

    A value is tallied into the first bin whose upper cutoff exceeds it;
    values beyond the last cutoff are lumped into the final bin.

    Attributes:
        - values(list): the list of values to tally.
        - cutoffs(list): ascending bin boundary values.

    Returns:
        - counts(list): frequencies per bin (len(cutoffs) - 1 entries).
    """
    counts = [0] * (len(cutoffs) - 1)
    for value in values:
        for bin_index, upper in enumerate(cutoffs[1:]):
            if value < upper:
                counts[bin_index] += 1
                break
        else:
            # Fell past every cutoff: count it in the last bin.
            counts[-1] += 1
    return counts
import re
def add_suffix(alias):
    """Add (or increment) an integer suffix on ``alias``, stripping backticks."""
    alias = alias.strip('`')
    match = re.search(r'([0-9]+)$', alias)
    if not match:
        return alias + '_1'
    digits = match.group(1)
    return alias[:-len(digits)] + str(int(digits) + 1)
import calendar
def get_timestamp(dt):
    """Converts datetime object to a float value in epoch seconds.

    ``dt`` is interpreted as UTC (via ``utctimetuple``); the microsecond
    component is added back because the time tuple only carries whole
    seconds.
    """
    return calendar.timegm(dt.utctimetuple()) + dt.microsecond * 1e-6
def approx_equal(a, b, epsilon=0.0000000001):
    """Return True when ``a`` and ``b`` differ by at most ``epsilon``."""
    difference = a - b
    return abs(difference) <= epsilon
def get_order_tokens(order):
    """ Retrieves the order tokens used in an order
        e.g. 'A PAR - MAR' would return ['A PAR', '- MAR']

        Docstring fix: the previous example claimed ['A PAR', '-', 'MAR'],
        but '-' is deliberately merged with its destination (see below).
    """
    # We need to keep 'A', 'F', and '-' in a temporary buffer to concatenate them with the next word
    # We replace 'R' orders with '-'
    # Tokenization would be: 'A PAR S A MAR - BUR' --> 'A PAR', 'S', 'A MAR', '- BUR'
    #                        'A PAR R MAR'         --> 'A PAR', '- MAR'
    buffer, order_tokens = [], []
    for word in order.replace(' R ', ' - ').split():
        buffer += [word]
        if word not in ['A', 'F', '-']:
            order_tokens += [' '.join(buffer)]
            buffer = []
    return order_tokens
def is_c19_narrative(narratives):
    """Check a dict of different-language texts for the string "COVID-19"
    (case-insensitive)."""
    return any("COVID-19" in text.upper() for text in narratives.values())
def _check_range_uniqueness(ds):
    """Check if range (``echo_range``) changes across ping in a given frequency channel.

    Compares every ping's NaN-dropped ``echo_range`` against the first
    ping's; the result is a truthy xarray scalar when all pings agree.
    """
    return (
        ds["echo_range"].isel(ping_time=0).dropna(dim="range_sample")
        == ds["echo_range"].dropna(dim="range_sample")
    ).all()
def get_seat_id(row, column):
    """Calculate a boarding-pass seat id (row * 8 + column).

    :param row: decimal row number
    :param column: decimal column number
    :return: id
    """
    return row * 8 + column
def from_hex(hex_string):
    """
    Convert the given hex string to a byte array.

    A convenience method useful for testing values during development.

    :param hex_string: The hex String to parse to bytes (may be None).
    :return: A bytearray parsed from the given string, or None.
    """
    if hex_string is None:
        return None
    return bytearray.fromhex(hex_string)
def pass_check_scale(candidate_box, min_max_area):
    """Check scale of the elements. Returns True if the candidate box's
    area is at least ``min_max_area`` (the check passed)."""
    return candidate_box.area >= min_max_area
def get_jwst_siaf_instrument(tree):
    """Return the instrument specified in the first aperture of a SIAF xml tree.

    Returns
    -------
    instrument : str
        All Caps instrument name, e.g. NIRSPEC.
        Implicitly returns None when no InstrName node is found.
    """
    for entry in tree.getroot().iter('SiafEntry'):
        for node in entry.iterchildren():
            # NOTE(review): `iterchildren` is an lxml API; a stdlib
            # ElementTree would need `list(entry)` / `iter()` instead --
            # confirm the tree is built with lxml.
            if node.tag == 'InstrName':
                return node.text
def get_model_weight(model):
    """Return a dict mapping parameter name -> tensor for every trainable
    parameter of ``model``."""
    return {
        name: param.data
        for name, param in model.named_parameters()
        if param.requires_grad
    }
def cipher(text, shift, encrypt=True):
    """
    Encrypts words by shifting the direction of letters.

    Parameters:
    -----------
    text = Word whose letters will be converted. String.
    shift = Number of positions up/down the alphabet to shift letters. Int.
    encrypt = Direction of encryption. Boolean.

    Examples:
    -----------
    cipher("testing", 1)
    "uftujoh"
    cipher("uftujoh", -1)
    "testing"
    cipher("uftujoh", 1, encrypt = False)
    "testing"
    """
    # Fix: the alphabet previously omitted 'x'/'X', so 'w' shifted to 'y'
    # and 'x' passed through unencrypted. The docstring examples were also
    # wrong ("uftjoh" dropped a letter of "testing"+1).
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    new_text = ''
    for c in text:
        index = alphabet.find(c)
        if index == -1:
            # Non-alphabetic characters are left untouched.
            new_text += c
        else:
            new_index = index + shift if encrypt == True else index - shift
            new_index %= len(alphabet)
            new_text += alphabet[new_index:new_index+1]
    return new_text
import math
def pressure_from_alt(alt):
    """
    Calculate pressure in Pa from altitude in m using the standard
    atmosphere tables (troposphere barometric formula).
    """
    base = 1.0 - 2.25577e-5 * alt
    return 101325.0 * math.pow(base, 5.25588)
from random import random
def rand(x):
    """
    Return a uniformly-distributed random float between 0 and ``x``.

    Args:
        x(float): maximum value.

    Returns:
        float: random number between 0 and `x`.
    """
    return x * random()
def test_max_line_length(style_checker):
    """Verify the maximum line length for Python is 88 characters.
    """
    def python_statement_from_len(length):
        """Return a string with a python statement of exactly length chars.
        Note that length must be at least ~ 10 characters.
        """
        pre = 'print("'
        post = '")'
        # Pad the string literal so the whole statement is `length` chars.
        line = '-' * (length - len(pre) - len(post))
        return pre + line + post
    EXPECTED_MAX_LINE_LENGTH = 88
    def test_style_checker_with_len(length):
        """Check style_checker behavior a Python line of length chars."""
        stmt = python_statement_from_len(length)
        py_file_name = "foo{length}.py".format(length=length)
        with open(py_file_name, 'w') as f:
            f.write(stmt + '\n')
        p = style_checker.run_style_checker('whatever', py_file_name)
        if length <= EXPECTED_MAX_LINE_LENGTH:
            # At or below the limit: the checker must pass silently.
            style_checker.assertEqual(p.status, 0, p.image)
            style_checker.assertRunOutputEmpty(p)
        else:
            # Above the limit: expect exactly one E501 diagnostic.
            style_checker.assertNotEqual(p.status, 0, p.image)
            style_checker.assertRunOutputEqual(p, """\
{py_file_name}:1:{col}: E501 line too long ({length} > {EXPECTED_MAX_LINE_LENGTH} characters)
""".format(
                py_file_name=py_file_name,
                col=EXPECTED_MAX_LINE_LENGTH + 1,
                length=length,
                EXPECTED_MAX_LINE_LENGTH=EXPECTED_MAX_LINE_LENGTH,
            ))
    # Test the behavior on a variety of length, some below 80,
    # some between 80 and EXPECTED_MAX_LINE_LENGTH, and some
    # above EXPECTED_MAX_LINE_LENGTH.
    for length in range(75, EXPECTED_MAX_LINE_LENGTH + 5):
        test_style_checker_with_len (length)
import random
import string
def random_letters(count):
    """Get ``count`` distinct pseudo-random uppercase letters as a string.

    Fixes over the previous rejection-sampling loop:
    - ``count > 26`` now raises ValueError (it previously spun forever);
    - ``count == 0`` now returns '' (it previously returned one letter,
      because the seed letter was drawn before the length check).
    """
    return "".join(random.sample(string.ascii_uppercase, count))
def split_line(line):
    """
    :param line: from an input data file (tab-separated; text in column 4).
    :return: lower-cased words of that column, split on single spaces.
    """
    text = line.split('\t')[3]
    return text.strip().lower().split(' ')
def replace_corrected_orography_with_original_for_glaciated_grid_points(input_corrected_orography,
                                                                        input_original_orography,
                                                                        input_glacier_mask):
    """Replace a corrected orography with the original orography at points that are glaciated

    Arguments:
    input_corrected_orography: Field object; the corrected orography where correction are applied
        to both glaciated and unglaciated points
    input_original_orography: Field object; the original orography to use for glaciated points
    input_glacier_mask: Field object; a binary glacial mask where glacier is 1/True and non-glacier
        is 0/False
    Returns: An orography using the corrected orography for non-glaciated points and the original
    orography for glaciated points.
    """
    # Mask out the glaciated cells of the corrected orography...
    input_corrected_orography.mask_field_with_external_mask(input_glacier_mask.get_data())
    # ...then copy the remaining (non-glaciated) corrected values over the
    # original orography. NOTE(review): assumes update_field_with_partially_
    # masked_data skips masked cells -- confirm against the Field API.
    input_original_orography.update_field_with_partially_masked_data(input_corrected_orography)
    return input_original_orography
import zipfile
def _GetAffectedClasses(jar_file, source_files):
    """Gets affected classes by affected source files to a jar.

    Args:
      jar_file: The jar file to get all members.
      source_files: The list of affected source files.

    Returns:
      A tuple of affected classes and unaffected members.
    """
    with zipfile.ZipFile(jar_file) as f:
        members = f.namelist()
    affected_classes = []
    unaffected_members = []
    for member in members:
        # Non-class entries (resources, manifests) are never "affected".
        if not member.endswith('.class'):
            unaffected_members.append(member)
            continue
        is_affected = False
        # Trim inner-class suffixes (Foo$Bar.class) or the .class extension
        # so the remainder matches the source file's path stem.
        index = member.find('$')
        if index == -1:
            index = member.find('.class')
        for source_file in source_files:
            if source_file.endswith(member[:index] + '.java'):
                affected_classes.append(member)
                is_affected = True
                break
        if not is_affected:
            unaffected_members.append(member)
    return affected_classes, unaffected_members
def encrypt(plaintext, cipher, shift):
    """
    Caesar encryption of a plaintext using a shifted cipher.

    A positive shift rotates the cipher to the right; a negative shift
    rotates it to the left.

    :param plaintext: the text to encrypt.
    :param cipher: set of characters, shifted in a directed to used for character substitution.
    :param shift: offset to rotate cipher.
    :returns: encrypted plaintext (ciphertext).

    See: https://en.wikipedia.org/wiki/Caesar_cipher

    Example:
    >>> encrypt("hello world", "abcdefghijklmnopqrstuvwxyz ", 1)
    'gdkknzvnqkc'
    >>> encrypt("hello world", "abcdefghijklmnopqrstuvwxyz ", -1)
    'ifmmpaxpsme'
    """
    # A right rotation by `shift` is a single slice at -offset, where
    # offset = shift mod len; negative shifts fall out of the same formula.
    offset = shift % len(cipher)
    shifted_cipher = cipher[-offset:] + cipher[:-offset] if offset else cipher
    return "".join(shifted_cipher[cipher.index(character)] for character in plaintext)
from pathlib import Path
from typing import Tuple
import re
def fiscal_year_quarter_from_path(path: Path) -> Tuple[int, int]:
    """Extract the fiscal year and quarter from a path like FY21_Q3.*

    Raises RuntimeError when the stem does not start with the FYXX_QX
    (or FYXX-QX) pattern.
    """
    match = re.match("FY(?P<fy>[0-9]{2})[_-]Q(?P<q>[1234])", path.stem)
    if match is None:
        raise RuntimeError(f"Cannot match FYXX_QX pattern in '{path.stem}'")
    fiscal_year = int("20" + match.group("fy"))
    quarter = int(match.group("q"))
    return fiscal_year, quarter
import re
def parse_shutdown_result(result):
    """Parse the shutdown result string and return the strings (grace left,
    deadline left, queries registered, queries executing)."""
    assert len(result.data) == 1
    summary = result.data[0]
    pattern = (r'startup grace period left: ([0-9ms]*), deadline left: ([0-9ms]*), '
               r'queries registered on coordinator: ([0-9]*), queries executing: ([0-9]*), '
               r'fragment instances: [0-9]*')
    match = re.match(pattern, summary)
    assert match is not None, summary
    return match.groups()
def get_dictionary(key, resources):
    """Return a new single-entry dictionary mapping ``key`` to ``resources``.

    Keyword arguments:
    key -- the key to use in the dictionary
    resources -- the resources to use as the key value
    """
    return dict([(key, resources)])
import torch
def const_to_rad(x: torch.Tensor) -> torch.Tensor:
    """Recover circle radii from solutions of the circle linear system.

    Args:
        x (torch.Tensor): per-row solutions [cx, cy, c].

    Return:
        radius (torch.Tensor): each circle's radius
            sqrt(c + cx**2 + cy**2).
    """
    radius_squared = x[:, 2] + x[:, 0] ** 2 + x[:, 1] ** 2
    return torch.sqrt(radius_squared)
def query_yes_no(question):
    """Ask a yes/no question and return the answer.

    :param str question: the question string.
    :return bool: True for yes and False for no.
    """
    ans = input(f"{question} (y/n)").lower()
    while True:
        if ans not in ['y', 'yes', 'n', 'no']:
            # Fix: the re-prompted answer was not lower-cased, so 'Y'/'N'
            # entered at the re-prompt could never be accepted.
            ans = input('please enter yes (y) or no (n): ').lower()
            continue
        if ans == 'y' or ans == 'yes':
            return True
        if ans == 'n' or ans == 'no':
            return False
def create_hash_table(distances: list):
    """
    Build an id -> value mapping from Distance objects.

    :param distances: list of Distance objects
    :return: dict with _ids as key and distance value as value
    """
    return {dist._id: dist.value for dist in distances}
def db_model_repr(self):
    """Create a automatic meaningful repr for db.Model classes

    Usage example:
        class MyClass(db.Model):
            __repr__ = db_model_repr
    """
    # `__table__.c` holds the SQLAlchemy column objects; `str(column)` is
    # "table.column", so the last dotted segment is the attribute name.
    fields = [str(x).split('.')[-1] for x in self.__table__.c]
    values = ["{}={!r}".format(field, getattr(self, field)) for field in fields]
    return "{}({})".format(self.__class__.__name__, ', '.join(values))
def lookup_key_from_chunk_key(chunk_key):
    """
    Break out the lookup_key from the chunk_key.

    Args:
        chunk_key (str): volumetric chunk key =
            "hash&num_items&col_id&exp_id&ch_id&res&x&y&z"

    Returns (str): lookup_key "col_id&exp_id&ch_id"
    """
    return "&".join(chunk_key.split('&')[2:5])
from typing import Union
def coerce_to_bytes(thing: Union[str, bytes]) -> bytes:
    """
    Ensure whatever is passed in is a bytes object.
    """
    if isinstance(thing, str):
        return thing.encode('UTF-8')
    return thing
def mishra_bird_constr(x, *args):
    """Constraint for the Mishra's Bird function."""
    # Negated circle constraint: positive inside the disc of radius 5
    # centered at (-5, -5).
    return 25 - (x[0] + 5) ** 2 - (x[1] + 5) ** 2
import ctypes
def _get_ctypes_dtype(dt):
"""Return a ctypes c_* datatype given a string data type."""
if "int" in dt:
return getattr(ctypes, f"c_{dt}")
elif dt == "float32":
return ctypes.c_float
elif dt == "float64":
return ctypes.c_double
else:
assert False, f"unknown dtype: {dt}" | 0efdaa0c60dfc04eff4215b896897abf3b24bc27 | 126,195 |
def _autotype(var):
"""Automatically convert strings to numerical types if possible."""
if type(var) is not str:
return var
if var.isdigit() or (var.startswith("-") and var[1:].isdigit()):
return int(var)
try:
f = float(var)
return f
except ValueError:
return var | 48f72e8e43c6affa32962b3167051624e9e479c9 | 126,196 |
import pathlib
def get_gql_template(template_path=None):
    """Gets graphql html template text from template.html"""
    if not template_path:
        # Default to gql/template.html next to this module.
        here = pathlib.Path(__file__).parent.absolute()
        template_path = f"{here}/gql/template.html"
    with open(template_path, encoding="utf-8") as handle:
        return handle.read()
def box(text: str, lang=""):
    """Return text in a markdown block"""
    return "```" + lang + "\n" + text + "```"
def add_number_to_cols(df):
    """Prefix each column name with its index, e.g. [col1, col2] --> [[0]col1, [1]col2]"""
    renamed = []
    for index, name in enumerate(df.columns):
        renamed.append(f"[{index}]{name}")
    df.columns = renamed
    return df
import re
def get_vlan_parent(device, vlan):
    """Return the parent interface of a VLAN subinterface.

    :param device: VLAN interface name.
    :param vlan: VLAN ID.
    :returns: parent interface name.
    """
    # Strip a trailing ".<vlan>" suffix, if present.
    return re.sub(r'\.%s$' % vlan, '', device)
def s_curve(CurrTime, Amp, RiseTime, StartTime=0.0):
    """
    Generate an s-curve command.

    Arguments:
      CurrTime : the current timestep, or an array of times
      Amp : the magnitude of the s-curve (or final setpoint)
      RiseTime : the rise time of the curve
      StartTime : the time that the command should start

    Returns:
      The command at the current timestep, or an array representing the
      command over the times given (if CurrTime was an array)
    """
    # Normalized time since the command started, in units of RiseTime.
    tau = (CurrTime - StartTime) / RiseTime
    # Boolean masks select which phase each time sample falls into; they
    # multiply through, so scalars and numpy arrays both work.
    rising_first = (tau >= 0) * (tau < 0.5)      # accelerating half
    rising_second = (tau >= 0.5) * (tau < 1.0)   # decelerating half
    settled = (tau >= 1.0)                       # command complete
    shape = (2.0 * tau**2) * rising_first \
        + (-2.0 * tau**2 + 4.0 * tau - 1.0) * rising_second \
        + 1.0 * settled
    return Amp * shape
import collections
def odict_delete(odict, key):
    """Return a copy of an OrderedDict with the selected key:value pair removed.

    Parameters
    ----------
    odict : collections.OrderedDict
        Ordered dictionary to copy.
    key : string
        Key to delete.

    Returns
    -------
    collections.OrderedDict
        A copy of the dictionary with the key:value pair deleted;
        the original is left untouched.
    """
    return collections.OrderedDict(
        (k, odict[k]) for k in odict.keys() if k != key
    )
def get_columns(c, table, verbose=False):
    """Get all columns in a specified table.

    NOTE(review): `table` is interpolated directly into the SQL string;
    callers must not pass untrusted input here.
    """
    cursor = c.execute("select * from " + table)
    names = [column[0] for column in cursor.description]
    if verbose:
        print(names)
    return names
def extract_point_list_from_augmented_points(augmented_points):
    """
    Given a list or generator of augmented points extract the geo point
    representation as a list.
    """
    return [point['geoPoint'] for point in augmented_points]
def millis_to_minutes(millis):
    """Return minutes from input of milliseconds."""
    total_minutes = int(millis) / 60000
    # Keep only the minutes-within-the-hour component, 2 decimals.
    return round(total_minutes % 60, 2)
def recursive_lookup(lookup, index_list):
    """
    Follow the keys in `index_list`, in order, through nested
    containers in `lookup`.

    For example, given ['inventory', 'wood'] this returns
    lookup['inventory']['wood'].

    :return: the value reached after applying every key in order.
    """
    current = lookup
    for key in index_list:
        current = current[key]
    return current
def init_memory(original, noun, verb):
    """Return a copy of the original memory state with the custom noun and verb"""
    return [original[0], noun, verb, *original[3:]]
import torch
def load_checkpoint(model, checkpoint_file, optimizer=None):
    """
    Loads model's state (i.e. weights), optimizer's state from a checkpoint file.
    Restores the random state of PyTorch.
    It also returns the corresponding loss and epoch needed to continue the training.

    :param model: model object for which the parameters are loaded
    :param checkpoint_file: file path where the checkpoint is stored
    :param optimizer: (optional) optimizer object which the state is loaded
    :return: loss and epoch
    """
    target = 'cuda' if torch.cuda.is_available() else 'cpu'
    checkpoint = torch.load(checkpoint_file, map_location=torch.device(target))
    model.load_state_dict(checkpoint['model_state_dict'])
    # The RNG state tensor must live on CPU before it can be restored.
    torch.random.set_rng_state(checkpoint['rng_state'].cpu())
    if optimizer:
        optimizer.load_state_dict(checkpoint['optim_dict'])
    return checkpoint['loss'], checkpoint['epoch']
def _pipeline_is_running(j):
"""Return true if the specified K8s job is still running.
Args:
j: A K8s job object
"""
conditions = j.get("status", {}).get("conditions", [])
if not conditions:
return True
for c in conditions[::-1]:
# It looks like when a pipelinerun fails we have condition succceeded
# and status False
if c["type"].lower() in ["succeeded", "failed", "complete"]:
return False
return True | db58e30fef02284a063ace2f0db1270f124d556c | 126,245 |
import re
def split_sens(text):
    r""" split sentence and keep separator to the left

    Splitting only happens after the full-width punctuation marks
    、 , 。 ! ? — other characters (e.g. the full-width colon) do
    not split.

    Args:
        text (str):
    Returns:
        list[str]: splited sentence
    Examples:
        >>> split_sens("中文:语音,合成!系统\n")
        ['中文:语音,', '合成!', '系统']
    """
    # Raw docstring above keeps "\n" literal so the doctest is valid;
    # the previous example also wrongly showed a split after the
    # full-width colon, which is not in the separator set.
    texts = re.split(r";", re.sub(r"([、,。!?])", r"\1;", text.strip()))
    return [x for x in texts if x]
def vis_params_ndvi(band=None):
    """
    Return the visual parameters for NDVI maps, representing the values with a red to green color palette

    :param band: list of band names to visualize; defaults to ["NDVI"].
    :return: dict of visualization parameters (bands, min/max, palette).
    """
    # Avoid a mutable default argument: build the default list per call
    # so callers mutating the returned dict cannot corrupt later calls.
    if band is None:
        band = ["NDVI"]
    return {
        'bands': band,
        'min': -1,
        'max': 1,
        'palette': ["red", "orange", "yellow", "green", "darkgreen"],
    }
import hashlib
import warnings
import pathlib
def hashfile(fname, blocksize=65536, count=0, constructor=hashlib.md5,
             hasher_class=None):
    """Compute md5 hex-hash of a file

    Parameters
    ----------
    fname: str or pathlib.Path
        path to the file
    blocksize: int
        block size in bytes read from the file
        (set to `0` to hash the entire file)
    count: int
        number of blocks read from the file
    hasher_class: callable
        deprecated, see use `constructor` instead
    constructor: callable
        hash algorithm constructor
    """
    if hasher_class is not None:
        warnings.warn("The `hasher_class` argument is deprecated, please use "
                      "`constructor` instead.")
        constructor = hasher_class

    hasher = constructor()
    # blocksize == 0 is documented as "hash the entire file", but
    # fd.read(0) returns b"" and would end the loop immediately,
    # yielding the hash of empty input. Map it to read-all instead.
    if blocksize == 0:
        blocksize = -1
    fname = pathlib.Path(fname)
    with fname.open('rb') as fd:
        buf = fd.read(blocksize)
        ii = 0
        while len(buf) > 0:
            hasher.update(buf)
            buf = fd.read(blocksize)
            ii += 1
            if count and ii == count:
                break
    return hasher.hexdigest()
def remove_quotes(value, unused):
    """Strip all double-quote characters from *value* (second arg is ignored)."""
    return "".join(ch for ch in value if ch != '"')
def read_prophage_table(infile):
    """
    Reads tab-delimited table with columns for:
    1 - path to GenBank file,
    2 - replicon id,
    3 - prophage start coordinate,
    4 - prophage end coordinate,
    5 (optional) - prophage name (if not provided pp1, pp2, etc.
    will be assigned for each file)
    Lines starting with '#' are skipped.
    :param infile: path to the file
    :return prophages: dictionary of GenBank file(s) paths, replicons and prophages coordinates
    """
    prophages = {}
    with open(infile) as inf:
        for line in inf:
            if line.startswith('#'):
                continue
            fields = line.strip().split('\t')
            file_path, replicon_id, start, end = fields[:4]
            start, end = int(start), int(end)
            # Normalize orientation BEFORE building the coords tuple.
            # (Previously the swap compared strings and ran after the
            # tuple was built, so it had no effect.)
            if start > end:
                start, end = end, start
            # adjust coords to Python's indexing
            coords = (start - 1, end - 1)
            # Auto-number unnamed prophages per file, counting prophages
            # across ALL replicons (counting replicons produced
            # duplicate names like two "pp2" entries).
            pp_cnt = sum(len(pps) for pps in prophages.get(file_path, {}).values()) + 1
            pp = fields[4] if len(fields) == 5 else f"pp{pp_cnt}"
            prophages.setdefault(file_path, {}).setdefault(replicon_id, {})[coords] = pp
    return prophages
def parabolic_interpolation(signal, tau):
    """Parabolic Interpolation on tau. Step 5 in `YIN`_.

    Fits a parabola through the samples at tau-1, tau, tau+1 and returns
    the abscissa of its minimum, refining the integer lag estimate.

    Args:
        signal (:obj:`numpy.array(float)`): A small piece normalised self correlated audio d'(t, tau) processed by normalisation(). 1D array.
        tau (int): Estimated threshold (integer lag).

    Returns:
        float: A better estimation of tau.
    """
    N, tau = len(signal), int(tau)
    # Left neighbour of tau; clamped so tau == 0 keeps x1 == tau.
    x1 = tau if tau < 1 else tau-1
    # Right neighbour of tau; clamped so we never index at/past N/2
    # (only the first half of the signal is used here).
    x2 = tau if tau+1 >= N/2 else tau+1
    if x1 == tau:
        # No distinct left neighbour: return whichever of tau / right
        # neighbour has the smaller value.
        return tau if signal[tau] <= signal[x2] else x2
    elif x2 == tau:
        # No distinct right neighbour: compare against the left side.
        return tau if signal[tau] <= signal[x1] else x1
    else:
        s0, s1, s2 = signal[x1], signal[tau], signal[x2]
        # Vertex of the parabola through (x1, s0), (tau, s1), (x2, s2);
        # fall back to tau when the denominator is zero (collinear points).
        return tau if 2 * s1 - s2 - s0 == 0 else tau + (s2 - s0) / (2 * (2 * s1 - s2 - s0))
import time
def wait(func, msg=None, delay=1, tries=60):
    """
    Repeatedly call FUNC until it returns something truthy, and return it.

    FUNC is called up to TRIES times, sleeping DELAY seconds between
    attempts. If it never returns a true value, a TimeoutError carrying
    MSG (or a default message) is raised; if the final attempt raises,
    that exception propagates instead.

    Arguments:
      func: The function to call.
      msg: A error message to use when the timeout occurs. Defaults
        to a generic message.
      delay: How long to wait between calls to FUNC, in seconds.
        Defaults to 1.
      tries: How often to call FUNC. Defaults to 60.

    Raises:
      TimeoutError: When a timeout occurs.
    """
    for attempt in range(tries):
        try:
            result = func()
        except Exception:
            # Swallow errors on early attempts; propagate the last one.
            if attempt == tries - 1:
                raise
        else:
            if result:
                return result
        time.sleep(delay)
    raise TimeoutError(msg or "Condition did not become true.")
def get_intersection_set(group):
    """ Task 2: gets intersection set of all questions in a group.
    Make a set for every declaration and return the intersection.
    :param group: list of strings
    """
    question_sets = [set(declaration) for declaration in group]
    return question_sets[0].intersection(*question_sets)
def _strip_prefix(package, version, strip_prefix = ""):
"""Computes the strip prefix for a downloaded archive of the provided
PyPI package and version.
Args:
package: PyPI package name.
version: version for which the archive should be downloaded.
strip_prefix: additional directory prefix to strip from the extracted
files.
"""
if strip_prefix:
return "{0}-{1}/{2}".format(package, version, strip_prefix)
return "{0}-{1}".format(package, version) | c1bcec6bbe139879ac63d26341f163d5ebc60155 | 126,263 |
from pathlib import Path
def sql_from_file(p: Path) -> str:
    """Read sql string from .sql file

    Parameters
    ----------
    p : Path
        Path to .sql file

    Returns
    -------
    str
        sql string
    """
    return Path(p).read_text()
def output_transform_tl(process_output):
    """
    Output transform for traffic light status metrics.
    """
    predictions, targets = process_output[0], process_output[1]
    # Collapse one-hot / logit rows to class indices along dim 1.
    return {
        'y_pred': predictions['traffic_light_status'].argmax(dim=1),
        'y': targets['traffic_light_status'].argmax(dim=1),
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.