content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def centerxywh_to_xyxy(boxes):
    """Convert a box from center form to corner form.

    args:
        boxes: list of center_x, center_y, width, height
    return:
        list of [x1, y1, x2, y2] — top-left and bottom-right corners
    """
    cx, cy = boxes[0], boxes[1]
    half_w = boxes[2] / 2
    half_h = boxes[3] / 2
    return [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
|
2d1afd843a34d1a0bdd7b44b8a76127eda2b3e5d
| 283,533
|
import torch
def batch_randperm(n_batch: int, n: int, device: str = 'cpu') -> torch.Tensor:
    """
    Draw several independent random permutations.

    :param n_batch: Number of permutations.
    :param n: Length of each permutation.
    :param device: PyTorch device the result lives on.
    :return: Integer tensor of shape [n_batch, n], one permutation per row.
    """
    # torch has no native batched randperm; see
    # https://discuss.pytorch.org/t/batched-shuffling-of-feature-vectors/30188/4
    # https://github.com/pytorch/pytorch/issues/42502
    perms = [torch.randperm(n, device=device) for _ in range(n_batch)]
    return torch.stack(perms, dim=0)
|
2067703054ec87404f109d9246c52e918d216596
| 503,190
|
def isValidArgument(s):
    """Return True when s contains none of the characters forbidden in an IRC message argument."""
    return not any(forbidden in s for forbidden in ('\r', '\n', '\x00'))
|
0d613d30088cb0fdf92b795997b16ff358f48913
| 203,778
|
from typing import Sequence
def is_sequence(s):
    """Return True when `s` is a non-string sequence.

    Args:
        s: object to check
    Returns:
        bool: True for any `typing.Sequence` except `str`.
    """
    if isinstance(s, str):
        return False
    return isinstance(s, Sequence)
|
1871f5ea513658ad6bf00de817ccf06554f08aa0
| 324,226
|
from textwrap import dedent
def dedent_nodetext_formatter(nodetext, has_options, caller=None):
    """
    Strip common leading whitespace from the node text.

    `has_options` and `caller` are accepted only to satisfy the
    formatter call signature; they are not used.
    """
    cleaned = dedent(nodetext)
    return cleaned
|
7a4a06a68470f75eba26aa311b99f85a2859367a
| 42,039
|
import random
def split_train_val_test(annos, randomize=True, splits=(0.6, 0.2, 0.2), seed=1):
    """Convert annotation index into splits based on classes
    :param annos: (list) of filepaths (left unmodified; a copy is split)
    :param randomize: (bool) randomize list
    :param splits: (tuple) splits of train, val, test
    :param seed: (int) random seed
    :returns (dict) of train, val, test lists
    """
    # Work on a copy: previously random.shuffle() reordered the caller's
    # list in place, and random.seed() reseeded the *global* RNG as a
    # side effect. A local Random(seed) yields the same shuffle sequence.
    annos = list(annos)
    if randomize:
        random.Random(seed).shuffle(annos)
    n_annos = len(annos)
    frac_train, frac_val, _frac_test = splits
    n_train = int(n_annos * frac_train)
    n_val = int(n_annos * frac_val)
    return {
        'train': annos[:n_train],
        'val': annos[n_train:n_train + n_val],
        'test': annos[n_train + n_val:],
    }
|
b160710cddafe335ab6698948ecab83c8f6af488
| 163,571
|
def get_env_id(task_type, aircraft) -> str:
    """
    Build a gym-style env ID from the environment's components.

    :param task_type: Task class, the environment's task
    :param aircraft: Aircraft namedtuple, the aircraft to be flown
    """
    parts = ('JSBSim', task_type.__name__, aircraft.name, 'v0')
    return '-'.join(parts)
|
f119690037861f880a7ef341be75b2de0adff519
| 560,600
|
def strip_quotes(arg):
    """Remove a matching pair of outer quotes (single or double) from a string.

    :param arg: str - string to process
    :return str - the string without its outer quotes, or unchanged
    """
    if len(arg) > 1:
        first, last = arg[0], arg[-1]
        if first == last and first in ('"', "'"):
            return arg[1:-1]
    return arg
|
97c8cefe2d0de2b3cecb8b6ed94a40ce4be89b94
| 37,134
|
from typing import Iterable
from typing import Callable
from typing import Any
from typing import Generator
def argzip(sequence: Iterable[Callable], *args: Any) -> Generator:
    """
    Pair each element of `sequence` with the same fixed argument(s).

    Unlike `zip`, the extra values are repeated rather than iterated.
    >>> list(argzip([1,2,3,4], "number"))
    [(1, 'number'), (2, 'number'), (3, 'number'), (4, 'number')]
    >>> list(argzip([1,2,3,4], "number", "int"))
    [(1, 'number', 'int'), (2, 'number', 'int'), (3, 'number', 'int'), (4, 'number', 'int')]
    """
    for element in sequence:
        yield (element, *args)
|
f93a84298559eee1565d9f3c4537cc83be00738c
| 535,358
|
import re
def _parse_string(s):
"""parse strings such as '1^ 2' into a term form ((1, 1), (2, 0))"""
s = s.strip()
if s == "":
return ()
s = re.sub(" +", " ", s)
terms = s.split(" ")
processed_terms = []
for term in terms:
if term[-1] == "^":
dagger = True
term = term[:-1]
else:
dagger = False
orb_nr = int(term)
processed_terms.append((orb_nr, int(dagger)))
return tuple(processed_terms)
|
cfea06ee4f6a7067a0d95ca03f7864b76189005a
| 457,161
|
def parse_int(token):
    """Convert a token of an integer literal to its integer value.
    >>> parse_int("100")
    100
    >>> parse_int("0xA")
    10
    """
    # Explicit prefixes select the base; everything else is decimal.
    for prefix, base in (("0x", 16), ("0o", 8)):
        if token.startswith(prefix):
            return int(token, base=base)
    return int(token)
|
ec73eedf190226ba68519f645f4043d256726a46
| 194,292
|
def scapozza(rho):
    """
    Compute Young's modulus (MPa) from density (kg/m^3).
    Arguments
    ---------
    rho : float or ndarray
        Density (kg/m^3).
    Returns
    -------
    E : float or ndarray
        Young's modulus (MPa).
    """
    RHO_ICE = 917e-12           # density of ice in t/mm^3
    rho_t_mm3 = rho * 1e-12     # convert kg/m^3 -> t/mm^3
    # Power-law fit for Young's modulus in MPa
    return 5.07e3 * (rho_t_mm3 / RHO_ICE) ** 5.13
|
866d4775a5c251998e4dc07205346093e0ac5095
| 403,722
|
def make_extended_read_name(original_name: str, probe: str, umi: str) -> str:
    """Build a read name that embeds the probe name and UMI."""
    for required in (original_name, probe):
        assert len(required) > 0
    return f'{original_name}:::pr_{probe}:::umi_{umi}'
|
a66f76229e3378fa7acbb307163e010f41bd3a9c
| 207,520
|
def file_to_set(file_name):
    """Read a file and return its lines (newlines removed) as a set.

    Args:
        file_name (str): name of file
    Returns:
        set: unique lines from the file
    """
    with open(file_name, 'rt') as handle:
        return {line.replace('\n', '') for line in handle}
|
389646aa4030b1c1992418c5cf6b0e558bc00736
| 112,185
|
import cmath
import math
def robustlog(x):
    """Return log(x) if x > 0, the complex log cmath.log(x) if x < 0 or
    x is complex, or float('-inf') if x == 0.
    """
    if x == 0.:
        return float('-inf')
    if isinstance(x, complex):
        return cmath.log(x)
    # Bug fix: negative values previously took the complex branch only for
    # exact `float` instances, so negative ints fell through to math.log
    # and raised ValueError, contradicting the documented contract.
    if x < 0:
        return cmath.log(x)
    return math.log(x)
|
718db8bb90905eb7e0ea7f82fc4d42efc99503be
| 268,065
|
def normalize(image):
    """Normalize an image along its channel axis.

    Args:
        image: Image array (values in [0, 255], 3 channels last).
    Returns: Normalized image.
    """
    # dataset-specific per-channel statistics
    channel_std = [0.25472827, 0.25604966, 0.26684684]
    channel_mean = [0.48652189, 0.50312634, 0.44743868]
    scaled = image / 255.
    return (scaled - channel_mean) / channel_std
|
01ee94d6796c92b03e8792844e7e2481724e5627
| 395,920
|
def WaitEvent(event):
    """Wait for an event without a timeout and without blocking signals.

    A plain event.wait() masks all signals until the event is set; polling
    in 100 ms slices keeps signal delivery latency under 100 ms.

    Returns:
        True once the event is set (always, since there is no timeout) —
        mirroring the return convention of event.wait().
    """
    while True:
        if event.is_set():
            return True
        event.wait(0.1)
|
e5eec03edc59dbaf62ddd8ef941450e3a497deca
| 526,480
|
import math
def get_photon_density_gaussian(
    elec_x, elec_y, elec_z, ct, photon_n_lab_max, inv_laser_waist2,
    inv_laser_ctau2, laser_initial_z0, gamma_boost, beta_boost ):
    """
    Photon density of the scattering Gaussian laser pulse at one electron's
    position and the current time.

    Parameters
    ----------
    elec_x, elec_y, elec_z: floats
        Electron position (simulation frame)
    ct: float
        Current time in the simulation frame (multiplied by c)
    photon_n_lab_max: float
        Peak photon density in the lab frame (at the pulse maximum)
    inv_laser_waist2, inv_laser_ctau2, laser_initial_z0: floats
        Gaussian pulse properties (lab frame)
    gamma_boost, beta_boost: floats
        Lorentz boost between lab and simulation frame.

    Returns
    -------
    photon_n_sim: float
        Photon density in the simulation frame
    """
    # Lorentz-transform the electron coordinates into the lab frame
    elec_zlab = gamma_boost*( elec_z + beta_boost*ct )
    elec_ctlab = gamma_boost*( ct + beta_boost*elec_z )
    # Gaussian transverse and longitudinal profile, evaluated in the lab frame
    r2 = elec_x**2 + elec_y**2
    longitudinal = elec_zlab - laser_initial_z0 + elec_ctlab
    photon_n_lab = photon_n_lab_max * math.exp(
        - 2*inv_laser_waist2*r2 - 2*inv_laser_ctau2*longitudinal**2 )
    # Transform the density back into the simulation frame
    photon_n_sim = gamma_boost*photon_n_lab*( 1 + beta_boost)
    return( photon_n_sim )
|
8766a342fcfe6b03321de32a50efe1641426d9d9
| 118,588
|
def iterate_ngrams(text, n):
    """Return the list of ngrams of size ``n`` in ``text``.
    Example:
        >>> for ngram in iterate_ngrams("example", 4):
        ...     print(ngram)
        exam
        xamp
        ampl
        mple
    Args:
        text (str): text to iterate over
        n (int): size of window for iteration
    Returns:
        list of successive ngrams of the text
    Raises:
        ValueError: If n is non positive
    """
    if n <= 0:
        raise ValueError("n must be a positive integer")
    last_start = len(text) - n + 1
    return [text[start:start + n] for start in range(last_start)]
|
78454188e265f23eb255cedddc55c53955e49734
| 499,864
|
def encode_int(i):
    """Encode an integer as UTF-8 bytes for a bytes database."""
    return str(i).encode("utf-8")
|
96f405d02c1fd53316ad9b81cfc8e5eceac453f1
| 648,463
|
def compose_ngrams_regex(emoji_regex):
    """ Build the regex *pattern string* used to parse out Ngrams.

    :param emoji_regex: a regular expression that matches emojis
    :return: an (uncompiled) pattern string matching ngrams
    """
    # Alternatives, in match-priority order: emoji, HTML entity, URL,
    # word/currency token, dot-dash runs, any single non-space char.
    alternatives = (
        emoji_regex,
        r"(&\S+;)",
        r"(https?:\/\/\w+\.\S+)",
        r"((?:\b|[@\#\$\£\¥\¢]|[\u20a0-\u20cf])[\u20a0-\u20cf\$\£\¥\¢\w\@\.\#\‘\’\'\&\:\,\]\*\-\/\[\=]+([\'\']|\b))",
        r"([\-\.]+)",
        r"(\S)",
    )
    return '(' + '|'.join(alternatives) + ')'
|
fb5a138286bf4d9717da01d6fc6053901e7c6bd9
| 638,224
|
def translate_backend_state_name(state: str):
    """
    Translate a backend state-machine state into the local state name.

    :param state: new state given from the backend
    :return: translated state for state_machine ('error' if unknown)
    """
    if state == 'blinking':
        return 'blinking'
    if state == 'custom3':
        return 'bootloader'
    if state in ('installation', 'sensing', 'custom1', 'custom2'):
        return 'waitingForOvershoot'
    return 'error'
|
f7d5ed2bfacc0b105059b5dcbd54baf702b6d336
| 536,202
|
def cubic_hermite_spline(x, p0, m0, p1, m1):
    """Evaluate the cubic p with p(0)=p0, p'(0)=m0, p(1)=p1, p'(1)=m1 at x."""
    # Hermite-basis coefficients of x^3 and x^2
    c3 = m0 + m1 + p0 + p0 - p1 - p1
    c2 = p1 - c3 - m0 - p0
    # Horner evaluation: p0 + m0*x + c2*x^2 + c3*x^3
    return p0 + x * (m0 + x * (c2 + x * c3))
|
a51ec24914c6926dde893aa78ba7abca1ab29e54
| 459,142
|
def nodestrength(mtx, mean=False):
    """
    Compute the node strength of a graph.

    Parameters
    ----------
    mtx : numpy.ndarray
        A matrix depicting a graph.
    mean : bool, optional
        If True, average the node strength along the last axis.

    Returns
    -------
    numpy.ndarray
        The node strength (per node, or averaged).
    """
    strength = abs(mtx).sum(axis=0)
    return strength.mean(axis=-1) if mean else strength
|
0b8a30a6b1ab2218368c0af0f6bf8036b819d431
| 18,723
|
import re
def prepare_defines(defines, fmt, excluded = None):
    """Render defines for a C++ or Assembly program.

    Each (name, value) pair is formatted with `fmt`; names fully matching
    the `excluded` regex are skipped, and pairs `fmt` cannot format are
    silently dropped.
    """
    rendered = []
    for name, value in defines.items():
        # anchor the exclusion pattern so it must match the whole name
        if excluded and re.match(excluded + '$', name):
            continue
        try:
            rendered.append(fmt % (name, value))
        except TypeError:
            # value incompatible with the format spec — skip it
            pass
    return '\n'.join(rendered)
|
6d1713c2aa82ea711cd976f7078d8d6f36759de1
| 391,297
|
def max_in(values):
    """
    Return the largest element of `values`, or None when it is empty.
    """
    largest = None
    for item in values:
        # the None guard makes the first element win unconditionally
        if largest is None or item > largest:
            largest = item
    return largest
|
4672ca12b61c91332a05f705661907ef96d0bb34
| 480,009
|
import re
def parse_countdown_length(string):
    """Given a countdown length string (e.g. "1h 30m 15s"), return the
    number of seconds to run for.

    Raises a ValueError if the string cannot be parsed or contains no
    time components at all.
    """
    match = re.match(
        r'^(?:(?P<hours>[0-9]+)h\s*)?'
        r'(?:(?P<minutes>[0-9]+)m\s*)?'
        r'(?:(?P<seconds>[0-9]+)s)?$',
        string.strip().lower(),
    )
    # Bug fix: every component is optional, so an empty/whitespace string
    # matched the regex and silently yielded 0; treat "no components" as
    # unparseable too.
    if not match or not any(match.group('hours', 'minutes', 'seconds')):
        raise ValueError('Unable to parse countdown length %r' % string)
    seconds = 0
    if match.group('hours'):
        seconds += int(match.group('hours')) * 60 * 60
    if match.group('minutes'):
        seconds += int(match.group('minutes')) * 60
    if match.group('seconds'):
        seconds += int(match.group('seconds'))
    return seconds
|
4ccc705050363668544bae7fba0ca8eb36a0b864
| 315,886
|
def convert_bin_to_int(list_string_col):
    """
    Convert binary-digit strings to integers.

    Args:
        list_string_col: list of strings (characters 0 or 1)
    Returns:
        list of integers (base-2 interpretation of each string)
    """
    return [int(bits, 2) for bits in list_string_col]
|
af03589bf06210cac28d2125ab014f50a893a54e
| 315,209
|
def default(value, default):
    """
    Return `default` when `value` is :data:`None`, otherwise `value`.
    """
    return default if value is None else value
|
517ffb3c6f67ad9290d8c44be5bd54a90bc4e37c
| 52,995
|
from typing import Callable
from typing import List
from typing import Tuple
from typing import Type
import inspect
def parameters(method: Callable) -> List[Tuple[str, Type]]:
    """
    Inspect the desired method extracting a list of parameters needed
    to execute it.

    Parameters:
        method: The typing.Callable that will be inspected
    Returns:
        A list of tuples of (parameter name, annotation). Parameters
        without an annotation map to `inspect.Parameter.empty`
        (previously this raised KeyError). Non-callables yield [].
    """
    output: List[Tuple[str, Type]] = list()
    if callable(method):
        spec: inspect.FullArgSpec = inspect.getfullargspec(method)
        for arg in spec.args or ():
            # implicit receiver arguments are not real inputs
            if arg in ('self', 'cls'):
                continue
            # .get avoids a KeyError for unannotated parameters
            output.append((arg, spec.annotations.get(arg, inspect.Parameter.empty)))
    return output
|
7cb60dfb1ec9bc0adb4d5753699e9d955d3a9516
| 576,527
|
import math
def quaternion_to_euler_list(quaternion:list, degrees = False, axis_qw=0, axis_front=1, axis_left=2, axis_up=3) -> list:
    """
    Returns the euler representation of a quaternion [qw, qi, qj, qk] into
    yaw = [-pi, pi]
    pitch = [-pi/2, pi/2]
    roll = [-pi, pi]
    If the quaternion is not in the order [qw, qi, qj, qk], you can specify these variables to set the index of the dimension:
    - `axis_qw` > Contains the scalar factor of the quaternion
    - `axis_front` > Contains the axis indicating the POSITIVE-FRONT in the coordinate reference system
    - `axis_left` > Contains the axis indicating the POSITIVE-LEFT in the coordinate reference system
    - `axis_up` > Contains the axis indicating the POSITIVE-UP in the coordinate reference system
    Based on Eq 2.9 of the Technical report in:
    https://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=08A583E84796E221D446200475B7841A?doi=10.1.1.468.5407&rep=rep1&type=pdf
    :param quaternion: Unit quaternion representation
    :type quaternion: list of length 4
    :return: [yaw, pitch, roll] in radians, or degrees if `degrees=True`
    :rtype: list of length 3
    """
    # Unpack components according to the caller-specified axis ordering
    qr = quaternion[axis_qw]
    qi= quaternion[axis_front]
    qj = quaternion[axis_left]
    qk = quaternion[axis_up]
    # Squares of vector components to speed-up calculations
    sqi = qi*qi
    sqj = qj*qj
    sqk = qk*qk
    ### Euler angles
    # roll (x-axis rotation)
    sinr_cosp = 2 * (qr * qi + qj * qk)
    cosr_cosp = 1 - 2 * (sqi + sqj)
    roll = math.atan2(sinr_cosp, cosr_cosp)
    # pitch (y-axis rotation)
    sinp = 2 * (qr * qj - qk * qi)
    # Evaluates singularity at +/- 90 degrees to prevent GIMBAL LOCK. Happening at north/south pole.
    pitch = 0
    if (abs(sinp) >= 1):
        # NOTE(review): prints (rather than logs/raises) on gimbal lock;
        # callers get a clamped pitch and no exception.
        print("GIMBAL LOCK!! with quaternion", quaternion)
        pitch = math.copysign(math.pi / 2, sinp) # use 90 degrees if out of range
    else:
        pitch = math.asin(sinp)
    # yaw (z-axis rotation)
    siny_cosp = 2 * (qr * qk + qi * qj)
    cosy_cosp = 1 - 2 * (sqj + sqk)
    yaw = math.atan2(siny_cosp, cosy_cosp)
    # Transform from radians to degrees
    factor = 1
    if(degrees): ## CONVERT FROM RADIANS TO DEGREES
        factor = 180.0 / math.pi
    return [yaw*factor, pitch*factor, roll*factor]
|
92eda45932a20045ca578fcf5696bb567f32d330
| 489,780
|
def mock_context_one_device_read_endpoint_no_write(mock_context_class, mock_device,
                                                   mock_interface_settings_endpoint_factory,
                                                   valid_read_endpoint_address):
    """
    Fixture yielding a mock USB context whose single device matches on
    class, subclass and protocol but exposes only a valid read endpoint
    (no write endpoint).
    """
    read_only_settings = mock_interface_settings_endpoint_factory(valid_read_endpoint_address)
    # Wire the mock hierarchy: context -> [device] -> [settings]
    mock_device.iterSettings.return_value = [read_only_settings]
    mock_context_class.getDeviceList.return_value = [mock_device]
    return mock_context_class
|
052e4d1748a17fc77cbce178ee57792a13a579a3
| 427,089
|
def valid_page_name(page):
    """
    Check for a valid mainspace Wikipedia page name.

    Args:
        page: The page name to validate
    Returns:
        True if `page` does not start with any non-mainspace prefix.
    """
    # str.startswith accepts a tuple: one call covers every prefix.
    NON_MAINSPACE = ('File:',
                     'File talk:',
                     'Wikipedia:',
                     'Wikipedia talk:',
                     'Project:',
                     'Project talk:',
                     'Portal:',
                     'Portal talk:',
                     'Special:',
                     'Help:',
                     'Help talk:',
                     'Template:',
                     'Template talk:',
                     'Talk:',
                     'Category:',
                     'Category talk:')
    return not page.startswith(NON_MAINSPACE)
|
5a6ab243b70294db3d61903168444f531521c86c
| 561,796
|
from typing import Tuple
def get_sift_bin_ksize_stride_pad(patch_size: int, num_spatial_bins: int) -> Tuple:
    """Returns a tuple with SIFT parameters, given the patch size
    and number of spatial bins.
    Args:
        patch_size: (int) side length of the (square) patch in pixels
        num_spatial_bins: (int) number of spatial bins per side
    Returns:
        ksize, stride, pad: ints
    Raises:
        ValueError: when the derived pooling parameters cannot tile the
            patch into exactly `num_spatial_bins` outputs (typically the
            patch is too small for the requested bin count).
    """
    # Pooling kernel spans two inter-bin intervals of the patch.
    ksize: int = 2 * int(patch_size / (num_spatial_bins + 1))
    stride: int = patch_size // num_spatial_bins
    pad: int = ksize // 4
    # Standard convolution output-size formula; must equal the bin count.
    out_size: int = (patch_size + 2 * pad - (ksize - 1) - 1) // stride + 1
    if out_size != num_spatial_bins:
        raise ValueError(
            f"Patch size {patch_size} is incompatible with \
            requested number of spatial bins {num_spatial_bins} \
            for SIFT descriptor. Usually it happens when patch size is too small\
            for num_spatial_bins specified"
        )
    return ksize, stride, pad
|
e05426294afbc3c2a0c2a06d6b30903e72b25c39
| 465,506
|
def pltime(self, tmin="", tmax="", **kwargs):
    """Define the time range for which data are to be displayed.

    APDL Command: PLTIME

    Parameters
    ----------
    tmin
        Minimum time (defaults to the first point stored).
    tmax
        Maximum time (defaults to the last point stored).

    Notes
    -----
    Defines the time (or frequency) range (within the range stored) for
    which data are to be displayed. Time is always displayed in the Z-axis
    direction for 3-D graph displays. If XVAR = 1, time is also displayed
    in the X-axis direction and this control also sets the abscissa scale
    range.
    """
    return self.run(f"PLTIME,{tmin},{tmax}", **kwargs)
|
c2d65b2cb221f83972c9fbe9ba615266a308550f
| 106,789
|
def rpc_plugins_list(handler):
    """
    Return information regarding enabled plugins in the server.

    :return: A dictionary mapping plugin name to its meta-data.
    :rtype: dict
    """
    manager = handler.server.plugin_manager
    # manager yields (key, plugin) pairs; only the plugin object is needed
    return {
        plugin.name: {
            'description': plugin.formatted_description,
            'name': plugin.name,
            'title': plugin.title,
            'version': plugin.version
        }
        for _, plugin in manager
    }
|
d534e3ba9379947821fc2a49b0439b4a4269d9ff
| 20,289
|
from typing import List
def get_effective_lindbladian_object_names() -> List[str]:
    """Return the list of valid effective-lindbladian-related object names."""
    return [
        "hamiltonian_vec",
        "hamiltonian_mat",
        "effective_lindbladian_mat",
        "effective_lindbladian",
    ]
|
7c5f63e2ab48fd9cb6e0f53fcab895261311e5b5
| 31,601
|
def calculate_chunk_slices(items_per_chunk, num_items):
    """Calculate slices for indexing an adapter.

    Parameters
    ----------
    items_per_chunk: (int)
        Approximate number of items per chunk.
    num_items: (int)
        Total number of items.

    Returns
    -------
    list of slices (the final slice may be shorter)
    """
    assert items_per_chunk > 0
    assert num_items > 0
    chunks = []
    for start in range(0, num_items, items_per_chunk):
        stop = min(start + items_per_chunk, num_items)
        chunks.append(slice(start, stop))
    return chunks
|
fd42f9be9149d7a59498e17bb76329a8e96a6ae7
| 654,976
|
import csv
def get_urls_from_data(data_path: str, sample):
    """
    Collect BBC Good Food URLs from the recipes.csv dataset.

    :param data_path: path to the input csv list (URL in the first column)
    :param sample: when > 0, collect at most this many BBC urls
    :return: dict with the collected 'bbc' urls ('all_recipes' is unused
             and always empty here)
    """
    bbc_urls = []
    all_recipes_urls = []
    with open(data_path) as handle:
        for row in csv.reader(handle):
            url = row[0]
            if "www.bbcgoodfood.com" not in url:
                continue
            # respect the sample cap only when one was requested
            if sample > 0 and len(bbc_urls) >= sample:
                continue
            bbc_urls.append(url)
    return {
        "bbc": bbc_urls,
        "all_recipes": all_recipes_urls
    }
|
7eb492e57f7060ca3c4384147fa792ef72c21fef
| 468,425
|
def exists(dict, key):
    """
    Check if a key exists in a dict
    """
    # membership on the mapping itself tests keys directly
    return key in dict
|
3dd531b8a13af2e8905f721e05756bd8184e24c4
| 696,741
|
def _Shorten(s, prefixofs, suffixofs, maxlen):
"""Shorten the given string if its length is >= maxlen.
Note: maxlen should generally be considerably bigger than
prefixofs + suffixofs. It's disconcerting to a reader when
you have a "..." to replace 10 bytes, but it feels fine when the
"..." replaces 500 bytes.
Args:
s: the string to shorten.
prefixofs: the number of chars to keep at the beginning of s.
suffixofs: the number of chars to keep at the end of s.
maxlen: if the string is longer than this, shorten it.
Returns:
A shortened version of the string.
"""
s = str(s)
if len(s) >= maxlen:
# When the string exceeds the limit, we deliberately shorten it to
# considerably less than the limit, because it's disconcerting when
# you have a "..." to replace 10 bytes, but it feels right when the
# "..." replaces 500 bytes.
s = s[0:prefixofs] + '.....' + s[-suffixofs:]
return s
|
00dbbf636e3aa39af8cf73984fbffec33b454349
| 54,281
|
def cal_origin_responsetime(dfs):
    """
    Sum of "turnaroundtimemsec" over rows where cachestatus == 0 and
    cacherefreshsrc == 'origin', as an int.

    sample output,
    ```
    "origin_response_time": 0,
    ```
    """
    origin_rows = dfs.query("cachestatus == 0 and cacherefreshsrc == 'origin'")
    total = origin_rows["turnaroundtimemsec"].sum()
    return int(total)
|
1150635d20916f97a731c4bb6e513c34b33e59f2
| 88,255
|
def getSancaiWuxing(x_ge):
    """
    Derive the five-element (wuxing) attribute from a heaven/person/earth
    grid number, keyed on its last digit:
    1,2 -> wood; 3,4 -> fire; 5,6 -> earth; 7,8 -> metal; 9,0 -> water.

    :param x_ge: grid number
    :return: the wuxing character ('' when the last digit matches nothing)
    """
    LAST_DIGIT_TO_WUXING = {
        1: '木', 2: '木',
        3: '火', 4: '火',
        5: '土', 6: '土',
        7: '金', 8: '金',
        9: '水', 0: '水',
    }
    return LAST_DIGIT_TO_WUXING.get(x_ge % 10, '')
|
047b25d00630d6d1efd695ff03f8733b6f70af64
| 270,677
|
def is_entry_a_header(key, value, entry):
    """Returns whether the given entry in the header is a expected header."""
    entry_lower = entry.lower()
    return key.lower() in entry_lower or value.lower() in entry_lower
|
c0b8b0acf3231939dd92f13ed19829916dba647a
| 426,301
|
def create_model_dict(input_channels, num_classes=[4, 1], name='2d'):
    """
    Creates `model_dict` dictionary from parameters.

    Parameters
    ----------
    input_channels: int
        1 indicates gray-channle image, 3 indicates RGB image.
    num_classes: list
        [4, 1] -> 4 indicates offset in x, offset in y, margin in x, margin in y; 1 indicates seediness score
    name: string
        '2d' selects `branched_erfnet`; anything else `branched_erfnet_3d`.
    """
    model_dict = {
        'name': 'branched_erfnet' if name=='2d' else 'branched_erfnet_3d',
        'kwargs': {
            'num_classes': num_classes,
            'input_channels': input_channels,
        }
    }
    # Bug fix: the format arguments were previously swapped, so the message
    # reported input_channels as the number of classes and vice versa.
    print(
        "`model_dict` dictionary successfully created with: \n -- num of classes equal to {}, \n -- input channels equal to {}, \n -- name equal to {}".format(
            num_classes, input_channels, name))
    return model_dict
|
00c57affbb449847cf7edb2ef55778c13ea031d9
| 147,412
|
def permutation_as_config_number( p ):
    """
    A numeric representation of a numeric list.
    Example:
    >>> permutation_as_config_number( [ 1, 1, 0, 0, 1 ] )
    11001
    """
    # fold left-to-right: each element becomes the next decimal digit
    result = 0
    for digit in p:
        result = result * 10 + digit
    return result
|
9cc81dd7d45cc42a654014f6007746ad3da1492e
| 525,377
|
from typing import Sequence
import random
def gen_rhythm(
    beats: int = 4, note_values: Sequence[int] = (8, 6, 4, 3, 2, 1)
) -> Sequence[int]:
    """Generates a rhythm of the given length with the given note values.
    Args:
        beats: Length of the rhythm to generate in quarter notes (default 4)
        note_values: The note values in 8th notes to use when generating the
            rhythm. For instance, a quarter note's note value would be 2 (two
            8th notes). (default (8, 6, 4, 3, 2, 1))
    Returns:
        A sequence of notes by their values in 8ths. For example, [1, 3] would
        be an 8th note followed by a dotted quarter note.
    Raises:
        ValueError: It may not be possible to generate a rhythm of a given
            length given specific note values. If that happens, this error
            is raised.
    """
    # Cap on retry rounds; exceeding it is taken to mean the target
    # length is unreachable with the given note values.
    iter_max = 500
    # Sentinel 0 so the first backtracking pop below always has a value.
    notes = [0]
    eighths = beats * 2  # target total length in 8th notes
    current_eighths = 0
    iter_counter = 0
    # Randomized backtracking: overfill with random notes, then drop the
    # last one and retry until the total lands exactly on the target.
    while current_eighths != eighths:
        iter_counter += 1
        if iter_counter > iter_max:
            raise ValueError(
                "Given note values cannot create rhythm of the given length"
            )
        # Remove the most recent note (the one that overshot, or the sentinel).
        current_eighths -= notes.pop()
        while current_eighths < eighths:
            note_val = random.choice(note_values)
            notes.append(note_val)
            current_eighths += note_val
    return notes
|
081d860f801709dca856a52f213faff67ec9cf0d
| 219,746
|
def bbands_inside_kchannels(dataframe_name):
    """Flag rows where the Bollinger Band sits inside the Keltner Channel.

    Args:
        dataframe_name (df): Dataframe with Bollinger Band and Keltner
            Channel indicator columns.
    Returns:
        The same dataframe with an added `bbkc_squeeze` column
        (1.0 = squeeze, 0.0 = no squeeze).
    """
    squeeze_mask = (
        (dataframe_name['bb_upper'] < dataframe_name['kc_upper'])
        & (dataframe_name['bb_lower'] >= dataframe_name['kc_lower'])
    )
    # default to 0.0, then mark the squeeze rows
    dataframe_name['bbkc_squeeze'] = 0.0
    dataframe_name.loc[squeeze_mask, 'bbkc_squeeze'] = 1.0
    return dataframe_name
|
9f7bbc5da186183eae1e9d1808edbc3d4df0e1e0
| 583,016
|
def cos_angle(left_side: float, right_side: float, third_side: float) -> float:
    """
    Cosine of the angle opposite `third_side`, by the law of cosines.
    """
    numerator = left_side ** 2 + right_side ** 2 - third_side ** 2
    return numerator / (2 * left_side * right_side)
|
7d1de7b032ceb21ede48f8ad55a3b7009e9b9b0a
| 551,652
|
def extract_tracklist_begin_num(content):
    """Return list of track names extracted from messy web content.

    The name of a track is defined as a line which begins with a number
    (excluding whitespace). Returned names are stripped of surrounding
    whitespace.
    """
    tracklist = []
    for line in content.splitlines():
        # Bug fix: str.lstrip/rstrip return new strings; the originals were
        # called for their (discarded) results, so indented track lines
        # were silently missed. strip() also covers the empty-line check.
        stripped = line.strip()
        if stripped and stripped[0].isdigit():
            tracklist.append(stripped)
    return tracklist
|
7d860fb0ea444ae0d9bd536a4644fa6b1c11a826
| 10,299
|
def edge_list_get_tail_index(edge_list, tail_index):
    """
    Return the first edge in `edge_list` whose tail_index equals the
    given index, or None when no edge matches.
    """
    return next(
        (edge for edge in edge_list if edge.tail_index == tail_index),
        None,
    )
|
1d7f55afec3fb9da269d1c45dd111ce05cb10bd5
| 28,575
|
def to_lbs(amount):
    """Converts an amount from grams to lbs rounded to two decimal places"""
    GRAMS_TO_LBS = .0022  # approximate factor used throughout this module
    return round(amount * GRAMS_TO_LBS, 2)
|
12ec52a2e43ddc9d225281f7d893403fb8050b74
| 312,819
|
import uuid
def is_valid_uuid(val):
    """
    Check whether a value parses as a UUID.

    :param val: candidate value (stringified before parsing)
    :return: True for a valid uuid string, False otherwise
    """
    try:
        uuid.UUID(str(val))
    except ValueError:
        return False
    return True
|
d04f658d3ae2fa85377e110b0a6716bc34ee9df0
| 9,689
|
def prefix(pattern, k):
    """we define PREFIX(Pattern) as the first (k-1)-mers in a k-mer Pattern"""
    # Bug fix: pattern[k - 1:] returned the *tail* of the pattern; the
    # prefix is its first k-1 symbols.
    return pattern[:k - 1]
|
a5ba03c9710943fd4126b54a3ad95840ac401658
| 163,621
|
def is_palindrome(num):
    """
    Return True when num reads the same forwards and backwards.
    """
    text = str(num)
    return text == text[::-1]
|
490ca1326254e525bcefb21e7822c82ad730962a
| 40,119
|
from typing import List
def is_subcat(c: List[str], sub_c: List[str]) -> bool:
    """
    Return True if sub_c is a subcategory of c.
    """
    # c must be a (non-strict) element-wise prefix of sub_c
    if len(c) > len(sub_c):
        return False
    return all(a == b for a, b in zip(c, sub_c))
|
0307399f695daf04f0d31a5c0b7d7def5fdcaa3a
| 449,985
|
from functools import reduce
def iterative_hash(s):
    """Compute the hash code of the given string as in:
    net/traffic_annotation/network_traffic_annotation.h

    Args:
        s: str
            The seed, e.g. unique id of traffic annotation.
    Returns: int
        A hash code.
    """
    # 31x + c rolling hash, kept within the fixed modulus
    code = 0
    for ch in s:
        code = (code * 31 + ord(ch)) % 138003713
    return code
|
90a1820fc4b389dbed35f0339bac429f3c71baa5
| 537,881
|
def filter_headline(df, non_disaggregation_columns):
    """Filter a dataframe down to just the headline data.

    Headline rows are those whose disaggregation columns are all missing.
    With multiple units, every unit's headline rows are kept.
    """
    special_cols = [col for col in non_disaggregation_columns if col in df.columns]
    # rows where everything outside the special columns is NaN
    disaggregations = df.drop(special_cols, axis=1)
    is_headline = disaggregations.isnull().all(axis=1)
    return df.filter(special_cols, axis=1)[is_headline]
|
3e2697a9802a6d5493ea8b81366ca5bf591d17e4
| 71,409
|
def condition(cond, fn, x):
    """ Apply fn to x only when cond(x) is true; otherwise return x unchanged """
    return fn(x) if cond(x) else x
|
63283409fe05f5eae26e3c7b55320c5349690515
| 206,158
|
import time
def time_function(function, *args, n=10000, **kwargs):
    """ time how long it takes for a function to evaluate n times """
    start = time.time()
    for _ in range(n):
        function(*args, **kwargs)
    return time.time() - start
|
c1b094f5ae9f58aaa68dfad10802ac6b9dce4290
| 530,586
|
import torch
def evaluate_sce(confidences: torch.Tensor,
                 true_labels: torch.Tensor,
                 n_bins: int = 15) -> float:
    """
    Compute the Static Calibration Error (SCE) of classifier predictions
    via per-class histogram binning.

    Args:
        confidences (Tensor): a tensor of shape [N, K] of predicted confidences.
        true_labels (Tensor): a tensor of shape [N,] of ground truth labels.
        n_bins (int): the number of bins used by the histrogram binning.
    Returns:
        sce (float): static calibration error of predictions.
    """
    # Equal-width confidence bins over [0, 1]
    ticks = torch.linspace(0, 1, n_bins + 1)
    bin_lowers = ticks[:-1]
    bin_uppers = ticks[ 1:]
    n_objects, n_classes = confidences.size()
    sce = torch.zeros(1, device=confidences.device)
    for cur_class in range(n_classes):
        for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
            cur_class_conf = confidences[:, cur_class]
            # Samples whose confidence for cur_class lies in (lower, upper];
            # `*` on the bool masks acts as logical AND.
            in_bin = cur_class_conf.gt(
                bin_lower.item()
            ) * cur_class_conf.le(
                bin_upper.item()
            )
            # Per-bin correctness (label == cur_class) and confidences
            bin_acc = true_labels[in_bin].eq(cur_class)
            bin_conf = cur_class_conf[in_bin]
            bin_size = torch.sum(in_bin)
            if bin_size > 0:
                avg_confidence_in_bin = torch.mean(bin_conf.float())
                avg_accuracy_in_bin = torch.mean(bin_acc.float())
                delta = torch.abs(avg_confidence_in_bin - avg_accuracy_in_bin)
                # |confidence - accuracy| weighted by bin occupancy,
                # averaged over all samples and classes
                sce += delta * bin_size / (n_objects * n_classes)
    return sce.item()
|
cb0ac9e02a0b8d27c5b65b599c71d766268f2993
| 592,654
|
from typing import Tuple
from typing import Callable
import re
import operator
def parse_condition(condition: str) -> Tuple[Callable[[int, int], bool], int]:
    """
    Parse a string representing a comparison like 'x < 1', returning
    a function representing the operation, and the int value 1,
    in the example case.
    If a string like '1 > x' is used, it will be converted to 'x < 1' first.
    """
    condition_regex = re.compile(r'^(x|\d+) ?(<|>|<=|>=|==|!=) ?(x|\d+)')
    matches = condition_regex.match(str(condition))
    if not matches:
        raise ValueError('Unrecognized condition string')
    op_dict = {
        '<': operator.lt,
        '<=': operator.le,
        '==': operator.eq,
        '!=': operator.ne,
        '>=': operator.ge,
        '>': operator.gt,
    }
    # mirror of each operator, used when the literal is on the left
    opposite_op = {'<': '>', '<=': '>=', '==': '==',
                   '>': '<', '>=': '<=', '!=': '!='}
    x_on_left = matches[1] == 'x'
    op = matches[2]
    val = int(matches[3] if x_on_left else matches[1])
    try:
        real_op = op_dict[op] if x_on_left else op_dict[opposite_op[op]]
    except KeyError:
        raise ValueError('Unrecognized operation "%s"' % op)
    return real_op, val
|
55efdc48031b12edabf8890fbec58fefeb0104aa
| 174,929
|
from datetime import datetime
def MicrosecondsToDatetime(microseconds):
    """Returns a UTC datetime for the given microsecond timestamp, or None
    for falsy input (0, None, ...)."""
    if not microseconds:
        return None
    return datetime.utcfromtimestamp(float(microseconds) / 1000000)
|
69fd3dc3b8d1a97e7a64037cabe988365b2c6e63
| 709,075
|
def insertion_sort(arr):
    """
    Insertion sort: sorts the list in place and returns it.

    Time complexity: O(n^2)
    Space complexity: O(1)
    """
    # element 0 is trivially sorted, so start from index 1
    for idx in range(1, len(arr)):
        key = arr[idx]
        pos = idx
        # shift larger predecessors right to open a slot for `key`
        while pos > 0 and arr[pos - 1] > key:
            arr[pos] = arr[pos - 1]
            pos -= 1
        arr[pos] = key
    return arr
|
1cd171f10085d28a33db09c13875e447975d2ecd
| 284,360
|
from typing import Dict
from typing import Optional
def format_name(data: Dict, is_recipe=True) -> Optional[str]:
    """Return a display name for a recipe or ingredient dict.

    Recipes prefer a truthy 'externalName'; otherwise a truthy 'name'
    is returned. Returns None when neither field holds a truthy value.
    """
    if is_recipe:
        external = data.get('externalName')
        if external:
            return external
    return data.get('name') or None
|
c662dc6f131bd1be3fd56bbba5c98bc186a87c12
| 223,114
|
def transpose_batch_sizes(lengths):
    """Convert per-sequence lengths into per-timestep batch sizes.

    Useful for RNNs with variable sequence lengths: entry t of the
    result is the number of sequences still active at timestep t.

    :param lengths: list of ints sorted in descending order
    :return: a list of length lengths[0]
    """
    longest = lengths[0]
    ptr = len(lengths) - 1
    batch_sizes = []
    for step in range(longest):
        # Walk the pointer left past every sequence that has ended.
        while ptr > 0 and lengths[ptr] <= step:
            ptr -= 1
        batch_sizes.append(ptr + 1)
    return batch_sizes
|
e24a2a2f73fea0d50693260c0eee960b0507dc72
| 399,625
|
import copy
import random
def staticDepthLimit(max_depth):
    """Koza-style static depth limit for GP variation operators.

    Wraps a crossover or mutation operator so that any produced child
    whose ``height`` exceeds *max_depth* is discarded and replaced by a
    randomly chosen (deep) copy of one of the operator's arguments.
    This prevents trees from growing past the ~90-95 levels where
    Python's call-stack limit becomes a problem, guaranteeing no tree
    higher than *max_depth* enters the population (except those created
    at initialization time).

    :param max_depth: the maximum depth allowed for an individual
    :returns: a decorator that can be applied to a GP operator using \
        :func:`~deap.base.Toolbox.decorate`

    .. note::
        To reproduce the exact behavior intended by Koza, set
        *max_depth* to 17.

    .. [Koza1989] J.R. Koza, Genetic Programming - On the Programming of
                  Computers by Means of Natural Selection (MIT Press,
                  Cambridge, MA, 1992)
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            # Snapshot the parents before the operator can mutate them.
            parents = [copy.deepcopy(ind) for ind in args]
            offspring = list(func(*args, **kwargs))
            for idx, child in enumerate(offspring):
                if child.height > max_depth:
                    offspring[idx] = random.choice(parents)
            return offspring
        return wrapper
    return decorator
|
cdcb1e58a681b622ced58e9aa36562e1fedb6083
| 6,104
|
def _collect_column_constraints(column, unique):
"""
Collect constraints for a column.
Use column information as well as unique constraint information.
Note: for a unique constraint on a single column we set
column / constraints / unique = True
(and store all multicolumn uniques in the table realm)
"""
res = {}
if 'null' in column:
res['required'] = not column['null']
for constr_i, constr in enumerate(unique):
if column['column_name'] in constr['fields']:
if len(constr['fields']) == 1:
res['unique'] = True
return res
|
d6e8636e15d45378bc406986df87f694f58e604e
| 521,018
|
def contains_subsets(iter_of_sets):
    """Report whether the collection contains a set that is a subset of
    a different (unequal) set in the same collection.
    """
    return any(
        set(inner).issubset(outer)
        for outer in iter_of_sets
        for inner in iter_of_sets
        if inner != outer
    )
|
2b5055f0a31f5f00d975b49b08a4976c3c251fc5
| 17,285
|
def to_cert_dat(key):
    """Convert a raw key string into a list of character code points.

    :param key: Input raw key (iterable of single characters)
    :return: list of ints, one ordinal per character
    """
    return [ord(ch) for ch in key]
|
91a3336da9d2abe8055bc98089acb0e0cca527bb
| 523,432
|
def infer_dpdk_machine(user_cflags):
    """Infer the DPDK machine identifier (e.g. 'ivb') from user cflags.

    Scrapes the value of the last `-march` flag in the space-separated
    cflags string. Falls back to 'native' when no `-march` is present
    or the architecture is not in the known mapping.
    """
    MAPPING = {
        'native': 'native',
        'nehalem': 'nhm',
        'westmere': 'wsm',
        'sandybridge': 'snb',
        'ivybridge': 'ivb',
        'armv8-a': 'armv8a',
    }
    arch = 'native'
    for token in user_cflags.split():
        if not token.startswith('-march'):
            continue
        # Strip feature suffixes: armv8-a+crc+crypto -> armv8-a.
        # A later -march overrides an earlier one.
        arch = token[7:].split('+')[0]
    return MAPPING.get(arch, 'native')
|
67257b4aac088581c399bc20d4d4e89ad17c9dae
| 451,689
|
def format_remove_duplicates(text, patterns):
    """Remove duplicated line-basis patterns from a block of text.

    Lines that match the same pattern are treated as duplicates: only
    the first occurrence per pattern is kept. Patterns are plain
    substrings, not regular expressions. Designed as a Jinja2 filter.

    Args:
        text: A str of multi-line text.
        patterns: A list of substring patterns to deduplicate on.

    Returns:
        A formatted str with duplicate lines removed.
    """
    seen = set()
    kept = []
    for line in text.split('\n'):
        duplicate = False
        for pattern in patterns:
            if pattern in line:
                if pattern in seen:
                    duplicate = True
                else:
                    seen.add(pattern)
        if not duplicate:
            kept.append(line)
    # Append an empty entry so the final join emits a trailing newline.
    if kept:
        kept.append('')
    return '\n'.join(kept)
|
0547ec746f0d2d8bce916916c2b235de1569886b
| 106,936
|
def to_uint256(value: int) -> bytes:
    """Encode a non-negative integer as a big-endian uint256 (32 bytes)."""
    return value.to_bytes(length=32, byteorder="big")
|
236f5f16d13258ed1ad9bf194b2cebbc14666305
| 611,765
|
def get_index_names(df):
    """Return the non-None name(s) of a DataFrame's index.

    Handles both a single named index and a multi-part (MultiIndex)
    index; unnamed levels are dropped from the result.
    """
    if df.index.name is not None:
        return [df.index.name]
    return [name for name in df.index.names if name is not None]
|
143f7e86594d39ccb19ab1e4e36cd9933cb07304
| 44,949
|
def floor_log2(x):
    """ floor of log2 of abs(`x`)

    Embarrassingly, from https://en.wikipedia.org/wiki/Binary_logarithm

    Parameters
    ----------
    x : int

    Returns
    -------
    L : None or int
        floor of base 2 log of `x`. None if `x` == 0.

    Examples
    --------
    >>> floor_log2(2**9+1)
    9
    >>> floor_log2(-2**9+1)
    8
    >>> floor_log2(0.5)
    -1
    >>> floor_log2(0) is None
    True
    """
    magnitude = abs(x)
    if magnitude == 0:
        # log2 of zero is undefined.
        return None
    exponent = 0
    if magnitude >= 1:
        # Halve down toward [1, 2); each halving adds one to the floor.
        while magnitude >= 2:
            magnitude //= 2
            exponent += 1
    else:
        # Double up toward [1, 2); each doubling subtracts one.
        while magnitude < 1:
            magnitude *= 2
            exponent -= 1
    return exponent
|
428283df49a4ede4f3a9ab41ccaa8fd7e266109a
| 325,865
|
def fix_ical_datetime_format(dt_str):
    """Strip the colon from an ICAL timezone offset.

    ICAL generation gives timezones in the format 2018-06-30T14:00:00-04:00,
    but the %z directive expects -0400 rather than -04:00. Removes the
    colon when the string ends with a ``±HH:MM``-style offset; otherwise
    (including None/empty input) returns the string unchanged.
    """
    # The original duplicated `return dt_str` both inside and after the
    # if-block; a single fall-through return is equivalent.
    if dt_str and dt_str[-3:-2] == ":":
        return dt_str[:-3] + dt_str[-2:]
    return dt_str
|
640d7222eac4e32b954502e740f537db96fac7f1
| 203,020
|
import random
def des_num_exposures(bands=''):
    """Sample the effective number of exposures per DES band.

    Distributions taken from Figure 5 in
    https://arxiv.org/pdf/1501.02802.pdf

    Args:
        bands: comma-separated band names out of 'g', 'r', 'i', 'z', 'Y'
            (e.g. 'g,r,i'). An empty string yields an empty list.

    Returns:
        list of ints: one sampled exposure count (1-9) per requested band.
    """
    # Fix: ''.split(',') == [''], so the default argument used to raise
    # KeyError; treat an empty bands string as "no bands requested".
    if not bands:
        return []
    dist = {'g': {'VALUES': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                  'WEIGHTS': [0.040, 0.113, 0.267, 0.311, 0.178, 0.062, 0.019, 0.007, 0.003]},
            'r': {'VALUES': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                  'WEIGHTS': [0.041, 0.119, 0.284, 0.321, 0.167, 0.046, 0.014, 0.006, 0.002]},
            'i': {'VALUES': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                  'WEIGHTS': [0.043, 0.121, 0.291, 0.334, 0.165, 0.033, 0.009, 0.003, 0.001]},
            'z': {'VALUES': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                  'WEIGHTS': [0.039, 0.106, 0.272, 0.332, 0.183, 0.048, 0.013, 0.005, 0.002]},
            'Y': {'VALUES': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                  'WEIGHTS': [0.034, 0.074, 0.195, 0.305, 0.241, 0.099, 0.035, 0.012, 0.005]}
            }
    return [random.choices(dist[b]['VALUES'], dist[b]['WEIGHTS'])[0] for b in bands.split(',')]
|
55ab17b4853cdcf952c79698313622aecf4c7915
| 464,288
|
def pick_first_not_none(*values):
    """Returns first of values that is *not* None (or None if all are/were)."""
    return next((v for v in values if v is not None), None)
|
2b7408236137708687f39b28c5c28d8214337627
| 293,108
|
def scale_down(src_size, size):
    """Shrink a crop size so it fits inside the source image.

    If the crop's width/height exceeds the image's, the crop is rescaled
    so that the offending dimension matches the image's.

    Parameters
    ----------
    src_size : tuple of int
        Size of the image in (width, height) format.
    size : tuple of int
        Size of the crop in (width, height) format.

    Returns
    -------
    tuple of int
        The scaled crop size in (width, height) format.

    Example
    --------
    >>> src_size = (640,480)
    >>> size = (720,120)
    >>> new_size = mx.img.scale_down(src_size, size)
    >>> new_size
    (640,106)
    """
    crop_w, crop_h = size
    src_w, src_h = src_size
    if crop_h > src_h:
        crop_w, crop_h = float(crop_w * src_h) / crop_h, src_h
    if crop_w > src_w:
        crop_w, crop_h = src_w, float(crop_h * src_w) / crop_w
    return int(crop_w), int(crop_h)
|
7ee38185ac72ac8835d0203dc8d63d7db88e3809
| 316,145
|
def _ListTypeFormatString(value):
"""Returns the appropriate format string for formatting a list object."""
if isinstance(value, tuple):
return '({0})'
if isinstance(value, set):
return '{{{0}}}'
return '[{0}]'
|
d535b78c9604566a86df5300af53f4bbe60fd4b8
| 340,157
|
def flat_list(list_):
    """
    Recursively flatten all nested lists in `list_`.

    Example:
    ----------
    >>> flat_list([0, 2, [1, 4, 2], [1, 3, 4]])
    [0, 2, 1, 4, 2, 1, 3, 4]
    """
    result = []
    for item in list_:
        # Recurse into sub-lists; every other element is kept as-is
        # (strings with '[' or ']' are therefore safe).
        result.extend(flat_list(item) if isinstance(item, list) else [item])
    return result
|
2bb676d067088afa7698caa0c0b60b8b9a05d973
| 480,132
|
def dict_enter(dct, key, default):
    """Access a dictionary entry, inserting `default` when missing.

    Equivalent to ``dict.setdefault``; kept as a named helper for
    existing callers.
    """
    # dict.setdefault is the stdlib form of exactly this pattern
    # (and does a single lookup instead of two).
    return dct.setdefault(key, default)
|
a903b29a34cb5cad511297fee73b0b0f4d3d94ad
| 201,804
|
def sal_res_crain(resistivity, temperature):
    """Return salinity following Crain's petrophysical handbook.

    Salinity is calculated as a function of resistivity and temperature.

    Note
    ----
    Resistivity OR temperature can be arrays, but not both at the same time.

    Parameters
    ----------
    resistivity : float or array
        Resistivity in Ohm.m.
    temperature : float or array
        Temperature in °C.

    Returns
    -------
    salinity : float or array
        Salinity in parts per million (ppm).
    """
    # Convert °C to °F for the empirical formula.
    temp_fahrenheit = temperature * 9 / 5 + 32
    return 400000 / temp_fahrenheit / resistivity ** 1.14
|
32de84bf6c74efed730d52531a4078006eb7d7e6
| 161,412
|
def findPathsWithoutStats(cursor):
    """
    ===========================================================================
    Find log paths that have no duration statistics yet
    ===========================================================================
    **Args**:
        * cursor: cursor db connection

    **Returns**:
        List of ``(id, count)`` tuples for rows whose duration_avg is NULL
    """
    query = "SELECT id, count FROM lmp_logPathsT WHERE duration_avg IS NULL"
    cursor.execute(query)
    return cursor.fetchall()
|
f9bef5e0b4273b605ee3e62e8197d74813e7df99
| 543,991
|
from typing import Union
from typing import List
from typing import Tuple
def list_unpack(some_list: Union[List, Tuple]) -> List:
    """
    Unpack nested lists/tuples by exactly one level.

    Parameters
    ----------
    some_list: list or Tuple
        Sequence with nesting depth n.

    Return
    ------
    new_list: list
        A list with nesting depth n - 1; n - 1 >= 1.

    Example
    -------
    >>> some_list = [1, 2, [3, 4], [5, [6]]]
    >>> list_unpack(some_list)
    [1, 2, 3, 4, 5, [6]]
    """
    new_list = []
    for element in some_list:
        if isinstance(element, (list, tuple)):
            # Splice one level of nesting into the result.
            new_list.extend(element)
        else:
            new_list.append(element)
    return new_list
|
868cdbb6bf9d26c3c0af0d36f58e7b2572ad7b2b
| 228,919
|
def blocks_to_table(blocks, block_struct):
    """Expand a block-form codon table into a traditional codon table.

    Parameters
    ----------
    dict blocks: maps block index -> amino acid for that block
    dict block_struct: maps block index -> list of codons in the block

    Returns
    -------
    dict table: maps each codon to its block's amino acid
    """
    return {
        codon: blocks[block_ind]
        for block_ind, codon_list in block_struct.items()
        for codon in codon_list
    }
|
8f99a5483ab4a3c508d542c492fb89f1c89657f1
| 133,889
|
import torch
def apply_linear_constraint(lin, a, x, *args, inequality=False, v=None,
                            **kwargs):
    """Apply a linear constraint

    Shifts x along the direction v by exactly enough to satisfy
    lin(x, *args, **kwargs) = a (the constraint must be linear in x).

    Parameters
    ----------
    lin : Callable
        the linear constraint of the form lin(x, *args, **kwargs) = a that is linear in x
    inequality : bool
        assume that lin(x, *args, **kwargs) >= a

    Returns
    -------
    y : Tensor
        x transformed to satisfy the constraint
    """
    residual = lin(x, *args, **kwargs) - a
    # Default correction direction: a constant shift of every component.
    if v is None:
        v = torch.ones(x.size(-1))
    scale = lin(v, *args, **kwargs)
    if inequality:
        # Only correct points that actually violate lin(x) >= a.
        residual = residual.clamp(max=0)
    return x - v * (residual / scale)
|
e46de92b38a641373056ac5965120f84a35f7b70
| 94,565
|
def overlaps(df, idx):
    """
    Check if the note at the given index in the given dataframe overlaps any
    other notes in the dataframe.

    Parameters
    ----------
    df : pd.DataFrame
        The DataFrame to check for overlaps.
    idx : int
        The index of the note within df that might overlap.

    Returns
    -------
    overlap : boolean
        True if the note overlaps some other note. False otherwise.
    """
    note = df.loc[idx]
    # Only notes with the same pitch and track can overlap this one.
    candidates = df.loc[
        (df["pitch"] == note.pitch)
        & (df["track"] == note.track)
        & (df.index != idx)
    ]
    starts = candidates["onset"]
    ends = candidates["onset"] + candidates["dur"]
    # Standard interval-intersection test: [onset, onset+dur) ranges touch.
    return any((note.onset < ends) & (note.onset + note.dur > starts))
|
382c21c7b2232b40ce7c563d677afa8a70f5bfcc
| 44,038
|
def empirical_power(n_tp, n_at):
    """Compute empirical power: the fraction of true alternatives detected.

    Input arguments:
    ================
    n_tp : int
        The observed number of true positives.
    n_at : int
        The number of hypotheses for which the alternative is true.

    Output arguments:
    =================
    epwr : float
        Empirical power, in the closed interval [0, 1].

    Raises:
    =======
    Exception
        If the inputs yield a power outside [0, 1] (e.g. n_tp > n_at or
        a negative count).
    """
    pwr = float(n_tp) / float(n_at)
    # Fixes: use logical `or` instead of bitwise `|` for the range
    # check, and drop the stray duplicate docstring the original
    # carried as a dead string statement in the body.
    if pwr > 1 or pwr < 0:
        raise Exception('Invalid input parameters!')
    return pwr
|
e95e99376401b8a520a44b01412503ca4dc16cf4
| 159,361
|
def _get_plugin_id_set(plugin_info_list):
"""
Return a set with the ids from plugin info list.
"""
return {plugin_info.id for plugin_info in plugin_info_list}
|
c816854edd31c6d3a6a8e25b90193c776914bf80
| 212,906
|
def make_sagemaker_input_data_configs(data_channels: list):
    """Restructure a list of channel names as SageMaker input data configs.

    Each channel maps to its own (distinct) empty config dict.
    """
    configs = {}
    for channel in data_channels:
        configs[channel] = {}
    return configs
|
6ca5a8178ac2e8f8bdc9f07709d32427be395197
| 132,535
|
import time
def datetime_to_timestamp(d):
    """convert a datetime object to seconds since Epoch.

    Args:
        d: a naive datetime object in default timezone
    Return:
        int, timestamp in seconds
    """
    seconds = time.mktime(d.timetuple())
    return int(seconds)
|
356ac090b0827d49e9929a7ef26041b26c6cc690
| 709,688
|
from typing import OrderedDict
def dedupedlist(mylist):
    """Remove duplicates from a list, preserving first-seen order."""
    seen = OrderedDict()
    for item in mylist:
        seen[item] = None
    return list(seen)
|
f51ddf538a226ab22c9dfcda1335a7c025b8a4df
| 584,999
|
def vehicle_type_and_mav_sys_id(vehicle_id, vehicle_color):
    """Get the vehicle_type and mav_sys_id from the vehicle's id and color.

    Valid MAV_SYS_IDs are 1 to 250. BLUE uses 1 to 50, GOLD uses 101 to
    150. The first 25 vehicles per team are 'iris'; the second 25 are
    'delta_wing'.
    """
    if vehicle_id <= 25:
        vehicle_type = 'iris'
    else:
        vehicle_type = 'delta_wing'
    offset = 100 if vehicle_color == 'gold' else 0
    return vehicle_type, vehicle_id + offset
|
0f2f415f4242f8586e46b542380c280989fb6ec1
| 604,502
|
def qiskit_1qb(qc):
    """
    Returns the list of the qiskit gate methods that affect 1 qubit and
    take no parameter.
    """
    gate_names = ('x', 'y', 'z', 'iden', 's', 't', 'sdg', 'tdg', 'h')
    return [getattr(qc, name) for name in gate_names]
|
3746023802daac90d13b9ce00f803fed310a98e0
| 515,941
|
from typing import Callable
from typing import Any
def lazy_property(f: Callable):
    """Decorator that makes a property lazy-evaluated.

    The wrapped function runs at most once per instance; its result is
    cached on the instance under a private attribute and reused on every
    subsequent access.

    Args:
        f: the function to convert to a lazy property.
    """
    cache_attr = '_lazy_' + f.__name__

    @property
    def _lazy_property(self) -> Any:
        # EAFP: missing cache attribute means first access.
        try:
            return getattr(self, cache_attr)
        except AttributeError:
            value = f(self)
            setattr(self, cache_attr, value)
            return value

    return _lazy_property
|
45486f2ed0dba328d2dd3c727ec2a733ee8e704b
| 284,372
|
import pickle
def _to_pickle(data):
"""Dump data into a PICKLE string."""
return pickle.dumps(data)
|
6bd05b95445b67d16b44d9beecd19ff961d5c1c4
| 205,189
|
def get_intel_doc_label_item(intel_doc_label: dict) -> dict:
    """ Gets the relevant fields from a given intel doc label.

    :type intel_doc_label: ``dict``
    :param intel_doc_label:
        The intel doc label obtained from api call

    :return: a dictionary containing only the relevant fields.
    :rtype: ``dict``
    """
    # Output key -> API field name; missing fields become None via .get.
    field_map = (
        ('ID', 'id'),
        ('Name', 'name'),
        ('Description', 'description'),
        ('IndicatorCount', 'indicatorCount'),
        ('SignalCount', 'signalCount'),
        ('CreatedAt', 'createdAt'),
        ('UpdatedAt', 'updatedAt'),
    )
    return {out_key: intel_doc_label.get(api_key) for out_key, api_key in field_map}
|
578467798f08bfd0776aa285790dc95d4686a830
| 45,731
|
def check_bounds(fit_result):
    """Check whether every fitted parameter lies within its model bounds."""
    for param in fit_result.model.params:
        fitted = fit_result.params[param.name]
        if not (param.min <= fitted <= param.max):
            return False
    return True
|
d2b40665c7265c0acf1ca933411b31f661ec76c0
| 578,213
|
def process_uri(uri, tls=False, user=None, password=None):
    """
    Extracts protocol, hostname, user and password from a URI.

    Parameters
    ----------
    uri: str
        Connection string in the form username:password@hostname:port,
        optionally prefixed with a protocol://
    tls: boolean
        Whether TLS is enabled (selects the default protocol when the
        URI does not carry one)
    user: str or None
        Username if not supplied as part of the URI
    password: str or None
        Password if not supplied as part of the URI

    Returns
    -------
    (protocol, hostname, user, password) tuple of str

    Raises
    ------
    ValueError
        If credentials are supplied both in the URI and explicitly, or
        are missing from both.
    """
    if '://' in uri:
        protocol, uri = uri.split('://', 1)
    else:
        protocol = 'grpc+tls' if tls else 'grpc+tcp'
    if '@' in uri:
        if user or password:
            raise ValueError(
                "Dremio URI must not include username and password "
                "if they were supplied explicitly."
            )
        # Fix: rsplit on '@' and maxsplit=1 on ':' so that a password
        # containing '@' or ':' no longer breaks parsing (only the
        # first ':' separates user from password).
        userinfo, hostname = uri.rsplit('@', 1)
        user, password = userinfo.split(':', 1)
    elif not (user and password):
        raise ValueError(
            "Dremio URI must include username and password "
            "or they must be provided explicitly."
        )
    else:
        hostname = uri
    return protocol, hostname, user, password
|
a1c2f4e6665dd7762a3fc9f8712a4b0f1fef8e76
| 403,776
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.