| content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
|---|---|---|
|
def freq_for_shape(freq, shape):
"""
Given a base frequency as int, generate noise frequencies for each spatial dimension.
:param int freq: Base frequency
:param list[int] shape: List of spatial dimensions, e.g. [height, width]
"""
height = shape[0]
width = shape[1]
if height == width:
return [freq, freq]
elif height < width:
return [freq, int(freq * width / height)]
else:
return [int(freq * height / width), freq]
|
54fb5f2ccac34db1531a8ecea2a79eee89fffe44
| 52,569
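A quick usage sketch for freq_for_shape (values are made up): a square canvas keeps the base frequency on both axes, while a 2:1 wide canvas scales it up along the width.
assert freq_for_shape(4, [512, 512]) == [4, 4]
assert freq_for_shape(4, [512, 1024]) == [4, 8]  # width axis gets freq * width / height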
|
from typing import Tuple
def get_feat_sizes(image_size: Tuple[int, int],
max_level: int):
"""Get feat widths and heights for all levels.
Args:
image_size: A tuple (H, W).
max_level: maximum feature level.
Returns:
feat_sizes: a list of dicts with 'height' and 'width' keys, one per level.
"""
feat_sizes = [{'height': image_size[0], 'width': image_size[1]}]
feat_size = image_size
for _ in range(1, max_level + 1):
feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1)
feat_sizes.append({'height': feat_size[0], 'width': feat_size[1]})
return feat_sizes
|
03570cac56616a8f67484f39405e19259f2fa9f1
| 52,573
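A minimal sketch of get_feat_sizes in use, assuming a hypothetical 640x640 input and three levels; each level halves the spatial size with ceiling division.
sizes = get_feat_sizes((640, 640), max_level=3)
# [{'height': 640, 'width': 640}, {'height': 320, 'width': 320},
#  {'height': 160, 'width': 160}, {'height': 80, 'width': 80}]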
|
from typing import List
def precision_recall(fits: List, labels: List, target) -> List[float]:
"""
Computes the precision, recall, and F1 score
for an individual class label 'target',
which can be any object with an equivalence relation via ==
:param fits: predicted labels
:param labels: ground-truth labels
:param target: the class label to score
:return: [precision, recall, f1], or [0, 0, 0] if there are no true positives
"""
N = len(labels)
# Generate the counts of true and false positives
true_positives = len([True for i in range(N)
if (fits[i] == target and labels[i] == target)])
false_positives = len([True for i in range(N)
if (fits[i] == target and labels[i] != target)])
false_negatives = len([True for i in range(N)
if (fits[i] != target and labels[i] == target)])
if true_positives == 0:
return [0, 0, 0]
precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
f1 = 2.0 * precision * recall / (precision + recall)
return [precision, recall, f1]
|
cebea34ba7927faad0558c84763ed6ff7a4ab886
| 52,574
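A small worked example for precision_recall with made-up predictions:
fits = ['a', 'a', 'b']
labels = ['a', 'b', 'b']
# target 'a': TP=1, FP=1, FN=0 -> precision 0.5, recall 1.0, F1 = 2/3
print(precision_recall(fits, labels, 'a'))  # [0.5, 1.0, 0.666...]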
|
import torch
def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:
"""Convert 3d vector of axis-angle rotation to 4x4 rotation matrix
Args:
angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.
Returns:
torch.Tensor: tensor of 4x4 rotation matrices.
Shape:
- Input: :math:`(N, 3)`
- Output: :math:`(N, 4, 4)`
Example:
>>> input = torch.rand(1, 3) # Nx3
>>> output = kornia.angle_axis_to_rotation_matrix(input) # Nx4x4
"""
def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
# We want to be careful to only evaluate the square root if the
# norm of the angle_axis vector is greater than zero. Otherwise
# we get a division by zero.
k_one = 1.0
theta = torch.sqrt(theta2)
wxyz = angle_axis / (theta + eps)
wx, wy, wz = torch.chunk(wxyz, 3, dim=1)
cos_theta = torch.cos(theta)
sin_theta = torch.sin(theta)
r00 = cos_theta + wx * wx * (k_one - cos_theta)
r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)
r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)
r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta
r11 = cos_theta + wy * wy * (k_one - cos_theta)
r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)
r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)
r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
r22 = cos_theta + wz * wz * (k_one - cos_theta)
rotation_matrix = torch.cat(
[r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
return rotation_matrix.view(-1, 3, 3)
def _compute_rotation_matrix_taylor(angle_axis):
rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
k_one = torch.ones_like(rx)
rotation_matrix = torch.cat(
[k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
return rotation_matrix.view(-1, 3, 3)
# stolen from ceres/rotation.h
_angle_axis = torch.unsqueeze(angle_axis, dim=1)
theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
theta2 = torch.squeeze(theta2, dim=1)
# compute rotation matrices
rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)
# create mask to handle both cases
eps = 1e-6
mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)
mask_pos = (mask).type_as(theta2)
mask_neg = (mask == False).type_as(theta2) # noqa
# create output pose matrix
batch_size = angle_axis.shape[0]
rotation_matrix = torch.eye(4).to(angle_axis.device).type_as(angle_axis)
rotation_matrix = rotation_matrix.view(1, 4, 4).repeat(batch_size, 1, 1)
# fill output matrix with masked values
rotation_matrix[..., :3, :3] = \
mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
return rotation_matrix
|
461779558e8cf06e05a018253961e4756ada1a47
| 52,576
|
def Get_inv_Detail(rc, IP_Address, vrf_id):
"""
Get all detail information for an IP Address in a VRF
"""
resp = rc.get('/inventory/' + IP_Address + '-' + vrf_id)
if resp.status_code != 200:
print("Failed to retrieve inventory detail for " + IP_Address)
print(resp.status_code)
print(resp.text)
return None  # explicit: no data on error
return resp.json()
|
2e5ad550bf6575b5ea2eb9fa76fe6d6ae8f89bc2
| 52,577
|
import re
def number_key(name):
"""
Sorting: https://stackoverflow.com/questions/4287209/sort-list-of-strings-by-integer-suffix-in-python
:param name:
:return:
"""
parts = re.findall('[^0-9]+|[0-9]+', name)
L = []
for part in parts:
try:
L.append(int(part))
except ValueError:
L.append(part)
return L
|
126c334c3adfdd8ef1bf9777c925d6a7c172ac0a
| 52,578
|
def create_vm_list(vms):
"""
Return a list of (vm_uuid, vm_name) with given input
@Param vms: a list of sqlite3.Row tuple.
Each tuple has format like this (vm_uuid, tenant_id, vm_name)
Example:
Input: [(u'564d6857-375a-b048-53b5-3feb17c2cdc4', u'88d98e7a-8421-45c8-a2c4-7ffb6c437a4f', 'ubuntu-VM0.1'), (u'564dca08-2772-aa20-a0a0-afae6b255fee', u'88d98e7a-8421-45c8-a2c4-7ffb6c437a4f', 'ubuntu-VM1.1')]
Output: [(u'564d6857-375a-b048-53b5-3feb17c2cdc4', 'ubuntu-VM0.1'), (u'564dca08-2772-aa20-a0a0-afae6b255fee', 'ubuntu-VM1.1')]
"""
return [(v[0], v[2]) for v in vms]
|
19d5f0a0e9cc2eb1961d66880a9695d742899b35
| 52,582
|
import csv
def get_abilities(csv_dir):
"""Get information to build pokediadb.models.Ability objects.
Args:
csv_dir (pathlib.Path): Path to csv directory.
Returns:
dict: Dict of dicts containing info needed to build
pokediadb.models.Ability objects.
Raises:
FileNotFoundError: Raised if abilities.csv does not exist.
"""
pkm_abilities = {}
with (csv_dir / "abilities.csv").open(encoding="utf8") as f_ability:
reader = csv.reader(f_ability)
next(reader) # Skip header
for row in reader:
ability_id = int(row[0])
# Skip weird abilities
if ability_id > 10000:
break
pkm_abilities[ability_id] = {
"id": int(row[0]), "generation": int(row[2])
}
return pkm_abilities
|
13bd13d0e67d197220a8b6e0b5d74a12479adb22
| 52,589
|
from typing import Sequence
from typing import Tuple
from typing import Any
from typing import Optional
from typing import Dict
def make_search_query(filter_groups: Sequence[Sequence[Tuple[str, Any, Optional[str]]]],
*,
sort_orders: Optional[Sequence[Tuple[str, str]]] = None,
page_size: Optional[int] = None,
current_page: Optional[int] = None):
"""
Build a search query.
Documentation: https://devdocs.magento.com/guides/v2.4/rest/performing-searches.html
Filter groups are AND clauses while filters are OR clauses:
[[("a", 1, "eq"), ("b", 2, "eq")], [("c", 3, "eq")]]
Means ``(a=1 OR b=2) AND c=3``. There’s no way to do an OR between AND clauses.
:param filter_groups: sequence of filters. Each filter is a sequence of conditions.
Each condition is a tuple of (field, value, condition_type). The condition_type can be None if it's "eq"
(the default). See the documentation for the list of possible condition_types.
:param sort_orders: sequence of tuples (field, direction) for the sort order.
The direction should be "asc" or "desc".
:param page_size:
:param current_page:
:return:
"""
query_params: Dict[str, Any] = {}
if page_size is not None:
query_params["searchCriteria[pageSize]"] = page_size
if current_page is not None:
query_params["searchCriteria[currentPage]"] = current_page
for filter_group_index, filter_group in enumerate(filter_groups):
for filter_index, filter_ in enumerate(filter_group):
for k, v in (
("field", filter_[0]),
("value", filter_[1]),
("condition_type", filter_[2]),
):
# NOTE: from the doc, "condition_type is optional if the operator is eq".
if k == "condition_type" and v is None:
continue
query_params[f"searchCriteria[filter_groups][{filter_group_index}][filters][{filter_index}][{k}]"] = v
if sort_orders:
for i, (field, direction) in enumerate(sort_orders):
query_params[f"searchCriteria[sortOrders][{i}][field]"] = field
query_params[f"searchCriteria[sortOrders][{i}][direction]"] = direction
return query_params
|
504fc095477917b37f43e6c20486d1feac791e2a
| 52,590
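A usage sketch for make_search_query; the field names here are hypothetical:
params = make_search_query(
    [[("sku", "ABC", None)], [("status", "enabled", "eq")]],
    sort_orders=[("created_at", "desc")],
    page_size=10,
)
# params["searchCriteria[filter_groups][0][filters][0][field]"] == "sku"
# the None condition_type on "sku" is omitted, since "eq" is the default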
|
def _AnyDerivedValues(table_data):
"""Return True if any value in the given table_data was derived."""
for row in table_data:
for cell in row.cells:
for item in cell.values:
if item.is_derived:
return True
return False
|
ee053fb475d2b33c8965b72ef40e9d9393ae125e
| 52,592
|
def format_number(price: float) -> str:
"""
Format large numbers to make them more readable with K, M,
and B suffixes.
:param price: The price of interest.
:return: A string containing the price formatted with a suffix.
"""
is_negative = price < 0
prefix = '-' if is_negative else ''
if is_negative:
price = -price
cutoffs = [(1e9, 'B'), (1e6, 'M'), (1e3, 'K'), (1, '')]
for cutoff, suffix in cutoffs:
if price >= cutoff:
number = f'{price / cutoff:.2f}'
while len(number) > 4 or number[-1] == '.':
number = number[:-1]
return prefix + number + suffix
return f'{price:.2f}'
|
47f1f17e1bc282596974f060e467fe28bfa73fdd
| 52,596
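A few illustrative calls to format_number:
print(format_number(1234567.0))  # '1.23M'
print(format_number(-4500))      # '-4.50K'
print(format_number(999))        # '999'
print(format_number(0.5))        # '0.50'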
|
def get_car_unchanging_properties(car):
"""
Gets car properties that are expected to not change at all
for a given car VIN/ID during a reasonable timescale (1 week to 1 month)
:param car: car info in original system JSON-dict format
:return: dict with keys mapped to common electric2go format
"""
return {
'vin': car['Id'],
'license_plate': car['Name'],
'model': 'Toyota Prius C'
}
|
0b339e82878d3b3adde1155fb83f8c2c300c97ae
| 52,604
|
def format_num(num) -> str:
"""
Examples:
format_num(10000) -> 10,000
format_num(123456789) -> 123,456,789
:param num:
:return:
"""
num = str(num)
ans = ''
for i in range(len(num)-3, -4, -3):
if i < 0:
ans = num[0:i+3] + ans
else:
ans = ',' + num[i:i+3] + ans
return ans.lstrip(',')
|
a6e4059f81edd0cc1afa55eacd7440f4876513e0
| 52,607
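A sanity check for format_num; for non-negative integers it matches Python's built-in thousands grouping:
num = 1234567
assert format_num(num) == f'{num:,}' == '1,234,567'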
|
import requests
def request_api(ids):
"""
Does the API request on datagouv
Keyword argument:
ids (str): datagouv unique id
Returns:
res (json): full json response
"""
res = requests.get(f'https://www.data.gouv.fr/api/1/datasets/{ids}')
return res.json()
|
a24f86e74633e83830e299936d9a54adc1c268d8
| 52,608
|
def quote(input_str):
"""Adds single quotes around a string"""
return "'{}'".format(input_str)
|
2756faeadffa84a8ff09a8ce8ea4b0b8ba328d24
| 52,612
|
def isCallable(obj):
""" Returns boolean whether object can be called (like a function)"""
return hasattr(obj, '__call__')
|
e2e555ee76d88baf431b4f2451e9c13e89c26e34
| 52,615
|
def filter_ndk_visible(entries):
"""
Filter the given entries by removing those that are not NDK visible.
Args:
entries: An iterable of Entry nodes
Yields:
An iterable of Entry nodes
"""
return (e for e in entries if e.applied_ndk_visible == 'true')
|
1b4e039043dc2b71d62d0428b11a39fa3ff44083
| 52,616
|
def calculate_event_handler_snapshot_difference(client, snapshot_old, snapshot_new):
"""
Calculates the difference between two event handler snapshots.
Parameters
----------
client : ``Client``
The respective client instance.
snapshot_old : `dict` of (`str`, `list` of `async-callable`) items
An old snapshot taken.
snapshot_new : `dict` of (`str`, `list` of `async-callable`) items
A new snapshot.
Returns
-------
snapshot_difference : `None` or `list` of `tuple` \
(`str`, (`None` or `list` of `async-callable`), (`None` or `list` of `async-callable`))
A list of event handler differences in a list of tuples.
Each tuple has 3 elements: the 0th element is the name of the respective event, the 1st element
contains the removed event handlers and the 2nd the added ones.
If there is no difference between two snapshots, returns `None`.
"""
snapshot_difference = []
event_names = {*snapshot_old.keys(), *snapshot_new.keys()}
for event_name in event_names:
old_handlers = snapshot_old.get(event_name, None)
new_handlers = snapshot_new.get(event_name, None)
if (old_handlers is not None) and (new_handlers is not None):
for index in reversed(range(len(old_handlers))):
handler = old_handlers[index]
try:
new_handlers.remove(handler)
except ValueError:
pass
else:
del old_handlers[index]
if not new_handlers:
new_handlers = None
if not old_handlers:
old_handlers = None
if (old_handlers is not None) or (new_handlers is not None):
snapshot_difference.append((event_name, old_handlers, new_handlers))
if not snapshot_difference:
snapshot_difference = None
return snapshot_difference
|
7ff9f6afd548e3a49c797a259c5177316b427469
| 52,619
|
def any_key_exists(config, keys):
""" checks if any of the keys exist in the config object """
return any(key in config for key in keys)
|
4261940f1091fcfac617f1e7436c613cbbf82047
| 52,624
|
def read_ecs(contents, reaction_masters, master_reactions):
"""
Read Rhea EC number to Rhea ID mapping data.
Parameters
----------
contents : iterable of dicts
EC number string keyed by 'EC' and Rhea ID string keyed by
'RHEA'.
reaction_masters : dict
Mapping from directed reaction Rhea ID strings to undirected
master reaction Rhea ID strings.
master_reactions : dict
Mapping from undirected master reaction Rhea ID strings to
directed reaction Rhea ID strings.
Returns
-------
tuple of 2 dicts
Complementary dicts that map EC numbers to Rhea IDs and vice versa.
[0] maps EC number strings to Rhea ID strings.
[1] maps Rhea ID strings to EC number strings.
"""
ec_reactions, reaction_ecs = {}, {}
for entry in contents:
ec = entry['EC']
rhea = entry['RHEA']
master = reaction_masters.get(rhea, rhea)
reactions = master_reactions.get(master, [])
ec_reactions.setdefault(ec, set()).update(reactions)
for reaction in reactions:
reaction_ecs.setdefault(reaction, set()).add(ec)
print('RHEA: EC {}, reactions {}'.format(ec, reactions))
for ec, reactions in ec_reactions.items():
ec_reactions[ec] = list(reactions)
for reaction, ecs in reaction_ecs.items():
reaction_ecs[reaction] = list(ecs)
print('RHEA: {} enzymes found, linked to {} reactions'.format(
len(ec_reactions), len(reaction_ecs)))
return ec_reactions, reaction_ecs
|
7b50d6c823e35a588bf8b6abcff3762381549b51
| 52,628
|
def check_no_hp_in_lhn(ind, do_correction=True):
"""
Checks if the individual dict holds a heat pump (HP) within an LHN. If so
and do_correction is True, erases the HP from the LHN.
Parameters
----------
ind : dict
Individual dict for GA run
do_correction : bool, optional
Defines if the ind dict should be modified when necessary (default: True).
If True, can erase HP from LHN network building nodes.
Returns
-------
is_correct : bool
True if no heat pump is found within the ind dict; False if an HP
exists in the LHN network.
"""
is_correct = True
# check if heatpump is connected to LHN at ind1
if len(ind['lhn']) > 0:
for subcity in ind['lhn']:
# iterate over a copy, since nodes may be removed from subcity below
for node in list(subcity):
if ind[node]['hp_aw'] > 0 or ind[node]['hp_ww'] > 0:
is_correct = False
if do_correction:
# if node has lhn and hp delete lhn connection
subcity.remove(node)
else: # pragma: no cover
break
return is_correct
|
b1fd0aa1556453f09bbdd53a49c9c3a294210e7a
| 52,634
|
def _search_by_lun(disks_service, lun_id):
"""
Find disk by LUN ID.
"""
res = [
disk for disk in disks_service.list(search='disk_type=lun') if (
disk.lun_storage.id == lun_id
)
]
return res[0] if res else None
|
0a8c92a3bcc1c92c5f65d8a2c0e333a28b9b8595
| 52,635
|
def itemKey(item):
"""
Build the form item's key from the item's name and the names of all
ancestors.
"""
parts = [item.name]
parent = item.itemParent
while parent is not None:
parts.append(parent.name)
parent = parent.itemParent
parts.reverse()
return '.'.join(parts)
|
29d317d62960594562577adc06b0db5279a7fbb6
| 52,636
|
import ipaddress
def octets(addr):
"""Return a list of octets
"""
ip = ipaddress.IPv4Interface(addr).ip
return list(map(int, str(ip).split(".")))
|
82ed22e1ea723560ef230d0744193d333411f44f
| 52,638
|
def ema(prices, days, correctie ,smoothing=2):
"""Calculate exponential moving average
https://python.plainenglish.io/how-to-calculate-the-ema-of-a-stock-with-python-5d6fb3a29f5
Args:
prices (list): price series; entries may be None
days (int): EMA window length
correctie (int): offset to skip leading entries; needed because the first values will be NaN
smoothing (int, optional): smoothing factor. Defaults to 2.
Returns:
list: EMA values, padded with None for the first days-1+correctie entries
"""
ema = []
for price in prices[:days-1+correctie]:
ema.append(None)
ema.append(sum(prices[correctie:+correctie+days]) / days)
for price in prices[days+correctie:]:
if price is not None and ema[-1] is not None:
ema.append((price * (smoothing / (1 + days))) + ema[-1] * (1 - (smoothing / (1 + days))))
else:
ema.append(None)
return ema
|
608c4dd57f6791006c778f07a17742a218c57def
| 52,640
|
def lerp(a, b, t):
""" Returns the linear interpolation between a and b at time t between 0.0-1.0.
For example: lerp(100, 200, 0.5) => 150.
"""
if t < 0.0:
return a
if t > 1.0:
return b
return a + (b - a) * t
|
c3aa5ef60c44ad5d1fc287ed90493d8b28cabcd5
| 52,646
|
def clean_column(string):
"""
Description :
This function transforms a comma-separated string into a list of strings:
Transforming the string into a list
Removing duplicates
Removing empty strings
Args:
string: string
Returns:
output: list of strings
"""
list_of_strings = list(set(string.split(",")))
list_of_strings = list(filter(None, list_of_strings))
return list_of_strings
|
9684fd461e6a47ef174b634bb4c88e407ca0de7e
| 52,647
|
def calc_relu(func, in_data, **kwargs):
"""[ReLU](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.relu.html)
Max operation can be executed by a single instruction.
chainer-computational-cost considers it as one floating point operation.
| Item | Value |
|:-------|:------|
| FLOPs | $$\| x \|$$ |
| mread | $$\| x \|$$ |
| mwrite | $$\| x \|$$ |
| params | N/A |
"""
x, = in_data
return (x.size, x.size, x.size, {})
|
3aba6b229a317836f8244a8c88f51f6006e4b67f
| 52,652
|
def find_prob(lhs, rhs, rules):
"""
This function returns the probability of the rule given its lhs and rhs
"""
for rule in rules:
if len(rule[1])==1 and rule[0]==lhs and rhs==rule[1][0]:
return rule[2]
return 0
|
2f5fc053b985eab415f7fe64e9667f51330b00de
| 52,654
|
def wrap_angle(x):
"""Wraps an angle in degrees between -180 and 180 degrees"""
x = (x + 180) % 360
if x < 0:
x += 360
return x - 180
|
5e2b3f918692787fb95814111639380e8f31b220
| 52,658
|
from typing import Mapping
def merge_recursive(*nested_data, conflict='error', path=None):
"""Merge nested dictionaries `nested1` and `nested2`.
Parameters
----------
*nested_data: dict of dict
Nested dictionaries that should be merged.
path: list of str
Path inside the nesting for useful error message
conflict: "error" | "first" | "last"
How to handle conflicts: raise an error (if the values are different),
or just give priority to the first or last `nested_data` that still has a value,
even if they are different.
Returns
-------
merged: dict of dict
A single nested dictionary with the keys/values of the `nested_data` merged.
Dictionary values appearing in multiple of the `nested_data` get merged recursively.
"""
if len(nested_data) == 0:
raise ValueError("need at least one nested_data")
elif len(nested_data) == 1:
return nested_data[0]
elif len(nested_data) > 2:
merged = nested_data[0]
for to_merge in nested_data[1:]:
merged = merge_recursive(merged, to_merge, conflict=conflict, path=path)
return merged
nested1, nested2 = nested_data
if path is None:
path = []
merged = nested1.copy()
for key, val2 in nested2.items():
if key in merged:
val1 = merged[key]
if isinstance(val1, Mapping) and isinstance(val2, Mapping):
merged[key] = merge_recursive(val1, val2,
conflict=conflict,
path=path + [repr(key)])
else:
if conflict == 'error':
if val1 != val2:
path = ':'.join(path + [repr(key)])
msg = '\n'.join([f"Conflict with different values at {path}; we got:",
repr(val1), repr(val2)])
raise ValueError(msg)
elif conflict == 'first':
pass
elif conflict == 'last':
merged[key] = val2
else:
merged[key] = val2
return merged
|
c7463b75d18844e73d2c06152e460645da1bbe9e
| 52,660
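A short sketch of merge_recursive on made-up config dicts:
base = {'opt': {'lr': 0.1}, 'seed': 0}
override = {'opt': {'momentum': 0.9}, 'seed': 1}
print(merge_recursive(base, override, conflict='last'))
# {'opt': {'lr': 0.1, 'momentum': 0.9}, 'seed': 1}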
|
def ficha(nome='', gols=0):
"""Ficha
Parâmetros:
nome (string): Nome do jogador
gols (int, optional): Gols marcados pelo jogador. Defaults to 0.
Returns:
lista: Lista com o nome do jogador e o tanto de gols que o mesmo fez
"""
print('-' * 50)
if len(nome) == 0:
nome = '<desconhecido>'
lst = nome, gols
return lst
|
fd390dfbcce32a8fa8173c486709a56385220ffd
| 52,663
|
def get_publisher_id(xml_soup, issn):
"""Gets the publisher id from the XML. If it can't find one, it returns none. Ultimately it returns a tuple of the issn and the publisher_id"""
publisher_id = None
if xml_soup.publisher:
# if the XML contains a publisher element, extract its id attribute
publisher_id = xml_soup.publisher.attrs['id']
return (issn, publisher_id)
else:
# otherwise the XML has no publisher element; leave publisher_id as None
return (issn, publisher_id)
|
cc6cdfe7c50f466824177709e8cc54ff903c7b91
| 52,664
|
def timedelta_filter(dates, timedelta):
"""
Make sure there is a minimum time delta between each date in the given list
"""
sorted_dates = sorted(dates)  # sort everything first so the seed element is the earliest date
filtered_dates = [sorted_dates[0]]
for date in sorted_dates[1:]:
if date - filtered_dates[-1] > timedelta:
filtered_dates.append(date)
return filtered_dates
|
6c6ba9e7b9ae68e130d4272702b575a160abd895
| 52,665
|
import shutil
def cmd_exists(cmd):
"""
Given a command name, checks to see if it exists on the system, return True or False
"""
return shutil.which(cmd) is not None
|
18fce1922cf627ce087fa09b391faa0fad871839
| 52,669
|
def load_data(batch_size,*args,**kwargs):
""" Load data and build dataloader.
Parameters
----------
batch_size : int
batch size for batch training.
Returns
-------
trainloader : Dataloader
Dataloader for training.
testloader : Dataloader
Dataloader for test.
validloader : Dataloader
Dataloader for validation.
"""
trainloader, testloader, validloader = None, None, None
return trainloader, testloader, validloader
|
209e2a9d7753968afb6e26d71b77fb2a1f27813e
| 52,672
|
def gcd(a, b):
"""
Compute the greatest common divisor (GCD) using the Euclidean algorithm.
Note: requires a >= b.
Parameters
----------
a : int
First number
b : int
Second number
Returns
-------
int
The greatest common divisor of a and b
"""
if a == 0:
return 0
elif b == 0:
return a
a %= b
while a != 0:
a, b = b, a
a %= b
return b
|
7f02c0e337c7253737d3114984d5bad10aee1b3f
| 52,680
|
def get_output_field(annotation):
"""
Return the output field of an annotation if it can be determined.
:param annotation: The annotation to get the output field from.
:return: The output field of the annotation or None if it can't be
determined.
:rtype: django.db.models.Field | None
"""
return getattr(annotation, 'output_field', None)
|
93efb53c266f3fec9f111caecc95f9e3d64902e0
| 52,683
|
def generate_bn_mat_gamma_0(n, k):
"""
Defined in the paper eq (24)
"""
if n == 0:
return 0.5
return (2.0 * k * (k + 1) + n * (2 * k + n + 1)) / ((2 * k + n) * (2 * k + n + 2))
|
0aece8e9f2cfec602c041f70d63eb6e954e55586
| 52,685
|
def get_social_links_data(coin_choice, df):
"""Gets Social media links from coingecko df"""
links_json = df.loc[df.name == coin_choice, "links"].values[0]
homepage = links_json.get("homepage")[0]
twitter_screen_name = links_json.get("twitter_screen_name")
twitter_link = f"https://twitter.com/{twitter_screen_name}"
subreddit_url = links_json.get("subreddit_url")
gitlinks = links_json.get("repos_url").get("github", [""])
google = f"https://www.google.com/search?q={coin_choice}"
return {
"twitter": twitter_screen_name,
"github": gitlinks,
"reddit": subreddit_url,
"homepage": homepage,
"google": google,
}
|
6dc3c742cce5dabc7f197b86d1296f87e66ed2cc
| 52,688
|
def _Contains(i, j, areas, lens, cls):
"""Return True if path i contains majority of vertices of path j.
Args:
i: index of supposed containing path
j: index of supposed contained path
areas: list of floats - areas of all the paths
lens: list of ints - lengths of each of the paths
cls: dict - maps pairs to result of _ClassifyPathPairs
Returns:
bool - True if path i contains at least 55% of j's vertices
"""
if i == j:
return False
(jinsidei, joni) = cls[(i, j)]
if jinsidei == 0 or joni == lens[j] or \
float(jinsidei) / float(lens[j]) < 0.55:
return False
else:
(insidej, _) = cls[(j, i)]
if float(insidej) / float(lens[i]) > 0.55:
return areas[i] > areas[j] # tie breaker
else:
return True
|
a88cb7a16b7b856cdcd5dc30991cc1c1efa4387b
| 52,690
|
def get_words_and_preds(sdp_lines):
"""
Returns the words in the lines, split to columns.
and the predicates (subset of words).
"""
words = [line.strip().split("\t")
for line in sdp_lines
if (not line.startswith('#')) and \
line.strip()]
preds = [word
for word in words
if word[5] == '+']
return (words, preds)
|
1b0dfc1fe10ed8db5f356751fc114fc9fb6c4490
| 52,693
|
def format_cardinality(rows, cardinality, nulls):
"""Format cardinality of the column
Args:
rows (int): number of rows
cardinality (int): number of distinct values
nulls (int): number of null-values
Returns:
str: ratio of distinct to non-null values as a percentage, formatted with '%.2f', or 'N/A'.
"""
if rows is None or cardinality is None or nulls is None:
return "N/A"
if rows == nulls:
return 'N/A'
return "%.02f" % (float(cardinality) / float(rows - nulls) * 100) + " %"
|
41b8192afb79b5782c11f458361a9159e3ea05d7
| 52,701
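An illustrative call to format_cardinality:
print(format_cardinality(1000, 250, 0))   # '25.00 %'
print(format_cardinality(100, 50, 100))   # 'N/A' (every value is null)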
|
import inspect
def _is_context_manager(func):
"""
Detect if the given method or function is decorated with @contextmanager.
Parameters
----------
func : function or method
The function or method to be tested.
Returns
-------
bool
True if the function or method is has the @contextmanager decorator,
otherwise False.
"""
src = inspect.getsource(func).lstrip()
return 'return GeneratorContextManager' in src or src.startswith('@contextmanager')
|
2f00d93c8a67ef42981bfad0e34776b9fa07c8ad
| 52,706
|
def total_completion_time(schedule):
"""
Computes the total completion time for a given schedule.
Args:
schedule (list): a list of jobs (node object) listed according to the schedule.
Returns:
sum_Cj (float): the total completion time of all jobs in schedule.
"""
Cj = 0
sum_Cj = 0
for job in schedule:
Cj += job.processing
sum_Cj += Cj
return sum_Cj
|
a44f121c7ee2f00604d90781dcbd93d61d182964
| 52,709
|
def ExpMag( x, params ):
"""Compute surface brightness for a profile consisting of an exponential,
given input parameters in vector params:
params[0] = mu_0
params[1] = h
"""
mu_0 = params[0]
h = params[1]
# 1.085736 = 2.5 / ln(10): converts the exponential scale length into magnitudes
return mu_0 + 1.085736*(x/h)
|
6389bfa31cad6e17bf5e119222d04450bd1ec5c1
| 52,710
|
import math
def get_angle(start_point: tuple, end_point: tuple) -> float:
"""
Determines the angle between the line specified by `start_point` and `end_point`.
"""
dx = end_point[0] - start_point[0]
dy = end_point[1] - start_point[1]
rads = math.atan2(-dy, dx)
rads %= 2 * math.pi
degrees = math.degrees(rads)
return degrees
|
eb6c41c5e39f2ce562e3acc1fad5ca091d0bb7e3
| 52,712
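Note that negating dy flips the y-axis, so get_angle measures angles in screen coordinates (y pointing down); a minimal check:
print(get_angle((0, 0), (1, 0)))   # 0.0
print(get_angle((0, 0), (0, -1)))  # 90.0 ("up" on screen)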
|
import hashlib
import json
def hash_object(obj):
"""deterministically hash a dict."""
hash = hashlib.sha256()
hash.update(json.dumps(obj, sort_keys=True).encode("utf-8"))
return hash.hexdigest()
|
6d65081d57761ac6355c4ae6dfd35a22272fbe0a
| 52,716
|
def df_div(df0, df1, axis=1):
"""Wrapper function to divide two Pandas data frames in a functional manner.
Args:
df0 (:obj:`pd.DataFrame`): First data frame.
df1 (:obj:`pd.DataFrame`): Second data frame.
axis (int): Axis; defaults to 1.
Returns:
The quotient from applying :meth:`pd.DataFrame.div` from ``df0`` to
``df1``.
"""
return df0.div(df1, axis=axis)
|
1b362ea7ae920ef79de68aeec7fb25eeba06ca3b
| 52,720
|
def _match_eq(x, y):
"""Check equality, with None treated as a wildcard."""
return (x is None) or (y is None) or (x == y)
|
b0b706b52a906cd0b73b591501ab8e144222c900
| 52,723
|
def are_in_same_row(cell, free_cell):
""" Check if the given cell is in the same row than the free_cell.
:arg cell, the coordinate of the cell in the table.
:arg free_cell, the coordinate of the free cell in the table.
"""
ycell = list(cell)[0][1]
yfreecell = list(free_cell)[0][1]
return ycell == yfreecell
|
45ed9d83fd4f3e095b7f5f85c348e78cc6cd35db
| 52,726
|
from collections import OrderedDict
def make_xflt(region):
"""
Make a dictionary for the XFLT#### keywords and values according
to the provided region.
Return:
a dictionary containing the XFLT#### keywords and values, e.g.,
{ 'XFLT0001': radius_out, 'XFLT0002': radius_out, 'XFLT0003': 0,
'XFLT0004': angle_begin, 'XFLT0005': angle_end }
"""
if region.get('shape') == 'annulus':
xflt = OrderedDict([
('XFLT0001', region.get('radius_out')),
('XFLT0002', region.get('radius_out')),
('XFLT0003', 0)
])
elif region.get('shape') == 'pie':
xflt = OrderedDict([
('XFLT0001', region.get('radius_out')),
('XFLT0002', region.get('radius_out')),
('XFLT0003', 0),
('XFLT0004', region.get('angle_begin')),
('XFLT0005', region.get('angle_end'))
])
else:
xflt = None
return xflt
|
ef47994fa621a783ee35464f526853d9425113e4
| 52,728
|
def merge_tuples(*tuples):
"""
Utility method to merge a number of tuples into a list.
To aid output, this also converts numbers into strings for easy output
:param tuples:
:return: List[String]
"""
return [str(j) for i in tuples for j in (i if isinstance(i, tuple) else (i,))]
|
ea9d889fc2324fd86a9ca6a38584396e64bd5ac2
| 52,729
|
import math
def power2(x):
"""power2(x) -> nearest power of two
Calculate the smallest power of two that is equal to
or greater than x
"""
p = math.log(x) / math.log(2)
return 2**int(math.ceil(p))
|
0dac03123f426b526ed90d00377e8c7ba5ee541d
| 52,730
|
def format_opt_parameters(dict_, pos):
"""Format the values depending on whether they are fixed or estimated."""
# Initialize baseline line
val = dict_["coeffs"][pos]
is_fixed = dict_["fixed"][pos]
bounds = dict_["bounds"][pos]
line = ["coeff", val, " ", " "]
if is_fixed:
line[-2] = "!"
# Check if any bounds defined
if any(x is not None for x in bounds):
line[-1] = "(" + str(bounds[0]) + "," + str(bounds[1]) + ")"
return line
|
8e93353f9f5813d97248233789b606772d790d1b
| 52,731
|
from typing import Union
from typing import List
from typing import Tuple
def dot(
vec1: Union[List, Tuple],
vec2: Union[List, Tuple],
) -> Union[float, int]:
"""Two vectors dot product.
Parameters:
vec1: List[float] - first vector
vec2: List[float] - second vector
Returns:
Dot product of 2 vectors
Raises:
ValueError: if length not equal
"""
if len(vec1) != len(vec2):
raise ValueError('lengths of two vectors are not equal')
return sum([val1 * val2 for val1, val2 in zip(vec1, vec2)])
|
3a7ff194379f1cbf998432e7b651081fa7b72b03
| 52,736
|
from typing import Tuple
from typing import Set
def _calculate_solution_score(
solution: Tuple[str, ...], projects_urls: Set[str]
) -> int:
"""Calculate a score with the given solution.
Args:
solution (Tuple[str, ...]): A set of remote urls
projects_urls (Set[str]): A set of projects urls
Returns:
int: Lower score is a better solution
"""
# Less remotes, is better
score = len(solution)
# Shortest url for projects, is better
for url in projects_urls:
minimum = len(url)
for remote in solution:
if url.startswith(remote):
minimum = min(minimum, len(url) - len(remote))
score += minimum
return score
|
fa0f5b90bddd2a170b6c4a0567e9eeeb26b1d86b
| 52,737
|
def select_from_data(n, select=1.):
"""Calculate number of data based on a fraction or selection value.
Args:
n (int): Size of data.
select (int, float, optional): Fraction or selection value. A
floating-point value is used as a fraction of n, in [0.0, 1.0]
(values above 1.0 are clamped to 1.0). An integer value is used as
an absolute count, in [0, n] (if greater than n, it is set to
n). Negative values raise a ValueError. Default is 1.0.
Returns:
int: Number of elements.
Raises:
ValueError: If *select* is a negative value.
TypeError: If *select* is non-numeric.
"""
if isinstance(select, int):
if select < 0:
raise ValueError("negative select value, {}".format(select))
return min(select, int(n))
elif isinstance(select, float):
if select < 0.:
raise ValueError("negative select value, {}".format(select))
return round(min(select, 1.) * int(n))
else:
raise TypeError("invalid select type, {}".format(select))
|
df92725d22e32b892e43b186c87d2e4cd35123e8
| 52,749
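Illustrative calls to select_from_data, showing the fraction/count distinction:
assert select_from_data(100, 0.25) == 25   # float: fraction of n
assert select_from_data(100, 30) == 30     # int: absolute count
assert select_from_data(100, 150) == 100   # clamped to n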
|
def find_indices(lst, condition):
"""
Find the indices of elements in a list `lst` that match a condition.
"""
return [i for i, elem in enumerate(lst) if condition(elem)]
|
51a7af66266f03cadb5cff0876cafd8ac5e48420
| 52,750
|
def get_different_items(list1, list2):
""" Compare the list1 and list2 and return the elements of list2 that is not in list1 """
difference = []
if len(list1) < len(list2):
for i in list2:
if i not in list1:
difference.append(i)
return difference
|
03c2f44cd80c3c615a60e775dcc56e9a4b1d9e50
| 52,756
|
def labelstring2list(labels):
"""
String to a list of labels.
"""
return [_i.strip() for _i in labels.split(",")]
|
456dc2186ac391f1ff21f7aee6c2572596485942
| 52,764
|
def lerp2d(a: tuple, b: tuple, r: float) -> tuple:
"""Returns a point interpolated from a to b, at r."""
return (
a[0] + (b[0] - a[0]) * r,
a[1] + (b[1] - a[1]) * r
)
|
03efab57d09f12a313ac8f649ea461931af0f979
| 52,770
|
import time
def to_epoch(datetime_obj, astype=int):
"""Convert datetime.datetime into epoch seconds
Currently assumes input datetime is expressed in localtime. Does not handle
timezones very well. Once Python2 support is dropped from pytokio, this will
be replaced by Python3's datetime.datetime.timestamp() method.
Args:
datetime_obj (datetime.datetime): Datetime to convert to seconds-since-epoch
astype: Whether you want the resulting timestamp as an int or float
Returns:
int or float: Seconds since epoch
"""
if astype == float:
return time.mktime(datetime_obj.timetuple()) + datetime_obj.microsecond / 1e6
return astype(time.mktime(datetime_obj.timetuple()))
|
ea227cde99fb5b7d64dfe81a340130fdeb5f281d
| 52,773
|
def comxyz(x,y,z):
"""Centre of mass given x, y and z vectors (all same size). x,y give position which has value z."""
Mx = 0
My = 0
mass = 0
for i in range(len(x)):
Mx = Mx + x[i] * z[i]
My = My + y[i] * z[i]
mass = mass + z[i]
com = (Mx / mass, My / mass)
return com
|
014c52de3359e6e2b376c93e9ce7f5644b59b3d1
| 52,775
|
def ordered_groupby(collection, column):
"""Group collection by a column, maintaining the key
order from the collection.
Args:
collection (list): List of flat dictionaries.
column (str): Column (dict key) by which to group the list.
Returns:
grouped (dict): Dict of the column to subcollections.
"""
# Figure out the group order
group_order = []
for row in collection:
group = row[column]
if group not in group_order:
group_order.append(group)
# Group by in order
return {group: [row for row in collection
if row[column] == group]
for group in group_order}
|
4a0b927b8407506899376366e7d1cb4afb1ac423
| 52,776
|
def _is_prefixed(cmd_line):
"""Whether the supplied command line has already been prefixed
with an OS specific operation.
"""
if cmd_line is None:
raise ValueError("CommandLine is required field for task.")
return cmd_line.startswith('cmd.exe /c') or \
cmd_line.startswith('cmd /c') or \
cmd_line.startswith('/bin/bash -c') or \
cmd_line.startswith('/bin/sh -c')
|
290d6b5aa72d454435141c224af0d5fc5389930c
| 52,780
|
def is_subject(token1, token2):
"""Return True if `token1` is the subject of `token2`."""
return (
(token1.upos in ["NOUN", "PRON"])
and (token1.deprel == "nsubj")
and (token1.head == token2.id)
)
|
a090345a95f83c5fdb31d02bdf0e76fe9bb6c168
| 52,787
|
def convert_list_elements_to_list(list_to_convert):
"""Converts list elements in list to sequence of elements:
Example: [1, 2, 3, [4, 5], 6, [7]] -> [1, 2, 3, 4, 5, 6, 7]
"""
converted_list = []
for element in list_to_convert:
if isinstance(element, list):
converted_list.extend(element)
else:
converted_list.append(element)
return converted_list
|
085183f88f8232ea6d11137ccfabc68dc12dcb68
| 52,788
|
import click
def validate_name(ctx: click.Context, param: click.Option, value: str) -> str:
"""
Validate that the name is present and at most 20 characters long.
:param ctx: see callbacks for click options
:param param: see callbacks for click options
:param value: see callbacks for click options
:return: Validated name otherwise a click.BadParameter exception is raised
"""
if value is None or len(value) > 20:
raise click.BadParameter(value)
return value
|
1d2293b9837ce059d365bda47b9c790ba1576bba
| 52,793
|
def solve_iter(n, memo):
"""
Dynamic programming in a bottom-up way.
The function can be enhanced by storing only the last three elements, which saves space.
"""
if n == 1 or n == 2 or n == 3:
return memo[n]
else:
for i in range(4, n+1):
memo[i] = memo[i-1] + memo[i-2] + memo[i-3]
return memo[n]
|
00a26e05b96f2787c333c07d125cb9d14c3e42c4
| 52,795
|
import math
def cell_volume(unit_cell):
"""
Calculate the unit cell volume from the cell parameters given as a list/tuple of 6 values
"""
a, b, c, alpha, beta, gamma = unit_cell
alpha, beta, gamma = alpha * math.pi / 180.0, beta * math.pi / 180.0, gamma * math.pi / 180.0
v = a * b * c * ((1 - math.cos(alpha) ** 2 - math.cos(beta) ** 2 - math.cos(gamma) ** 2) + 2 * math.cos(
alpha) * math.cos(beta) * math.cos(gamma)) ** 0.5
return v
|
58f981595e415b55bc3fecca5b3038b98654241c
| 52,798
|
import re
def _parse_trace_span(header):
"""Given an X_CLOUD_TRACE header, extract the trace and span ids.
Args:
header (str): the string extracted from the X_CLOUD_TRACE header
Returns:
Tuple[Optional[str], Optional[str]]:
The trace_id and span_id extracted from the header
Each field will be None if not found.
"""
trace_id = None
span_id = None
if header:
try:
split_header = header.split("/", 1)
trace_id = split_header[0]
header_suffix = split_header[1]
# the span is the set of alphanumeric characters after the /
span_id = re.findall(r"^\w+", header_suffix)[0]
except IndexError:
pass
return trace_id, span_id
|
b771c8190f91e1de7d2f1a304b5bf974fa3882ad
| 52,804
|
import torch
def center_of_mass(mask, esp=1e-6):
"""Calculate the centroid coordinates of the mask.
Args:
mask (Tensor): The mask to be calculated, shape (h, w).
esp (float): Avoid dividing by zero. Default: 1e-6.
Returns:
tuple[Tensor]: the coordinates of the center point of the mask.
- center_h (Tensor): the center point of the height.
- center_w (Tensor): the center point of the width.
"""
h, w = mask.shape
grid_h = torch.arange(h, device=mask.device)[:, None]
grid_w = torch.arange(w, device=mask.device)
normalizer = mask.sum().float().clamp(min=esp)
center_h = (mask * grid_h).sum() / normalizer
center_w = (mask * grid_w).sum() / normalizer
return center_h, center_w
|
4556e65ec15f3ebc238add82e5ba4815153315e2
| 52,805
|
def _any_match(path, patterns):
"""True if any of the given patterns match `path`."""
return any(path.match(pattern) for pattern in patterns)
|
e6f8906675b6a4eb5be14989a4465ca79fc0f55f
| 52,806
|
def to_studly_caps(s: str) -> str:
"""Converts a string from a snake_case_string to a StudlyCapsString."""
words = [w.capitalize() for w in s.split('_')]
return ''.join(words)
|
b241f22e7d842bc8b35029acabfe0333167ff190
| 52,808
|
def get_quantile(pos, percentile=0.5):
"""provided a list of (positions,coverage) pairs, return the median position"""
total = sum(cov for _, cov in pos)
n = 0
for p, cov in sorted(pos, key=lambda x: x[0]):
n += cov
if n >= total * percentile:
return p
raise ValueError(f"cannot find {percentile} percentile of {pos}")
|
41eb460aed30a5b077afc62dd41afc921174fe8c
| 52,813
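A worked example for get_quantile with made-up coverage data:
pos = [(1, 5), (2, 5), (3, 10)]  # (position, coverage); total coverage 20
print(get_quantile(pos))                  # 2 (cumulative 10 >= 20 * 0.5)
print(get_quantile(pos, percentile=0.9))  # 3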
|
def get_next_index(df, index_val, lock_bound=False, inc=+1):
"""Determine the index value that follows `index_val`
:param df: dataframe or series, having df.index.
:type df: pd.DataFrame or pd.Series
:param index_val: index value to start from
:param lock_bound: if true return same index if reaching bounds
:type lock_bound: bool
:param inc: Increment. default +1, use -1 to get previous index
:type inc: int
:return: neighbouring index value
"""
index_value_iloc = df.index.get_loc(index_val)
next_iloc = index_value_iloc + inc
# treat negative positions as out of bounds instead of letting them wrap around
if next_iloc < 0 or next_iloc >= len(df.index):
if lock_bound:
# return the index value itself when hitting a bound, not its position
return index_val
return None
return df.index[next_iloc]
|
0dfd204a4a956da024ec05f479efe2468e527f72
| 52,814
|
def get_max_order(basis, refname):
"""Get the maximum order of a basis."""
if refname in ["interval", "triangle", "tetrahedron"]:
return max(sum(i.indices) for i in basis)
if refname in ["quadrilateral", "hexahedron"]:
return max(max(i.indices) for i in basis)
if refname == "prism":
return max(max(sum(i.indices[:2]), i.indices[2]) for i in basis)
|
b94e35f990c8151aa2382c91d979f12fb77067fb
| 52,818
|
import torch
def contains_nan(x, max_value=float('inf')):
"""Check whether a tensor contains a NaN or an infinity value.
Args:
x (torch.Tensor or list): The input.
max_value (float): The highest acceptable value.
Returns:
(bool): Whether the tensor contains a NaN or infinity.
"""
nb_nans = 0
nb_infs = 0
if isinstance(x, list):
for el in x:
nb_nans += torch.isnan(el).sum()
# use >= so infinities are caught even with the default max_value of inf
nb_infs += (torch.abs(el) >= max_value).sum()
else:
nb_nans = torch.isnan(x).sum()
nb_infs = (torch.abs(x) >= max_value).sum()
return nb_nans > 0 or nb_infs > 0
|
bf8c5e0338891504964ea545e8529b2088d76c8c
| 52,820
|
def unfix_blockname(name):
"""The inverse of fix_blockname()."""
return "%3s%2d" % (name[0:3], int(name[3:5]))
|
e81787acb1a522c2ab76246cf961a6aad67c763b
| 52,822
|
def val(node):
"""
Normalize the given DOM node and return the value of its first child (the string content of the node).
"""
try:
node.normalize()
return node.firstChild.wholeText # Handles CDATASection too
except AttributeError:
return ''
|
a546bf067b39e4d6e169a7e012cf23d5bcc0b9ea
| 52,823
|
import math
def calculate_output_image_size(input_image_size, stride):
"""
Calculates the output image size when using Conv2dSamePadding with a stride
"""
image_height, image_width = input_image_size
image_height = int(math.ceil(image_height / stride))
image_width = int(math.ceil(image_width / stride))
return image_height, image_width
|
56aac68f763f4721a8dc893c4caee796a82776a2
| 52,827
|
import torch
def calculate_gradient_penalty(model,step,alpha, real_images, fake_images, lambd ,device):
"""Calculates the gradient penalty loss for WGAN GP"""
eps = torch.rand(real_images.size(0), 1, 1, 1, device=device)
eps = eps.expand_as(real_images)
x_hat = eps * real_images + (1 - eps) * fake_images.detach()
x_hat.requires_grad = True
px_hat = model(x_hat,step=step,alpha=alpha)
grad = torch.autograd.grad(outputs = px_hat.sum(),
inputs = x_hat,
create_graph=True
)[0]
grad_norm = grad.view(real_images.size(0), -1).norm(2, dim=1)
gradient_penalty = lambd * ((grad_norm - 1)**2).mean()
return gradient_penalty
|
3e95eb450e3eb1942100e25e6d004170d2b3250e
| 52,828
|
import torch
def load_pretrained_model(model, opt):
""" Load model weights from disk into the model that sits in main memory. """
ckpt_dict = torch.load(opt.model_fpath)
ckpt_state = ckpt_dict['state']
print('loaded ckpt with accuracy:', ckpt_dict['acc'])
model.load_state_dict(ckpt_state)
return model
|
165f6450247e619687d2e5c0c5131420543032eb
| 52,829
|
def retry_if_not_value_error(exception):
"""Forces retry to exit if a valueError is returned. Supplied to the
'retry_on_exception' argument in the retry decorator.
Args:
exception (Exception): the raised exception, to check
Returns:
(bool): False if a ValueError, else True
"""
return not isinstance(exception, ValueError)
|
730e56bea66fd7f8ef141283b0689030173183f5
| 52,837
|
def _lower_case(s):
"""Convert a string to lowercase and remember its original case."""
return s.lower(), [c.islower() for c in s]
|
79c086caa8960fd878dd284e6a7ae8ed7203717e
| 52,847
|
def npartition(string, n=1, delimiter=' '):
"""
Similar to python's built in partition method. But will
split at the nth occurence of delimiter
"""
groups = string.split(delimiter)
return (delimiter.join(groups[:n]), delimiter, delimiter.join(groups[n:]))
|
4fd77f15d3f71f6656b5b666c8d822713af9eae8
| 52,850
|
def name(self):
"""Return the name of the device."""
return self._name
|
6611bcb0b9ad9f9f0c3b7bb101bcf46b2373bcef
| 52,851
|
def rgb2hex(rgb):
"""Converts RGB colours to HEX
:param rgb: RGB colour
:return: HEX colour as a string with a '#'
"""
return '#' + '%02x%02x%02x' % (rgb.r, rgb.g, rgb.b)
|
7ddd7fab7840a42b2c090d107ffbf60d0ac8b630
| 52,856
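rgb2hex only needs an object with r, g, and b attributes; a quick sketch using a namedtuple:
from collections import namedtuple
RGB = namedtuple('RGB', 'r g b')
print(rgb2hex(RGB(255, 128, 0)))  # '#ff8000'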
|
def separate_data_and_labels(train):
"""Separates data and labels"""
train_x = []
train_y = []
for features, label in train:
train_x.append(features)
train_y.append(label)
return train_x, train_y
|
0af73b9a3f033979d3eb443fa8e7daa197cb41cb
| 52,857
|
def _parent(i):
"""Gets index of a parent of a node given the node's index."""
return (i - 1) >> 1
|
4d296413cf7bf30bf179649fd979acbfe721316a
| 52,861
|
def convertToInt(s):
"""
Convert to string to int. Protect against bad input.
@param s: Input string
@return: integer value. If bad return 0.
"""
try:
value = int(s)
except ValueError:
value = 0
return value
|
1dc7d2c061c95132136b970d89b97c4f7009367b
| 52,863
|
def daemon_launch_lock_path(root_path):
"""
A path to a file that is locked while a daemon is launching but not yet started.
This prevents multiple instances from launching.
"""
return root_path / "run" / "start-daemon.launching"
|
dec00a8a358132dc5e8dbb5084dbf118e9229ad0
| 52,872
|
def NoTests(path, dent, is_dir):
"""Filter function that can be passed to FindCFiles in order to remove test
sources."""
if is_dir:
return dent != 'test'
return 'test.' not in dent
|
ad9ce5a0a4df693a2fa2589808a393b8d30b7a23
| 52,873
|
def assign_indent_numbers(lst, inum, dic):
""" Associate keywords with their respective indentation numbers
"""
for i in lst:
dic[i] = inum
return dic
|
fc3a32692288ce3640fc41b40ee8b5dc9d472829
| 52,874
|
def locals(space):
"""Return a dictionary containing the current scope's local variables.
Note that this may be the real dictionary of local variables, or a copy."""
ec = space.getexecutioncontext()
return ec.gettopframe_nohidden().getdictscope()
|
f70a08fa39e963d0faee6761e3dcf9f7ea17694d
| 52,879
|
def apply_filters(matches, *filters):
"""
Apply a sequence of `filters` to a `matches` iterable. Return a new filtered
matches iterable.
A filter must accept a single arg: an iterable of tuples of (key, match,
line, lineno) and must return an iterable of tuples of (key, match, line,
lineno).
"""
for filt in filters:
matches = filt(matches)
return matches
|
b6be94e2bb29de13e3ac2cfc845fde3c2aa57558
| 52,885
|
import imp
def dynamic_import(config_name, config_path):
"""
Dynamic import a project.
Args:
config_name (str): module name
config_path (str): the dir that contains the .py with this module.
Examples::
>>> root = "/data/repos/cvpods_playground/zhubenjin/retinanet/"
>>> project = root + "retinanet.res50.fpn.coco.800size.1x.mrcnn_sigmoid"
>>> cfg = dynamic_import("config", project).config
>>> net = dynamic_import("net", project)
"""
# note: the imp module is deprecated since Python 3.4; importlib is its successor
fp, pth, desc = imp.find_module(config_name, [config_path])
return imp.load_module(config_name, fp, pth, desc)
|
4804918642cf4a8445cb131e080cd405196d2beb
| 52,891
|
def _run_simulation(simulation, sid = None, full_results = True):
""" Helper function for Parralel computation of several simulations.
Returns list of dictionaries of results for a particular simulation with id and policy
type added
"""
simulation.run()
if full_results:
result = simulation.results
else:
result = [simulation.results[-1]]
# put simulation identifier
for step_results in result:
step_results.update({'id':sid,
'policyType': simulation.policy.__class__.__name__,
})
return result
|
04aa4c20b6d954336d2616b2037e2afc3e5299c2
| 52,900
|
def obj_id(obj):
"""Return the last four digits of id(obj), for dumps & traces."""
return str(id(obj))[-4:]
|
61d8cfaad26c7f433eaa08110eb3d87c781266e3
| 52,903
|