content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from typing import Any
def is_less_than(value: Any, *, upper_bound: Any = 10) -> bool:
    """Return True when ``value`` compares strictly less than ``upper_bound``.

    :param value: The value to compare.
    :param upper_bound: The exclusive upper limit (defaults to 10).
    :return: The result of ``value < upper_bound``.
    """
    result = value < upper_bound
    return result
|
0699fc80e34b772d97c81823d23a466c8884ac17
| 64,651
|
def to_str(arg):
    """
    Recursively convert every element of a structure into 'str' strings.

    Lists and tuples are rebuilt with their elements converted, ``None``
    is preserved, and anything else is passed through ``str()``.
    Utility function to make it easier to write tests for both
    unicode and non-unicode Django.
    """
    # isinstance (rather than exact type() comparison) also handles
    # list/tuple subclasses, which previously fell through to str().
    if isinstance(arg, list):
        return [to_str(el) for el in arg]
    if isinstance(arg, tuple):
        return tuple(to_str(el) for el in arg)
    if arg is None:
        return None
    return str(arg)
|
f7d8a1a87fea4359bff889286c3f1420432090c7
| 64,652
|
def is_number(value) -> bool:
    """
    Check whether ``value`` can be interpreted as a number.

    :param value: value to verify (string, number, or anything else).
    :return: True when ``float(value)`` succeeds, False otherwise.
    """
    try:
        float(value)
    except (ValueError, TypeError):
        # TypeError covers inputs float() cannot accept at all (None,
        # lists, ...), which the ValueError-only handler let escape.
        return False
    return True
|
be21914ed1ddc50a2f2040464a1302d5348ce037
| 64,659
|
def sum_until_negative(a_list):
    """
    @purpose Sum list items from the start, stopping at the first negative.
             Returns 0 for an empty list or when the first item is negative.
    @parameter
             a_list: A list to be summed until a negative number is reached
    @Complexity: Worst Case - O(N): no element is negative
                 Best Case - O(1): empty list or first element negative
    @pre-condition A numeric list is passed
    @post-condition Returns the running total up to (excluding) the first
                    negative element; on a non-numeric list, returns an
                    error-message string instead of raising.
    """
    try:
        total = 0
        for item in a_list:
            # Stop accumulating as soon as a negative value appears.
            if item < 0:
                break
            total += item
        return total
    except TypeError:
        return "Please only insert numerical type lists."
|
84d6820800413067205fa2bad6a169be17da14f8
| 64,660
|
def take(num):
    """Build a transformer that yields at most ``num`` leading elements.

    The returned callable accepts any iterable and lazily produces its
    first ``num`` items, then stops.
    """
    def taker(input):
        remaining = num
        for item in input:
            if remaining == 0:
                break
            yield item
            remaining -= 1
    return taker
|
c925fa66d5f622f9042921bce29dbf50a2b43277
| 64,663
|
def permutation_from_block_permutations(permutations):
    """Concatenate block permutations into one permutation tuple.

    Reverse operation to :py:func:`permutation_to_block_permutations`, e.g.
    ``(1,2,0) [+] (0,2,1) --> (1,2,0,3,5,4)``.

    :param permutations: A list of permutation tuples
        ``[t = (t_0,...,t_n1), u = (u_0,...,u_n2),..., z = (z_0,...,z_nm)]``
    :type permutations: list of tuples
    :return: permutation image tuple ``s = t [+] u [+] ... [+] z``
    :rtype: tuple
    """
    combined = []
    shift = 0
    for block in permutations:
        # Each block's entries are offset by the total length so far.
        combined.extend(entry + shift for entry in block)
        shift += len(block)
    return tuple(combined)
|
3b6508c103e39051ccfa909b54aa9ec506b35f86
| 64,666
|
import torch
def binary_accuracy_from_logits(outputs: torch.Tensor, labels: torch.Tensor) -> float:
    """
    Compute binary-classification accuracy from raw model logits.

    :param outputs: Tensor of model outputs in logit space.
    :param labels: Tensor of ground-truth labels (0.0 / 1.0).
    :return: Fraction of predictions that match the labels.
    """
    # Threshold the sigmoid probabilities at 0.5 to obtain hard predictions.
    predictions = (torch.sigmoid(outputs) > 0.5).float()
    num_correct = (predictions == labels).sum().item()
    return num_correct / labels.shape[0]
|
5ea097cfe8915255f345fee39f1b84dcd56233e3
| 64,667
|
def list2rofi(datas, sep):
    """
    Convert a python list into a single string formatted for rofi.

    Parameters
    ----------
    datas : list
        elements stored in a list
    sep : str
        separator character placed between elements

    Returns
    -------
    str
        elements joined by ``sep``

    Examples
    --------
    >>> list2rofi([1, 2, 3], '\\n')
    '1\\n2\\n3'
    """
    # The original ignored ``sep`` (it always joined with '\n') and crashed
    # on non-string elements; honor the separator and stringify each item.
    return sep.join(str(item) for item in datas)
|
9beae585518af1341e68db9c4c5e83bbe56df06a
| 64,679
|
import torch
def min(tensor, dim, keepdim):
    """
    Reduce a tensor by taking the minimum along one dimension.

    :param tensor: PyTorch tensor.
    :type tensor: `torch.nn.Module`
    :param dim: Reduction dimension.
    :type dim: `int`
    :param keepdim: Whether the output has `dim` retained.
    :type keepdim: `bool`
    :return: Tensor of minimum values (indices are discarded).
    """
    # torch.min with a dim returns (values, indices); keep only the values.
    values, _ = torch.min(tensor, dim, keepdim)
    return values
|
0bfbc8ab0650cd7ec5b2c1b18961106979e3460b
| 64,682
|
import logging
def split(pattern, lyrics):
    """Split joined Binasphere lyrics back into their component lines.

    Args:
        pattern: List of integers; entry i (cycling) names the output line
            that the i-th word belongs to.
        lyrics: Whitespace-separated string to split.
    >>> split([0, 1, 1], 'a b c d e f')
    ['a d', 'b c e f']
    """
    words = lyrics.split()
    line_count = max(pattern) + 1
    lines = [[] for _ in range(line_count)]
    cursor = 0
    for word in words:
        lines[pattern[cursor]].append(word)
        cursor = (cursor + 1) % len(pattern)
    if cursor != 0:
        # The words ran out mid-cycle; flag the partially matched pattern.
        logging.warning('Pattern is not fully matched, ended on %d', cursor)
    return [' '.join(line) for line in lines]
|
3dc8a9163b49d2f41a2405d253227af036c13048
| 64,683
|
def calculate_width(count, parent_count, parent_width):
    """calculate_width(count: int, parent_count: int, parent_width: int)
    Scale ``parent_width`` by the ratio ``count / parent_count``.
    Returns 0 when the parent has no samples, avoiding division by zero.
    """
    if parent_count == 0:
        return 0
    return int(count * parent_width // parent_count)
|
34dabcb8f0cf90866c7e4ab1b5834554f5341d5b
| 64,687
|
def load_doc(filename):
    """Read a whole text file into memory.

    Args:
        filename (str): Name of the local file to be opened.
    Returns:
        str: Unique string containing the whole file.
    """
    # Context manager guarantees the handle is closed even if read() raises;
    # the original leaked the file object on error.
    with open(filename, 'r') as file:
        return file.read()
|
cb849b3bea4b7d4117f6a1c3e61ffca8983ff278
| 64,688
|
def coalesce_buffers(
    buffers: list[bytes],
    target_buffer_size: int = 64 * 1024,
    small_buffer_size: int = 2048,
) -> list[bytes]:
    """Merge adjacent small buffers to reduce tiny writes while limiting copies.

    Parameters
    ----------
    buffers : list of bytes_like
    target_buffer_size : int, optional
        Approximate upper bound on the size of a coalesced buffer.
    small_buffer_size : int, optional
        Buffers <= this size are considered "small" and may be copied.
    """
    # A single buffer has nothing to merge with.
    if len(buffers) == 1:
        return buffers

    result: list[bytes] = []
    pending: list[bytes] = []  # small buffers queued for concatenation
    pending_size = 0           # combined length of ``pending``

    def emit_pending():
        nonlocal pending_size
        if not pending:
            return
        # Avoid a copy when only a single small buffer is queued.
        result.append(pending[0] if len(pending) == 1 else b"".join(pending))
        pending.clear()
        pending_size = 0

    for buf in buffers:
        if len(buf) <= small_buffer_size:
            pending.append(buf)
            pending_size += len(buf)
            if pending_size >= target_buffer_size:
                emit_pending()
        else:
            # Large buffers pass through uncopied, after flushing the queue
            # to preserve ordering.
            emit_pending()
            result.append(buf)
    emit_pending()
    return result
|
f0e9f563bdc84c5059d1c71313750800dc4ae23d
| 64,689
|
def xstr(s):  # pylint: disable=invalid-name
    """Return the input unchanged, or the string 'None' so None can print."""
    if s is None:
        return 'None'
    return s
|
c71328c8dde757cfdd67d9386cb88c91fb12816a
| 64,694
|
def remove_prefix(text, prefix):
    """
    Return ``text`` with a leading ``prefix`` stripped, if present.
    """
    return text[len(prefix):] if text.startswith(prefix) else text
|
79b6414d2fd6dca5a0073475f479185a883705ca
| 64,700
|
def get_boards_list(main_node):
    """Collect board states from the root of the chain down to ``main_node``.

    Walks ``parent`` links from the given (last) node up to the root, then
    reverses so the returned list is ordered oldest-first.

    :param main_node: the last node (may be None)
    :return: list of board states, root first
    """
    boards = []
    node = main_node
    while node is not None:
        boards.append(node.state)
        node = node.parent
    return boards[::-1]
|
a2a4ab0f837102d495caaad3233d507cd540476b
| 64,701
|
def is_staff(user):
    """
    Return a truthy value when the user has the is_staff or is_superuser flag.
    """
    # Mirrors `a or b`: the first truthy flag is returned as-is.
    if user.is_staff:
        return user.is_staff
    return user.is_superuser
|
95fb755f477db4c56f56d127b8af10bff1b92c38
| 64,707
|
def is_float(input):
    """Return True when ``input`` can be converted to a float, else False."""
    try:
        # Result discarded; only success/failure of the conversion matters.
        float(input)
    except (ValueError, TypeError):
        # TypeError covers values float() rejects outright (None, lists),
        # which previously propagated out of the function.
        return False
    return True
|
a5517c24c28cda2699a15ab89a42104f667cf431
| 64,709
|
import calendar
def dt_to_ut(dt):
    """Convert a datetime to a Unix timestamp in whole seconds.

    Uses calendar.timegm, which interprets the time tuple as UTC
    (unlike time.mktime, which assumes local time).
    """
    time_tuple = dt.timetuple()
    return calendar.timegm(time_tuple)
|
4a16c1dcaffb047c28d9709c0c915ea8d5669344
| 64,710
|
import socket
def receive_all(conn: socket.socket, size: int) -> bytes:
    """Read exactly ``size`` bytes from a socket.

    Args:
        conn (socket.socket): Socket connection.
        size (int): Exact number of bytes to receive.
    Raises:
        RuntimeError: The peer closed the connection before ``size`` bytes
            arrived.
    Returns:
        bytes: The received payload.
    """
    parts = []
    remaining = size
    while remaining > 0:
        chunk = conn.recv(remaining)
        if not chunk:
            raise RuntimeError("connection closed before chunk was read")
        parts.append(chunk)
        remaining -= len(chunk)
    # Single join avoids quadratic bytes concatenation.
    return b"".join(parts)
|
34ec809f6269fb4f9edcfb56c96381dc47c59bf8
| 64,715
|
import torch
def cubicInterp(x1, x2, f1, f2, g1, g2):
    """
    Minimizer of the Hermite cubic interpolating a function of one variable
    through (x1, f1) and (x2, f2) with derivatives g1 and g2 there.
    Follows Nocedal and Wright, Eqn (3.59).
    """
    slope = (f1 - f2) / (x1 - x2)
    d1 = g1 + g2 - 3 * slope
    # sign() selects the correct root for either ordering of x1 and x2.
    d2 = torch.sign(torch.tensor(float(x2 - x1))) * torch.sqrt(
        torch.tensor(float(d1**2 - g1*g2)))
    return x2 - (x2 - x1) * (g2 + d2 - d1) / (g2 - g1 + 2*d2)
|
e7100db1eff739901f272a058e4be74a6a6ceb40
| 64,717
|
def k_to_c(t_k):
    """Convert a temperature from Kelvin to Celsius; None passes through."""
    return None if t_k is None else t_k - 273.15
|
e739a53e3b678541f49bf12dd82738e2cbb6dd35
| 64,718
|
def read_keys(keyfile):
    """Get access keys from a key file.

    Each line of the file becomes one whitespace-stripped key.

    :param keyfile: path of the key file to read.
    :return: tuple of stripped key strings, in file order.
    """
    with open(keyfile, "r") as keyfileobj:
        # Iterate the file lazily; readlines() materialized every line first.
        return tuple(key.strip() for key in keyfileobj)
|
9603ace57ab7a6c8a27bf191e98e25df9afe8b7c
| 64,719
|
def json_to_style(json):
    """
    Convert a dict of style attributes into a CSS style-argument string:
    { a : "b", c : "d" } -> "a:b;c:d;"

    :param json: mapping of attribute name to value (both strings).
    :return: concatenated "name:value;" pairs.
    """
    # join() builds the result in one pass instead of quadratic += loops.
    return "".join(
        "{}:{};".format(attribute, value) for attribute, value in json.items()
    )
|
7276ec377edd68742e61edeaf89287ebacb654c0
| 64,720
|
def assign_vocab_ids(word_list):
    """Assign sequential ids to words.

    Returns the pair of lookup dicts (word -> id, id -> word).
    """
    word_to_id = {word: idx for idx, word in enumerate(word_list)}
    id_to_word = {idx: word for idx, word in enumerate(word_list)}
    return word_to_id, id_to_word
|
6742c12deb2bf250f7eb7c96630d1f57c1f92c61
| 64,725
|
def IntToRGB(intValue):
    """ Convert an FL Studio Color Value (Int) to an (r, g, b) tuple """
    red = (intValue >> 16) & 0xFF
    green = (intValue >> 8) & 0xFF
    blue = intValue & 0xFF
    return (red, green, blue)
|
5ba1735c73a2b0c51f67b8b4f0ae67dfcae8a5ea
| 64,726
|
def circuit(rel_x, rel_y, color='green', port=1, target_port=1):
    """helper method for setting Entity connections, eg. Entity(E.chest, connections=[circuit(0, 1)])"""
    connection = (port, color, rel_x, rel_y, target_port)
    return connection
|
25736270c42ef216a46f4332e7770b9023d06fe4
| 64,728
|
def portfolio_returns(holdings_returns, exclude_non_overlapping=True):
    """Combine per-holding return series into an equal-weight portfolio.

    Parameters
    ----------
    holdings_returns : list
        Daily noncumulative return timeseries, one per holding.
    exclude_non_overlapping : boolean, optional
        If True, keep only dates present in every series; otherwise treat
        missing dates as 0% returns until a holding has valid data.

    Returns
    -------
    pd.Series
        Equal-weight returns timeseries.
    """
    total = holdings_returns[0]
    for series in holdings_returns[1:]:
        total = total + series
        # Adding misaligned series produces NaN on non-overlapping dates.
        total = total.dropna() if exclude_non_overlapping else total.fillna(0)
    return total / len(holdings_returns)
|
713c06bcdabfc1ccd0ca79aa9b807f073d2bdc46
| 64,731
|
def create_dict(items):
    """
    Build a dict mapping each item of the given list to itself.
    :param list items: items used as both keys and values.
    :rtype: dict
    """
    return dict(zip(items, items))
|
e600c4139fc0c335233d39153239e104c83d90cb
| 64,732
|
def FixateInputShape(input_params,
                     batch_size,
                     target_max_seqlen=None,
                     source_max_seqlen=None):
  """Adjusts the input_params so that it always produces fixed shaped output.

  Sets p.target_max_length to target_max_seqlen and p.source_max_length to
  source_max_seqlen if set. Only keep items in bucket_upper_bound that is <=
  max_seqlen where max_seqlen is source_max_seqlen if given; otherwise,
  target_max_seqlen.

  Args:
    input_params: The input params.
    batch_size: The input generator should always output the batch size.
    target_max_seqlen: Every batch should produce samples with this sequence
      length, and samples are padded to this length. Use
      input_params.bucket_upper_bound[-1] if not set.
    source_max_seqlen: Same effect as target_max_seqlen but for source if set.

  Returns:
    input_params itself.
  """
  # NOTE(review): input_params is presumably a Lingvo-style hyperparams
  # object (it exposes bucket_upper_bound, remote, etc.) -- confirm.
  p = input_params
  # Pad to fixed length, since otherwise the infeed queue can't be set up,
  # because the shape won't be known statically.
  #
  # Limit memory by throwing away large sequences.
  if not target_max_seqlen:
    # Fall back to the largest configured bucket as the target length.
    assert p.bucket_upper_bound
    target_max_seqlen = p.bucket_upper_bound[-1]
  if source_max_seqlen:
    # Drop buckets longer than the fixed source length; if none survive,
    # keep a single bucket at exactly that length.
    p.bucket_upper_bound = [
        x for x in p.bucket_upper_bound if x <= source_max_seqlen
    ]
    if not p.bucket_upper_bound:
      p.bucket_upper_bound = [source_max_seqlen]
  else:
    # Same filtering, keyed on the target length instead.
    p.bucket_upper_bound = [
        x for x in p.bucket_upper_bound if x <= target_max_seqlen
    ]
    if not p.bucket_upper_bound:
      p.bucket_upper_bound = [target_max_seqlen]
  # Every remaining bucket uses the same fixed batch size.
  p.bucket_batch_limit = [batch_size] * len(p.bucket_upper_bound)
  p.pad_to_max_seq_length = True
  if source_max_seqlen:
    p.source_max_length = source_max_seqlen
  p.target_max_length = target_max_seqlen
  if hasattr(p, 'pad_and_set_target_shape'):
    p.pad_and_set_target_shape = True
  # Because every batch is padded to the max sequence length and has the same
  # batch size, they are shardable.
  p.remote.shardable_batch = True
  return p
|
9034b08e73b6fdded41420f36167f099fade58fb
| 64,738
|
import torch
def calculate_cummulate_survive(max_len, gamma, surrogate_step_survival):
    """
    Estimate cumulative surrogate survival rewards, one entry per timestep.

    Entry ``t`` is the discounted accumulation built over ``t + 1`` steps:
    ``acc[t] = acc[t-1] * gamma + surrogate_step_survival``.

    :param max_len: number of timesteps to cover.
    :param gamma: discount factor used in the reinforced rewards.
    :param surrogate_step_survival: surrogate single-step survival reward.
    :return: 1-D tensor of length ``max_len`` of accumulated survival values.
    """
    values = [surrogate_step_survival]
    for _ in range(1, max_len):
        values.append(values[-1] * gamma + surrogate_step_survival)
    return torch.tensor(values)
|
922161aca2feeb36acdbbadfe6d47f71f688f972
| 64,739
|
def check_args(args):
    """
    Validate command-line arguments passed in to the script:
    * --type must be one of csv or json.
    * --port must be an integer between 1024 and 65535.
    Arguments:
    * args: The args namespace produced by ```parse_args```
    Returns True if all checks pass, False otherwise.
    """
    ok = True
    if args.type not in ['csv', 'json']:
        ok = False
        print("Please specify your file type with --type as csv or json.")
    if not 1024 <= int(args.port) <= 65535:
        ok = False
        print("Please specify an integer port between 1024 and 65535.")
    return ok
|
3606681f0de888d930896a7ac561c4c7145405b2
| 64,746
|
def size_similarity(heights, index1, index2):
    """
    Vertical size similarity: ratio of the smaller to the larger height.

    Args:
        heights(numpy.array): Text proposal heights.
        index1(int): First text proposal.
        index2(int): Second text proposal.
    Return:
        float: min(h1, h2) / max(h1, h2).
    """
    pair = (heights[index1], heights[index2])
    return min(pair) / max(pair)
|
c6e65ba9d091ddfd7ae859d773eb15e00d8fc493
| 64,747
|
def prettyDict(d: dict) -> str:
    """Lay out a dictionary as tab-terminated "Key: Value" entries."""
    parts = ["{}: {}\t".format(key, value) for key, value in d.items()]
    return "".join(parts)
|
c4466d7d89fef144e1aefde147c4f56044e1a886
| 64,748
|
def getLiableOwner(directory_item):
    """
    Return the owner billed for ``directory_item``.

    Recall - OWNERS of shared folders are charged for the entire shared
    folder, so walk parent_folder links up to the top-most folder and
    return that folder's owner.
    """
    current = directory_item
    while current.parent_folder:
        current = current.parent_folder
    return current.owner
|
47b9bcd83b98c45df708dc0b0fb2a37dc14ec56a
| 64,757
|
import requests
def send_api_request(request_url, headers):
    """
    Sends the request to The Blue Alliance for the given request and headers.

    Arguments:
        request_url: The request URL to send the request to.
        headers: The header parameters to send with the request.
    Returns:
        A JSON object of the response
    """
    # BUG FIX: requests.get()'s second positional argument is ``params``
    # (the query string), not ``headers`` -- the auth headers were being
    # sent as URL parameters.  Pass them via the ``headers`` keyword.
    response = requests.get(request_url, headers=headers)
    return response.json()
|
c816c836f95b04d9ed6724ef4f65ce5a5a34b3b2
| 64,765
|
import logging
def __get_logging_level(logging_level):
    """Resolve a logging level given either a level name string or a number."""
    if not isinstance(logging_level, str):
        # Already numeric (or a logging constant); pass through unchanged.
        return logging_level
    # Map e.g. "debug" -> logging.DEBUG.
    return getattr(logging, logging_level.upper())
|
fa2448e85adeb8be1c08e5409917d808f6b36e8a
| 64,766
|
def flat_mapping_of_dict(d, sep='_', prefix=''):
    """
    Recursively flatten nested dicts into a single-level dict.
    Note: This assumes all keys are strings!
    :param d: a dict
    :param sep: separator inserted between joined key parts
    :param prefix: prefix applied to keys at this recursion level
    :return: a new flat dict
    """
    flat = {}
    for key, value in d.items():
        full_key = prefix + sep + str(key) if prefix else str(key)
        if isinstance(value, dict):
            # Recurse, carrying the joined key down as the new prefix.
            flat.update(flat_mapping_of_dict(value, sep=sep, prefix=full_key))
        else:
            flat[full_key] = value
    return flat
|
b7292e0a62467c24a1ab8937dc4aebcb30918d09
| 64,768
|
def map_value(value, in_start, in_stop, out_start, out_stop):
    """
    Linearly map ``value`` from the range [in_start, in_stop] onto
    [out_start, out_stop].
    :param value: value to remap
    :param in_start: input range start
    :param in_stop: input range end
    :param out_start: output range start
    :param out_stop: output range end
    :return: the remapped value
    """
    fraction = (value - in_start) / (in_stop - in_start)
    return out_start + (out_stop - out_start) * fraction
|
dd63dcc466e4146159b24d2419fb08445de3ff10
| 64,770
|
def fixed_window_rolling_average(df, thickness, dx=0.1, bounds=(0.0, 28.0)):
    """
    Calculates a rolling average using a window of fixed width.

    Parameters
    ----------
    df : pandas.DataFrame object
        DataFrame containing splitting results (needs "depthkm" and "tlag"
        columns).
    thickness : float
        Size of window, in km.
    dx : float, optional
        Size of increment for rolling average, in km. Default 0.1 km.
    bounds : sequence of two floats, optional
        Min/max depth to perform rolling average over, in km.
        Default (0.0, 28.0).  (Changed from a mutable list default, which
        is a shared-state hazard; callers may still pass a list.)

    Returns
    -------
    depth : list of float
        Depths (window midpoints) for each of the assigned values.
    mean : list of float
        Mean values for the rolling window, assigned to the midpoint.
    median : list of float
        Median values for the rolling window, assigned to the midpoint.
    std : list of float
        Standard deviation values for the rolling window, assigned to the
        midpoint.
    """
    depth, amean, median, std = [], [], [], []
    top, max_depth = bounds
    bottom = top + thickness
    while bottom <= max_depth:
        depth.append(top + thickness / 2)
        # Inclusive window [top, bottom] over depth.
        vals = df[(df["depthkm"] >= top) & (df["depthkm"] <= bottom)]["tlag"]
        amean.append(vals.mean())
        median.append(vals.median())
        std.append(vals.std())
        top += dx
        bottom += dx
    return depth, amean, median, std
|
d1f37d63a3cd0a194103dbce153d9b4b6f572fc9
| 64,771
|
def decode_url(raw):
    """
    Decode a raw URL byte string into a unicode string, assuming UTF-8.
    :param raw:
        Raw URL byte string.
    :returns:
        Decoded URL.
    :rtype:
        unicode string
    """
    decoded = raw.decode('utf-8')
    return decoded
|
eeff8af78b6e67729c231518e963dd4736c2af2a
| 64,772
|
def max_square(size, grid):
    """Locate the size-by-size square with the highest power total.

    NOTE(review): the four-corner lookup (a + d - b - c) implies ``grid``
    is a prefix-sum table with a zero row/column at index 0 -- confirm
    against the caller that builds it.
    Returns (best_total, (col, row)) using 1-based coordinates, or
    (0, None) when no square total exceeds 0.
    """
    n = len(grid)
    best_power, best_coord = 0, None
    for r in range(1, n - size + 1):
        for c in range(1, n - size + 1):
            total = (grid[r - 1][c - 1] + grid[r + size - 1][c + size - 1]
                     - grid[r - 1][c + size - 1] - grid[r + size - 1][c - 1])
            if total > best_power:
                best_power, best_coord = total, (c, r)
    return (best_power, best_coord)
|
283d82629cb54b8a31bd79979067afafe5b65eec
| 64,773
|
def square(n):
    """Return the square of ``n``."""
    result = n * n
    return result
|
2d38833fe6d93d432e77311adcbc958f971b3aef
| 64,774
|
def _fatal_code(e):
    """Return True if an exception/status should not be retried. """
    # Retryable cases: no response attached (connection-level failure),
    # HTTP 429 (rate limit), or any 5xx server error.
    if not hasattr(e, 'response'):
        return False
    code = e.response.status_code
    if code == 429 or 500 <= code <= 599:
        return False
    return True
|
10deda99de42d0a6a54351057ff8f246f081480b
| 64,778
|
def get_class_names(dataset_name='VOC2007'):
    """Gets label names for the classes of the supported datasets.

    # Arguments
        dataset_name: String. Dataset name. Valid dataset names are:
            VOC2007, VOC2012, VOC, COCO, YCBVideo, FAT, FERPlus, FER
            and IMDB.

    # Returns
        List of strings containing the class names for the dataset given.

    # Raises
        ValueError: in case of invalid dataset name
    """
    if dataset_name in ['VOC2007', 'VOC2012', 'VOC']:
        return ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
                'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
                'diningtable', 'dog', 'horse', 'motorbike', 'person',
                'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    if dataset_name == 'COCO':
        return ['background', 'person', 'bicycle', 'car', 'motorcycle',
                'airplane', 'bus', 'train', 'truck', 'boat',
                'traffic light', 'fire hydrant', 'stop sign',
                'parking meter', 'bench', 'bird', 'cat', 'dog',
                'horse', 'sheep', 'cow', 'elephant', 'bear',
                'zebra', 'giraffe', 'backpack', 'umbrella',
                'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
                'snowboard', 'sports ball', 'kite', 'baseball bat',
                'baseball glove', 'skateboard', 'surfboard',
                'tennis racket', 'bottle', 'wine glass',
                'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana',
                'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
                'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
                'potted plant', 'bed', 'dining table', 'toilet',
                'tv', 'laptop', 'mouse', 'remote', 'keyboard',
                'cell phone', 'microwave', 'oven', 'toaster',
                'sink', 'refrigerator', 'book', 'clock', 'vase',
                'scissors', 'teddy bear', 'hair drier', 'toothbrush']
    if dataset_name in ['YCBVideo', 'FAT']:
        # YCBVideo and FAT share the same object set; the original code
        # duplicated this identical list in two branches.
        return ['background', '037_scissors', '008_pudding_box',
                '024_bowl', '005_tomato_soup_can', '007_tuna_fish_can',
                '010_potted_meat_can', '061_foam_brick', '011_banana',
                '035_power_drill', '004_sugar_box', '019_pitcher_base',
                '006_mustard_bottle', '036_wood_block',
                '009_gelatin_box', '051_large_clamp',
                '040_large_marker', '003_cracker_box',
                '025_mug', '052_extra_large_clamp',
                '021_bleach_cleanser', '002_master_chef_can']
    if dataset_name == 'FERPlus':
        return ['neutral', 'happiness', 'surprise', 'sadness',
                'anger', 'disgust', 'fear', 'contempt']
    if dataset_name == 'FER':
        return ['angry', 'disgust', 'fear', 'happy',
                'sad', 'surprise', 'neutral']
    if dataset_name == 'IMDB':
        return ['man', 'woman']
    raise ValueError('Invalid dataset', dataset_name)
|
3abd097e427064a918c07608002ba19f9f3fb12f
| 64,780
|
def _mult_replace(text, *A, **replacements):
    """
    Apply a set of string replacements to ``text``.

    Replacement pairs may come from keyword arguments (``param=val``) or
    from dicts passed positionally; positional dicts override keywords.
    Pass ``_invert=True`` to swap each pair's direction.
    """
    invert = replacements.pop('_invert', False)
    for extra in A:
        if isinstance(extra, dict):
            replacements.update(extra)
    for old, new in replacements.items():
        if invert:
            old, new = new, old
        text = text.replace(old, new)
    return text
|
a6f9d642e6652c55589c26a55bd8429abcde64c4
| 64,783
|
def get_pair_equality(row, column_1, column_2):
    """
    Helper used by pair_equality: compare two column values of one row.

    :param row:
        Row from dataframe
    :param column_1:
        Name of first column
    :param column_2:
        Name of second column
    :return:
        1 when the row holds equal values in both columns, otherwise 0
    """
    return 1 if row[column_1] == row[column_2] else 0
|
fc95314580776bd2bc8796b7fe0beeec9c5b52f2
| 64,784
|
def split_at_space(string, max_length=40):
    """
    Split a string into two parts at a whitespace character, keeping the
    first part at most ``max_length`` characters.  Used for breaking a
    user's `address` field into `address1`, `address2`, and `address3`.
    If the string already fits, the second part is empty.
    """
    if len(string) <= max_length:
        return string, ""
    cut = 0
    for pos, ch in enumerate(string):
        if ch.isspace():
            if pos > max_length:
                # First whitespace past the limit: cut at the previous one.
                return string[:cut], string[cut:]
            cut = pos
    # No whitespace beyond the limit; cut at the last one seen.
    return string[:cut], string[cut:]
|
60fbbca29ba4d75443af203cf568caa6cc7f1a5e
| 64,785
|
def equal_chunks(list, chunk_size):
    """return successive chunk_size-sized chunks from list (last may be shorter)."""
    return [list[start:start + chunk_size]
            for start in range(0, len(list), chunk_size)]
|
8c3e44344c3a1631e786597679c46e005fafa170
| 64,787
|
import re
def _split_regexp(restr):
    """Compile ``restr`` into a (pattern, inverted) 2-tuple.

    A leading '!' marks the pattern as inverted (flag 1) and is stripped
    before compilation.
    """
    inverted = 1 if restr[0] == "!" else 0
    if inverted:
        restr = restr[1:]
    return re.compile(restr), inverted
|
48b8bb22429c09fa93fe1d70d96a9388a45a61ee
| 64,789
|
from typing import Any
def is_smqtk(value: Any) -> bool:
    """Return `True` if `value` is a "smqtk" directive."""
    # Exact dict check (subclasses intentionally excluded, as in original).
    if type(value) is not dict:
        return False
    return "smqtk" in value
|
d09f213f5e6bdaf0b5dec55b364b1c5969416cdb
| 64,794
|
import random
def weighted_choice(item_probabilities):
    """ Pick a random item with probability proportional to its weight.

    Args:
        item_probabilities: list of (item, weight) tuples; weights need not
            be normalized but must sum to a positive value.
    Returns:
        one randomly chosen item, according to the given weights
    """
    total = sum(weight for _, weight in item_probabilities)
    assert total > 0
    threshold = random.random() * total
    cumulative = 0
    # Walk the cumulative distribution until the random threshold is passed.
    for item, weight in item_probabilities:
        cumulative += weight
        if cumulative > threshold:
            return item
|
3219926d9b9a3af441bd98e512e3403a5857d671
| 64,799
|
def contains_silence(seq, thresh=0.15):
    """Return True when more than ``thresh`` fraction of ``seq`` is zero.

    ``seq`` is expected to support elementwise comparison (numpy array).
    """
    zero_fraction = sum(seq == 0) / len(seq)
    return zero_fraction > thresh
|
5e5abb55d6f8897f92c1ea9e0958264c812371c5
| 64,802
|
def create_edge_adjacency(npoints:int):
    """Build the edge connectivity list for a closed loop of points.
    example:
        [[0, 1],
         [1, 2],
         [2, 3],
         [3, 0]]
    Each point connects to the next, and the last point connects back to
    point 0.  This edge definition reflects the connectivity of an airfoil
    geometry.
    Args:
        npoints (int): Number of points in an airfoil
    Returns:
        List[(int,int)]: List of point connectivities
    """
    # Fewer than 2 points cannot form an edge; the old code emitted a
    # degenerate [0, 0] self-loop in that case.
    if npoints < 2:
        return []
    edges = [[i - 1, i] for i in range(1, npoints)]
    edges.append([npoints - 1, 0])  # close the loop back to point 0
    return edges
|
9583322baff0d8d3ffaae7dd7a1457b9853e5009
| 64,806
|
import gzip
def GzippableFileType(*args, **kwargs):
    """
    Build a file-opener callable that transparently handles gzip files.

    Extra positional/keyword arguments are forwarded to ``gzip.open`` (for
    ``*.gz`` paths) or the builtin ``open`` (for everything else).
    """
    def opener(fname):
        open_fn = gzip.open if fname.endswith(".gz") else open
        return open_fn(fname, *args, **kwargs)
    return opener
|
e81f2718a134e0d81641bb244a67bcd9af20b7e4
| 64,808
|
def _julian_to_ordinal(jd: int) -> int:
    """Convert Julian Day (JD) number to ordinal number.

    The offset is chosen so the JD of 0001-01-01 maps to ordinal 1
    (presumably proleptic-Gregorian ordinals -- confirm against callers).
    """
    offset = 1721425
    return jd - offset
|
cd23c1db745f421923fa27428e7ed9343d9d55e4
| 64,809
|
def get_P_ref_comp_out(P_ref_cnd):
    """Compressor discharge pressure, Eq. (3).

    The discharge pressure is taken to be equal to the condensing pressure.

    Args:
        P_ref_cnd(float): condensing pressure (MPa)
    Returns:
        float: compressor discharge pressure (MPa)
    """
    return P_ref_cnd
|
a1ae5af5425d799f1fcad7fe74edb24085e22247
| 64,812
|
from datetime import datetime
def get_date_as_numeric(date):
    """Parse a date string and return it as seconds for date comparisons.

    Args:
        date (str): the date as a string "%Y-%m-%d %H:%M:%S.%f",
            eg "2018-03-09 11:00:36.372339"
    Returns:
        float: timestamp in seconds (local-timezone interpretation, as
        produced by ``datetime.timestamp`` on a naive datetime)
    """
    fmt = "%Y-%m-%d %H:%M:%S.%f"
    return datetime.strptime(date, fmt).timestamp()
|
21e2f3752a992424dff70f4381c31d321c3ec136
| 64,814
|
import six
def content_length_header(body):
    """
    Build the ('Content-Length', '<len>') HTTP header tuple for ``body``.
    """
    length = six.text_type(len(body))
    return 'Content-Length', length
|
750d571ff7c4402fbc1d141fbd84cae7d7d70cd2
| 64,816
|
def flip(axis):
    """Toggle an axis value between 0 and 1 (bitwise XOR with 1)."""
    flipped = axis ^ 1
    return flipped
|
72dbe4fdc8f0d203b56bddc50c59ade23f37a7a0
| 64,819
|
import getpass
def prompt_for_password(hostname):
    """Interactively ask the operator for a host's disk encryption password."""
    prompt_text = "Enter disk encryption password for '%s': " % hostname
    return getpass.getpass(prompt_text)
|
12684667c3ccf14d55a46f806652f4b461ba415e
| 64,820
|
def first(iterable, or_=None):
    """Return the first element of an iterator, or ``or_`` when exhausted.

    Just semantic sugar for ``next(it, default)``.
    """
    head = next(iterable, or_)
    return head
|
ef7f2d9834defe477830ca7dd5851644e904d2e7
| 64,824
|
from typing import List
def SequenceToListInt(s: str, separator: str = '.') -> List[int]:
    """Parse '192.168.42.1' into [192, 168, 42, 1].

    Args:
        s (str): String to convert
        separator (str, optional): Separator between fields. Defaults to '.'.
    Returns:
        List[int]: parsed integer fields
    """
    return list(map(int, s.split(separator)))
|
0d93f0cb254157312e3f5acad0a57c272ff1e8c7
| 64,826
|
def get_absolute_boxes(box_absolute, boxes_relative):
    """
    Given a bounding box relative to some image, and a sequence of bounding
    boxes relative to the previous one, transform the coordinates of each of
    the latter boxes into the coordinate system of the former.
    For example, if the absolute bounding box is [100, 100, 400, 500]
    (ymin, xmin, ymax, xmax) and the relative one is [10, 10, 20, 30], the
    coordinates of the relative box in the absolute coordinate system are
    [110, 110, 120, 130].  (The previous docstring example, [110, 410, 120,
    430], did not match the computation: only ymin/xmin are added.)
    Args:
        box_absolute (ndarray): absolute bounding box.
        boxes_relative (sequence of ndarray): relative bounding boxes.
    Returns:
        list of ndarray: coordinates of each of the relative boxes in the
        coordinate system of the first one.
    """
    absolute_ymin, absolute_xmin, _, _ = box_absolute
    offset = [absolute_ymin, absolute_xmin, absolute_ymin, absolute_xmin]
    # ndarray + list broadcasts elementwise, shifting (ymin, xmin, ymax, xmax).
    return [relative_box + offset for relative_box in boxes_relative]
|
0e97c9feb990faa69baae9c07a026de6c8b8abd5
| 64,829
|
def function_42(a):
    """
    Placeholder that ignores its argument and always returns 42.

    :param a: a placeholder first param
    :type a: int
    :returns: int 42
    """
    return 42
|
56346dec258656f4ccc679562ab20cc282e504ad
| 64,833
|
def update_graphs_header(pathname):
    """
    Build the graphs-page header and description on page load or refresh.

    Parameters
    ----------
    pathname : str
        The pathname of the url in window.location

    Returns
    -------
    header : str
        Header for the graphs page, including the parsed hostname.
    desc : str
        Description of what the graphs page shows.
    """
    # '/<hostname>/...' -> '<hostname>'
    hostname = pathname.split('/')[1]
    header = '{} Graphs'.format(hostname)
    desc = ('This page features two histograms that will help you explore '
            'and visualize image data from the {} device.'.format(hostname))
    return header, desc
|
d59e3cab49d7afb2e33d0b9a371475b0da543147
| 64,834
|
def orbital_to_shell_mapping(ncore,nopen,npair):
    """\
    Map orbitals to shells: all core orbitals share shell 0, then each
    open/pair orbital gets its own consecutive shell.
    >>> orbital_to_shell_mapping(1,0,0)
    [0]
    >>> orbital_to_shell_mapping(2,0,0)
    [0, 0]
    >>> orbital_to_shell_mapping(2,1,0)
    [0, 0, 1]
    """
    shells = [0] * ncore
    # Non-core shells start at 1 when a core shell exists, else at 0.
    first_open_shell = 1 if ncore else 0
    shells.extend(i + first_open_shell for i in range(nopen + 2 * npair))
    return shells
|
d87b2f90199f3f6e3b50cdf9d954433e3028a69c
| 64,835
|
from typing import List
def common_words(sentence1: List[str], sentence2: List[str]) -> List[str]:
    """
    Return the words appearing in both sentences, compared
    case-insensitively (results are lowercased), deduplicated, and
    sorted by word length.
    """
    lowered_1 = {word.lower() for word in sentence1}
    lowered_2 = {word.lower() for word in sentence2}
    return sorted(lowered_1 & lowered_2, key=len)
|
9f3ba2a913324ba9447ec144f057e255ee49cd6d
| 64,838
|
def warningMessage(warn, location=None):
    """Generate a standard warning message.

    Parameters
    ----------
    warn : str
        The warning message.
    location : str, optional
        Where the warning happens, e.g. CTL.funcs.funcs.warningMessage.

    Returns
    -------
    str
        The formatted warning message, with the location included when
        one was given.
    """
    if location is not None:
        return "Warning in {}: {}".format(location, warn)
    return "Warning: {}".format(warn)
|
5c6fc99deb7a7a45d364bbd4904229d72e1eaffa
| 64,843
|
import torch
def expand_many(x, axes):
    """Unsqueeze ``x`` once for each axis in ``axes``, in order."""
    result = x
    for axis in axes:
        result = result.unsqueeze(axis)
    return result
|
22ef04759db98be7d7eeeab85fd468dfa257b392
| 64,852
|
def compare_metadata(expected, sample):
    """Check that ``expected`` is an equal subset of ``sample``.

    :param dict expected: The dictionary containing the expected metadata.
    :param dict sample: The metadata that must be a complete superset of
        the expected metadata.
    :return: Whether every key/value pair of ``expected`` appears in
        ``sample``.
    :rtype: bool
    """
    return all(key in sample and sample[key] == value
               for key, value in expected.items())
|
853ba88edb3911491ff2fcdb2f45a7f391ceebd4
| 64,855
|
def isprintable(text):
    """
    Report whether every character in ``text`` is printable (an empty
    string counts as printable).

    Nonprintable characters are those in the Unicode "Other" or
    "Separator" categories, except the ASCII space (0x20), which is
    considered printable. Printability here matches what repr() escapes;
    it has no bearing on strings written to sys.stdout or sys.stderr.

    :param text: The string to check
    :type text: ``str``
    :return: True if all characters are printable or the string is
        empty, False otherwise.
    :rtype: ``bool``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return text.isprintable()
|
665434fea4735e6eaa4052bf9029c68e4fbdbcf1
| 64,865
|
import re
def replace(string, substitutions):
    """Perform many string replacements all at once.

    Parameters
    ----------
    string : str
        The string to modify.
    substitutions : dict of str to str
        The string replacements to perform.

    Returns
    -------
    str
        The modified string.

    Examples
    --------
    >>> qosy.tools.replace('ABC', {'A':'AB', 'B':'D', 'C':'AC'}) # 'ABDAC'
    """
    # Longest keys first so longer matches win over their prefixes.
    keys_by_length = sorted(substitutions, key=len, reverse=True)
    pattern = re.compile('|'.join(re.escape(key) for key in keys_by_length))
    return pattern.sub(lambda match: substitutions[match.group(0)], string)
|
b1b1111f01dbe10a3b59ad949ccb126b7d066062
| 64,867
|
import random
def generate_points(n, bound):
    """Generate a random list of n distinct city positions (xi, yi).

    n: number of cities
    bound: maximum allowed X or Y position of any city
    returns List[(x1, y1), ..., (xn, yn)]
    """
    cities = set()
    # A set guarantees no duplicate positions; keep drawing until full.
    while len(cities) < n:
        x = bound * random.random()
        y = bound * random.random()
        cities.add((x, y))
    return list(cities)
|
48dfa991d2901278a573a47223904e24820e37d5
| 64,868
|
def noInf(value1, value2):
    """Divide value1 by value2, treating division by zero as 0.

    Returns value1 / value2, or 0 when value2 is 0.
    """
    return 0 if value2 == 0 else value1 / value2
|
5419a2f670a1063f22233272a8e476cdefaff7d7
| 64,870
|
def sliced_by_n(images, n=500):
    """Split *images* into consecutive chunks of at most *n* elements."""
    chunks = []
    for start in range(0, len(images), n):
        chunks.append(images[start:start + n])
    return chunks
|
a6099c46debf8f202f1321683d67d342c354f360
| 64,873
|
def buf_table_name(board):
    """Return the temporary index table name for board 'board'."""
    return "tmp_idx_{}".format(board)
|
c06eded29b5636e55352ea99b18d8cbb786a7082
| 64,879
|
import time
def prediction_timer(model, samples):
    """Time how long *model* takes to make predictions on *samples*.

    Returns a (total_time, time_per_pred) tuple, both in seconds.
    """
    started = time.perf_counter()
    model.predict(samples)
    elapsed = time.perf_counter() - started
    per_prediction = elapsed / len(samples)
    return elapsed, per_prediction
|
8f5651f9744b54401baff986ef23a260da8b4d4c
| 64,887
|
import time
def utc_to_epoch(struct_time):
    """Convert a struct_time to an integer number of seconds since epoch.

    NOTE(review): despite the name, time.mktime interprets its argument
    as *local* time, not UTC — confirm callers pass local struct_times.
    """
    seconds = time.mktime(struct_time)
    return int(seconds)
| 64,891
|
def graphql_union(name, description=None):
    """Annotate a static method or global function as a union description.

    Decorator that marks a static method or global function as returning
    a list of the names of the types that comprise a GraphQL union type.
    The types must be object, interface, and union types. At present, a
    function may carry only one graphql_union annotation.

    basestring name - The name of the union type.
    basestring description - A description of the type, or None.
        GraphQL favors the Markdown format.
    """
    def annotate(target):
        # Stash the union metadata on the function object itself.
        target._graphql_union_name = name
        target._graphql_union_description = description
        return target
    return annotate
|
aca18b0c76f35e57b441d2cb1504c9e9f3d8a3fa
| 64,893
|
def get_list_index(ls_object=None, find_str=""):
    """
    Find the index of a string in the provided list.

    :param ls_object: <list> the list to search; defaults to an empty list.
    :param find_str: <str> the string to find inside the list.
    :return: <int> the found index. -1 if not found.
    """
    # Use None instead of a mutable default argument ([]), which is
    # shared across calls and a classic Python pitfall.
    if ls_object is None:
        ls_object = []
    try:
        return ls_object.index(find_str)
    except ValueError:
        return -1
|
a58282c148a19eab28118264163f6b6894533901
| 64,896
|
def generate_test_description(local_symbols, *variable_names):
    """Generate a test description from variable names and their values.

    :param local_symbols: local symbol table from where the function was
        called (passed to eval to resolve each variable name)
    :param variable_names: variable names to include in the description
    :return: test description string
    """
    # eval is intentional here: each name is looked up in the caller's
    # symbol table to render its current value.
    parts = []
    for name in variable_names:
        parts.append('{} = {}'.format(name, eval(name, local_symbols)))
    return 'when testing \'{}\''.format(', '.join(parts))
|
4eddea7075994cc8e3d9e5da4bdb4bf8c85c8aad
| 64,897
|
def dict_to_list(d):
    """
    Return the values of dictionary *d* as a list.

    Repeated values appear in the list as many times as they occur.

    :param d: Dictionary
    :return: list of values in the dictionary

    >>> d = {'a': 1, 'b': 'bee', 'c': 65}
    >>> dict_to_list(d)
    [1, 'bee', 65]
    >>> dict_to_list({})
    []
    """
    return list(d.values())
|
db599df9d52e385f77eb52315af825d450c6ab70
| 64,900
|
def kwa(**kwargs) -> dict:
    """Collect the given keyword arguments and return them as a dict.

    :param kwargs: the kwargs to convert
    :return: the specified kwargs as a dict
    """
    result = kwargs
    return result
|
b7ca0561589b279347a122311b94787894197499
| 64,901
|
import hashlib
def compute_hashed_password(password, salt):
    """
    Compute a hashed password.

    Concatenates the password and salt, encodes the result as UTF-8,
    and returns the SHA-512 hex digest.

    NOTE(review): a single salted SHA-512 is weak for password storage;
    prefer a dedicated KDF such as hashlib.pbkdf2_hmac or scrypt.

    :param password: A password
    :param salt: A salt string
    :type password: str
    :type salt: str
    :return: the hashed password as a hex digest
    :rtype: str
    """
    # hashlib digests operate on bytes; the original passed str directly,
    # which raises TypeError on Python 3.
    data = (password + salt).encode('utf-8')
    return hashlib.sha512(data).hexdigest()
|
69502d3add150acd04753125113e47feff488c6e
| 64,902
|
import string
def text_process(mess):
    """
    Clean a string of text:

    1. Removes all ASCII punctuation characters.
    2. Collapses runs of whitespace into single spaces.

    Note: stopword removal is currently disabled (see the commented-out
    STOPWORDS list below); every remaining word is kept.

    :param mess: the input text
    :return: the cleaned text as a single string (not a list)
    """
    # STOPWORDS = stopwords.words('english') + ['u', 'ü', 'ur', '4', '2', 'im', 'dont', 'doin', 'ure']
    # Check characters to see if they are in punctuation
    nopunc = [char for char in mess if char not in string.punctuation]
    # Join the characters again to form the string.
    nopunc = ''.join(nopunc)
    # Re-split and re-join: normalizes whitespace (stopword filter removed).
    return ' '.join([word for word in nopunc.split()])
|
ff3db60ee35da5788ccfc3208a0ea15a9409c81d
| 64,903
|
def remove_split_from_name(name):
    """
    Strip a "_split__###.csv" suffix from a file name.

    A split name is assumed to end with the 15-character tail
    "_split__" + 3 characters + ".csv"; when that marker is found the
    whole tail is replaced with a plain ".csv" extension. Otherwise the
    name is returned unchanged.
    """
    # Characters -15..-8 of a split name hold the "_split__" marker.
    marker = name[-15:-7]
    if marker == "_split__":
        return name[:-15] + ".csv"
    return name
|
054f3d8c43087f0c2d00b789a339b703f4a73652
| 64,905
|
def get_reference_keys(xml_node):
    """Build catalog lookup keys for an xml node: ``catalog[section][line]``.

    The section is the node's tag with any namespace prefix ("{...}")
    stripped; the line is a "Line N" string from the node's sourceline.
    """
    section = xml_node.tag.rsplit("}", 1)[-1]
    line = "Line %d" % xml_node.sourceline
    return section, line
|
766e875a068c344093d9ecf11d08da28c7b497b6
| 64,906
|
def fss_init(thr, scale):
    """
    Initialize a fractions skill score (FSS) verification object.

    Parameters
    ----------
    thr: float
        The intensity threshold.
    scale: float
        The spatial scale in pixels — the size of the moving window used
        to compute the fraction of pixels above the threshold.

    Returns
    -------
    fss: dict
        The initialized FSS verification object, with all running sums
        set to zero.
    """
    return {
        "thr": thr,
        "scale": scale,
        "sum_fct_sq": 0.0,
        "sum_fct_obs": 0.0,
        "sum_obs_sq": 0.0,
    }
|
87a74c769533a1931ec26171a45fdf4181bc23a2
| 64,916
|
import collections
def picard_idxstats(picard, align_bam):
    """Retrieve alignment stats from picard using BamIndexStats.

    Runs picard's BamIndexStats on *align_bam* and parses its stdout into
    AlignInfo tuples (contig, length, aligned, unaligned). A line with
    only two fields is recorded under the pseudo-contig "nocontig" with
    just its unaligned count.
    """
    AlignInfo = collections.namedtuple(
        "AlignInfo", ["contig", "length", "aligned", "unaligned"])
    stdout = picard.run("BamIndexStats", [("INPUT", align_bam)],
                        get_stdout=True)
    stats = []
    for line in stdout.split("\n"):
        if not line:
            continue
        fields = line.split()
        if len(fields) == 2:
            # Two-field line: only an unaligned-read count, no contig.
            stats.append(AlignInfo("nocontig", 0, 0, int(fields[1])))
        elif len(fields) == 7:
            contig, _, length, _, aligned, _, unaligned = fields
            stats.append(AlignInfo(contig, int(length), int(aligned),
                                   int(unaligned)))
        else:
            raise ValueError("Unexpected output from BamIndexStats: %s" % line)
    return stats
|
6ee5d38b272e4ae05a3df5d02230509bb427133b
| 64,923
|
def check_switch(panel: int, switch: int) -> bool:
    """
    Get whether a switch is set on or off.

    Parameters
    ----------
    panel : int
        Switch panel to investigate.
    switch : int
        Switch to check.

    Returns
    -------
    bool
        True if the switch is on, False if not.

    Notes
    -----
    This function works well on binary switch panels only.
    """
    quotient = panel // switch
    return quotient % 2 == 1
|
5c4cd02418b05d01b4b7a7ff61a4fd7b938eb86f
| 64,928
|
def make_symbol_list(maximum):
    """For a given maximum, build the list of symbols to permute.

    For instance, 3 yields ['0', '1', '2', '3'].

    Parameters
    ----------
    maximum: int

    Returns
    -------
    List[str]
        The string symbols '0' through str(maximum), inclusive.
    """
    symbols = []
    for value in range(maximum + 1):
        symbols.append(str(value))
    return symbols
|
3fad3d508b70c9d104af4f2fa689c925cbb61f71
| 64,929
|
def log_file_with_status(log_file: str, status: str) -> str:
    """
    Append a status extension to a log file path.

    Args:
        log_file: log file path without status extension
        status: string appended to the end of the log file path

    Returns:
        The log file path with the status extension.
    """
    return '.'.join((log_file, status))
|
7d66115fb238d736f4a6ee92a96af8f0ec6663c5
| 64,933
|
def group_by_length(words):
    """Group words into sets keyed by word length.

    >>> grouped = group_by_length(['python', 'module', 'of', 'the', 'week'])
    >>> grouped == {2: set(['of']),
    ...             3: set(['the']),
    ...             4: set(['week']),
    ...             6: set(['python', 'module']),
    ...             }
    True
    """
    groups = {}
    for word in words:
        length = len(word)
        if length not in groups:
            groups[length] = set()
        groups[length].add(word)
    return groups
|
dbbabfc499b6885b1f2978593b3828c5c6fc5d3e
| 64,935
|
def is_multiallelic(variant):
    """Does the variant have multiple alt alleles?

    Args:
      variant: third_party.nucleus.protos.Variant.

    Returns:
      True if the variant has more than one alt allele.
    """
    alt_count = len(variant.alternate_bases)
    return alt_count > 1
|
c170379ceced883d7569ad21b9c13d8a711d1536
| 64,937
|
import json
def parse_json_site_response(text):
    """
    Parse a trafiklab JSON site response and extract station names.

    Only entries whose Type is 'Station' are kept; each is returned as a
    {'name': <station name>} dict.
    """
    payload = json.loads(text)
    stations = []
    for entry in payload.get(u'ResponseData', {}):
        if entry.get(u'Type') == 'Station':
            stations.append({u'name': entry['Name']})
    return stations
|
41501df6d5de808316695158c3bd8b5c1869c91a
| 64,938
|
def itoa(num, radix):
    """
    Convert a non-negative int to its string representation in an
    arbitrary base, up to base 36.

    :param num: the non-negative integer to convert
    :param radix: the base to convert to (2-36)
    :return: the string representation of ``num`` in base ``radix``
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    # Bug fix: the original loop never executed for 0 and returned "".
    if num == 0:
        return "0"
    result = ""
    while num > 0:
        result = digits[num % radix] + result
        num //= radix
    return result
|
61548fab8b0cbf1a750d7a9d66192f47aad2743f
| 64,939
|
import string
def remove_punctuation(text):
    """Strip every ASCII punctuation character from the given string.

    :param str text: the string to clean
    :return: the punctuation-removed string
    :rtype: str
    """
    table = str.maketrans('', '', string.punctuation)
    return text.translate(table)
|
2659a780375990329925b64f0b8c6f49e436017e
| 64,944
|
import math
def get_distance_meters(locA, locB):
    """Approximate distance in meters between two location objects.

    Treats the (lat, lon, alt) deltas as a Euclidean vector and scales
    the result by ~111,311.95 m per degree.

    NOTE(review): the altitude delta is scaled by the same
    degrees-to-meters factor as lat/lon — confirm callers expect alt in
    degree-equivalent units, not meters.
    """
    d_lat = locB.lat - locA.lat
    d_lon = locB.lon - locA.lon
    d_alt = locB.alt - locA.alt
    euclid = math.sqrt(d_lat ** 2 + d_lon ** 2 + d_alt ** 2)
    return euclid * 1.1131195e5
|
47d49a81976d40d90b5920babc0bf80ab08ad05d
| 64,945
|
def trailer(draw):
    """
    trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME

    Stub: currently always produces an empty string regardless of *draw*.
    """
    # '(' [testlist] ')' | '[' subscript ']' | '.' NAME
    return ''
|
75107d230a43a1d96046b544530d82e9dbe8a57b
| 64,947
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.