content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_options(options_str):
    """Decode a string of '1'/'0' flags into a settings dict.

    The first character maps to "enable_footnotes", the second to
    "force_reprocess"; '1' means True, anything else means False.
    """
    flags = [char == "1" for char in options_str]
    return {"enable_footnotes": flags[0], "force_reprocess": flags[1]}
|
9f0650f0ef21c938488de43e981afddf0cea4f29
| 89,500
|
def is_namedtuple(x):
    """Check if x is a namedtuple instance.

    Heuristic: its class derives directly (and only) from tuple and
    carries a tuple-valued ``_fields`` attribute.
    :rtype: Boolean.
    """
    bases = type(x).__bases__
    if len(bases) != 1 or bases[0] is not tuple:
        return False
    return isinstance(getattr(x, '_fields', None), tuple)
|
fc3848c191a7a5fec16469758597128e0629c8ef
| 89,504
|
def union(a, b):
    """Return a list of the distinct elements appearing in *a* or *b*."""
    merged = set(a)
    merged.update(b)
    return list(merged)
|
8a356478239030c1518a2e7a8fc8db38c187a7c5
| 89,507
|
def get_related_models(model):
    """Collect the model classes related to *model*.

    :param model: Django Model (uses ``_meta.get_all_related_objects()``)
    :return: list of related Model classes
    """
    relations = model._meta.get_all_related_objects()
    return [relation.model for relation in relations]
|
90bec1de2864f4360d0eafa646fd2e10e21c1b91
| 89,510
|
import zlib
def decompress(data: bytes) -> bytes:
    """Inflate a zlib-compressed byte string.

    Args:
        data (bytes): zlib-compressed payload.
    Returns:
        bytes: the original, uncompressed bytes.
    """
    inflated = zlib.decompress(data)
    return inflated
|
3277396d9461eb1683e75efbb6a36594f01af9f6
| 89,513
|
def list_of_list_to_list(listoflist):
    """Flatten one level of nesting: a list of lists becomes a single list.

    params:
        listoflist: the list of lists to flatten
    """
    return [element for sublist in listoflist for element in sublist]
|
3680193941c169460f1cda5dac10819ab2a18158
| 89,517
|
import re
def parse_frames(string):
    """Parse a frame-list string into a list of frame numbers.

    Accepts ';' or ',' as separators and 'a-b' ranges (negatives allowed).
    Examples
    >>> parse_frames("0-3;30")
    [0, 1, 2, 3, 30]
    >>> parse_frames("0,2,4,-10")
    [0, 2, 4, -10]
    >>> parse_frames("-10--5,-2")
    [-10, -9, -8, -7, -6, -5, -2]
    Args:
        string (str): The string to parse for frames.
    Returns:
        list: A list of frames
    """
    if not string.strip():
        raise ValueError("Can't parse an empty frame string.")
    if not re.match("^[-0-9,; ]*$", string):
        raise ValueError("Invalid symbols in frame string: {}".format(string))
    frames = []
    for token in re.split(";|,", string):
        cleaned = token.strip().replace(" ", "")
        if not cleaned:
            # Empty element between consecutive separators: ignore it.
            continue
        # A span like 1-20, possibly with negatives (-10--8).
        span = re.search("(-?[0-9]+)-(-?[0-9]+)", cleaned)
        if span:
            first, last = span.groups()
            frames.extend(range(int(first), int(last) + 1))
        else:
            try:
                frames.append(int(cleaned))
            except ValueError:
                raise ValueError("Invalid frame description: "
                                 "'{0}'".format(cleaned))
    if not frames:
        # Only spaces plus a separator like `,` or `;` were supplied.
        raise ValueError("Unable to parse any frames from string: {}".format(string))
    return frames
|
ef9523203b4fbc602fd124ec07fc61aaab1437ba
| 89,518
|
def SFFloat_vrmlstr( value, lineariser=None):
    """Render a float in compact VRML97 notation.

    Drops the redundant leading zero ('0.5' -> '.5', '-0.5' -> '-.5'),
    a trailing '.0' ('3.0' -> '3'), and maps 0.0 to '0'.
    """
    text = str(value)
    if text == '0.0':
        return '0'
    if text.startswith('0.'):
        return text[1:]
    if text.startswith('-0.'):
        return '-' + text[2:]
    if text.endswith('.0'):
        return text[:-2]
    return text
|
f6cecef411aa6ef3a8a1ef872e25dad19d3d8f03
| 89,521
|
import math
def geometric(a, r, index_range=None, seq_range=None):
    """Generate the geometric sequence a, a*r, a*r**2, ...

    :param a: first term
    :param r: common ratio
    :param index_range: optional iterable of 1-based term indices to yield
    :param seq_range: optional range of term *values*; yields the terms
        whose value falls in [seq_range.start, seq_range.stop)
    """
    def t(n):
        # n-th term, 1-based: t(1) == a.
        return a * r ** (n - 1)
    if index_range:
        for n in index_range:
            yield t(n)
        return
    elif seq_range:
        u = math.ceil(1 + math.log(seq_range.start / a, r))
        v = math.floor(1 + math.log((seq_range.stop - 1) / a, r))
        for n in range(u, v + 1):
            yield t(n)
        return
    # Unbounded branch: start at the first term (n = 1).
    # Bug fix: this previously started at 0 and wrongly yielded a/r first.
    k = 1
    while True:
        yield t(k)
        k += 1
|
04f47872779d234e7592632f332bed990b6b4d80
| 89,525
|
from datetime import datetime
def zulu_to_mdy(zulu_date):
    """Convert Tenable's Zulu timestamp to 'MM-DD-YYYY HH:MM AM/PM'.

    args:
        zulu_date (str): Tenable-style timestamp, e.g. '2020-01-31T13:45:00Z'
    return (str): MDY (Month, Day, Year) formatted string
    """
    parsed = datetime.strptime(zulu_date[:19], "%Y-%m-%dT%H:%M:%S")
    return parsed.strftime('%m-%d-%Y %I:%M %p')
|
f971c6bd2d3b16e2cc83cb747fe82999e94d87a9
| 89,530
|
def model_ismomentregistredzc_39(calendarMoments_t1=None):
    """
    - Name: IsMomentRegistredZC_39 -Version: 1.0, -Time step: 1
    - Description:
        * Title: Is FlagLeafLiguleJustVisible Model
        * Author: Pierre Martre
        * Reference: Modeling development phase in the
        Wheat Simulation Model SiriusQuality.
        See documentation at http://www1.clermont.inra.fr/siriusquality/?page_id=427
        * Institution: INRA Montpellier
        * Abstract: if FlagLeafLiguleJustVisible is already Registred
    - inputs:
        * name: calendarMoments_t1 (STRINGLIST, state variable, unit: -)
          List containing the appearance of each stage at the previous
          time step; defaults to ['Sowing'] when omitted.
    - outputs:
        * name: isMomentRegistredZC_39 (INT, 0 or 1)
          1 if the flag leaf ligule has already appeared, else 0.
    """
    # Bug fix: use a None sentinel instead of a mutable default argument.
    if calendarMoments_t1 is None:
        calendarMoments_t1 = ["Sowing"]
    isMomentRegistredZC_39 = 1 if "FlagLeafLiguleJustVisible" in calendarMoments_t1 else 0
    return isMomentRegistredZC_39
|
5df41b0de2e738a32511ad39094aa9a21c07dd44
| 89,531
|
def key(id):
    """Build the memcache key under which the user's sidebar is stored."""
    return 'sidebar_for_%s' % repr(id)
|
5ba579c97ee58c1d03ab0ccf58b1ae6dc81fa940
| 89,532
|
def bool_to_int(labels: list) -> list:
    """Return the indices at which *labels* holds a 1.

    E.g. [1, 0, 0, 1, 1] -> [0, 3, 4]. Used to create a valid Kaggle
    submission from a 0/1 label vector.
    """
    indices = []
    for position, flag in enumerate(labels):
        if flag == 1:
            indices.append(position)
    return indices
|
22fec6fae3f48d66c177653fe047048149851fc5
| 89,536
|
def order_terms_in_binding(result_vars, binding):
    """Order the terms of a SPARQL JSON *binding* to match *result_vars*.

    :param result_vars: Vars list from SPARQL JSON results
    :param binding: Item in bindings section of SPARQL results JSON
    :returns: A list of RDF terms (None for variables absent in the binding)
    :rtype: list
    """
    ordered = []
    for var in result_vars:
        ordered.append(binding.get(var))
    return ordered
|
bac2119766945f3641f15fd638ae8cd792bb714a
| 89,538
|
def palo_alto_tags(azure_dict, namespace='', delim='_'):
    """Convert a dict of MS Azure tags into Palo Alto Networks tag strings.

    Each tag becomes '<prefix><delim><key><delim><value>'; the prefix is
    'azure', optionally extended with *namespace*. Falls back to a single
    '<prefix><delim>None' tag when *azure_dict* cannot be consumed as
    string key/value pairs.
    """
    prefix = 'azure'
    if namespace:
        prefix = prefix + delim + namespace
    pan_tags = []
    try:
        for tag, value in azure_dict.items():
            pan_tags.append(prefix + delim + tag + delim + value)
    except (AttributeError, TypeError):
        # Narrowed from a bare `except`: not a dict, or non-string
        # keys/values. Emit the sentinel tag instead of failing.
        pan_tags.append(prefix + delim + 'None')
    return pan_tags
|
141a0d76780ddd03eb5df465569cb4b73166aefc
| 89,540
|
from pathlib import Path
def get_datacheck_files(pattern: str, directory: Path) -> list:
    """Return the files under *directory* matching glob *pattern*, sorted."""
    matches = directory.glob(pattern)
    return sorted(matches)
|
97a99442972dc0576257df14b048b4e8322e5603
| 89,545
|
def capitalize(s: str) -> str:
    """
    Make the first letter of *s* upper case.
    :param s: string (may be empty)
    :return: s with its first letter capitalized; '' stays ''
    """
    if not s:
        # Robustness fix: indexing s[0] raised IndexError on ''.
        return s
    return s[0].upper() + s[1:]
|
92f1bc8bb3a4ab2fcefff62ef61aeab399c36473
| 89,549
|
def reduce_pipeline(pipeline, iterable):
    """Feed *iterable* through each callable of *pipeline* in order."""
    current = iterable
    for stage in pipeline:
        current = stage(current)
    return current
|
c409185a148aa9076c4a60f6b8922bfd971771e0
| 89,551
|
def _fit_single_layer_loop(param):
"""
Loop body to be passed to the parallel pool.
Note: `func_fit_single_layer` can be:
(1) helper_hh_model.fit_HH_x_single_layer(), or
(2) helper_mkz_model.fit_H4_x_single_layer()
etc.
"""
damping_curve, other_params = param
(
func_fit_single_layer, use_scipy, pop_size, n_gen, lower_bound_power,
upper_bound_power, eta, seed, show_fig, verbose,
) = other_params
best_para = func_fit_single_layer(
damping_curve,
use_scipy=use_scipy,
n_gen=n_gen,
eta=eta,
pop_size=pop_size,
lower_bound_power=lower_bound_power,
upper_bound_power=upper_bound_power,
seed=seed,
show_fig=show_fig,
verbose=verbose,
parallel=False, # no par. within layers
)
return best_para
|
b1ef1a5c016b9bac9ec3d06808e1c97e9ce27230
| 89,555
|
def _str2num(s):
"""Converts a string to a number checking whether it is int or not"""
return int(s) if s.lstrip('-').isdigit() else float(s)
|
a2c38164745ddf3734c87c1002c5b5e994a9a6aa
| 89,556
|
def check_parentality(object_a, object_b):
    """Walk up object_b's parent chain; True if object_a appears in it."""
    parent = object_b.parent
    if not parent:
        return False
    if object_a == parent:
        return True
    return check_parentality(object_a, parent)
|
0ec90527f80b87910e4f284dacef95ca83c5c513
| 89,560
|
def new_pos(pos, up=0, down=0, left=0, right=0):
    """Offset an (x, y) origin position; y grows downward.

    >>> new_pos((0,0), down=1)
    (0, 1)
    >>> new_pos((2,3), up=2, left=1)
    (1, 1)
    """
    if not pos:
        return None
    x, y = pos
    dx = right - left
    dy = down - up
    return (x + dx, y + dy)
|
3a5d2065fb6cba6b90c8e28569a28ec9ad25a09f
| 89,561
|
def nested_update(this, that):
    """Recursively merge *that* into *this* (a deep ``dict.update``).

    Scalar conflicts are won by *that*; when both sides hold dicts the
    merge recurses instead of replacing. A dict in *this* is never
    replaced by a non-dict from *that*. *this* is mutated and returned.

    >>> nested_update({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
    {'a': 1, 'b': 3, 'c': 4}
    >>> nested_update(
    ...     {'x': {'a': 1, 'b': 2}, 'y': 5, 'z': 6},
    ...     {'x': {'b': 3, 'c': 4}, 'z': 7, '0': 8},
    ... )
    {'x': {'a': 1, 'b': 3, 'c': 4}, 'y': 5, 'z': 7, '0': 8}
    """
    for key, current in this.items():
        if isinstance(current, dict):
            incoming = that.get(key)
            if isinstance(incoming, dict):
                nested_update(current, incoming)
        elif key in that:
            this[key] = that[key]
    # Keys present only in `that` are copied over untouched.
    for key, incoming in that.items():
        this.setdefault(key, incoming)
    return this
|
1f218df42cd37e328118fe7e87b2523aae622be9
| 89,562
|
import re
def generate_safe_filename_prefix_from_label(label):
    """Lower-case *label* and replace every non-alphanumeric char with '_'.

    :param label: arbitrary label string
    :return: a filename-safe prefix string
    """
    # Raw string fixes the invalid-escape DeprecationWarning from '\W'.
    return re.sub(r'\W', '_', label.lower())
|
68991dbd9156713d608d5f31e66d3140edbf28cf
| 89,565
|
def capture_field(field, value):
    """Format one register as a text-format dict entry line.

    :param field: register name
    :param value: register value; hex-parseable values are emitted bare,
        everything else is quoted as a string
    :return: string of definition
    """
    try:
        int(value, 16)
    except (ValueError, AttributeError):
        # Can't convert to int -> treat the value as a string literal.
        return " '{}': '{}',\n".format(field, value)
    return " '{}': {},\n".format(field, value)
|
52d6b895a7d26928725fe249cffd934361f14793
| 89,566
|
import json
def get_json(file):
    """
    Load JSON data from a file into memory.
    file: the path to the data file
    """
    # The `with` block closes the handle itself; the previous explicit
    # json_file.close() inside it was redundant.
    with open(file) as json_file:
        return json.load(json_file)
|
e50656d07d199b607cb6fbf6053af7e7816f736a
| 89,567
|
def _convert_valid_actions(valid_actions):
"""Convert provided valid_actions for gRPC.
Args:
valid_actions: Either None, a list of bools or a numpy boolean array.
Returns:
None if valid_actions is None. Otherwise, a list of bools.
"""
if valid_actions is None:
return None
return list(map(bool, valid_actions))
|
0d70c584aac8f67ff2a6ccd1326184e4fa0115ac
| 89,569
|
def replace(df, replace_dict):
    """Replace values per column in a dataframe.

    Args:
        df (`pandas.DataFrame`): the dataframe to replace values in
        replace_dict (dict): mapping of column -> {old: new}, e.g.::

            {
                'column_a': {'$': '', ',': ''},
                'column_b': {'bought': 1, 'sold': -1}
            }

    Returns the replaced frame produced by ``DataFrame.replace``.
    """
    return df.replace(replace_dict)
|
03d05c23e912baa1ccc9e67dc4efc4e85d708f70
| 89,575
|
import re
def get_cxx_struct_begin_regex(struct):
    """Compile a regex matching the opening line of C++ ``struct <name>``.

    >>> bool(get_cxx_struct_begin_regex("Foo").match("struct Foo {"))
    True
    >>> bool(get_cxx_struct_begin_regex("Foo").match("struct Foo"))
    True
    >>> bool(get_cxx_struct_begin_regex("Foo").match("struct FooBar"))
    False
    """
    pattern = r"^\s*struct\s+{}([\W]|$)".format(struct)
    return re.compile(pattern)
|
6a837e94b39294a6ffb8be7f1ae2f30a45fda956
| 89,577
|
def get_power_level(x: int, y: int, serial: int = 9306) -> int:
    """Power level of the fuel cell at (x, y) for grid serial *serial*.

    Arguments:
        x {int} -- x coordinate
        y {int} -- y coordinate
    Keyword Arguments:
        serial {int} -- Serial number (default: {9306})
    Returns:
        int -- Power level
    """
    rack_id = x + 10
    level = (rack_id * y + serial) * rack_id
    hundreds_digit = (level // 100) % 10
    return hundreds_digit - 5
|
fa6ab944cc3789766f8b9d90267c43271d1d1455
| 89,579
|
import copy
def permutation(n):
    """Return all permutations of the integers 1..n.

    Builds them recursively by inserting n into every position of each
    permutation of 1..n-1 (elements are ints, so a shallow copy suffices).
    """
    if n == 1:
        return [[1]]
    expanded = []
    for shorter in permutation(n - 1):
        for position in range(n):
            candidate = list(shorter)
            candidate.insert(position, n)
            expanded.append(candidate)
    return expanded
|
f2fe77f43362a79d0aa8e01fdf2eb1fc9c684ca5
| 89,581
|
import re
def contains_num_only_uuid(string):
    """True when *string* contains a '13614-31311-31347'-style pattern.

    Three 5-digit groups joined by dashes, as in toil's test-generated
    sdb domain names.
    """
    match = re.search('[0-9]{5}-[0-9]{5}-[0-9]{5}', string)
    return match is not None
|
2b98020ac01bb06e13cc91bc5cd83dc2d2a1176a
| 89,585
|
def _null_terminate(value: str) -> str:
"""Null terminate the given string."""
if "\x00" in value:
value = value.split("\x00")[0]
return f"{value}\x00"
|
417129084d2ea34dd85b601d9d70f879ab8b997d
| 89,589
|
import json
def getEndpoint(chain='mainnet'):
    """Return the Infura endpoint URL for the given chain name.

    Possible chain values: mainnet, kovan, rinkeby, goerli, ropsten.
    Assumes a JSON file one folder level above this Python file, called
    secretInfuraCredentials.json, holding the Infura account credentials
    with one '<CHAIN>_ENDPOINT' entry per chain.

    Parameters
    ----------
    chain : str, optional
        Chain name; the default is 'mainnet'.

    Raises
    ------
    ValueError
        When no endpoint is configured for *chain*.

    Returns
    -------
    endpoint : str
        url to endpoint for specified chain.
    """
    with open('../secretInfuraCredentials.json', 'r') as file:
        credentials = json.load(file)
    try:
        return credentials[f'{chain.upper()}_ENDPOINT']
    except KeyError:
        # Narrowed from a bare `except`, which also masked unrelated errors.
        raise ValueError(f'COULD NOT FIND ENDPOINT FOR SPECIFIED CHAIN: {chain.upper()}')
|
1c753665fb5bfb551a37859b73fc31028a6bdd6c
| 89,590
|
def array_keys(space, w_arr, w_search=None, strict=False):
    """ Return all the keys or a subset of the keys of an array

    :param space: object space providing iteration/comparison primitives
    :param w_arr: wrapped array whose keys are collected
    :param w_search: optional wrapped value; when truthy, only keys whose
        value compares equal (via space.str_eq) are kept
    :param strict: when True additionally requires matching `.tp` fields
        -- NOTE(review): presumably the wrapped type tag (PHP's ===);
        confirm against the object space implementation
    """
    lst = []
    with space.iter(w_arr) as itr:
        while not itr.done():
            w_key, w_val = itr.next_item(space)
            if w_search:
                # Loose value match first, then the optional strict type check.
                if space.str_eq(w_val, w_search):
                    if not strict or w_val.tp == w_search.tp:
                        lst.append(w_key)
            else:
                lst.append(w_key)
    return space.new_array_from_list(lst)
|
9b9b7732294821bcc666fc3ca1bf5301e620cda6
| 89,592
|
def sample2(x):
    """Sample function: square a number.

    :param x: number
    :return: x squared
    """
    squared = x ** 2
    return squared
|
c5b22a9fc5ac46a556daa5b2fa478a0c2e1e84d0
| 89,596
|
def pick(o, default=False, *values):
    """Return o[v] for the first v in *values* present in *o*.

    :param o: mapping (or anything supporting `in` and indexing)
    :param default: returned when none of *values* is a key of *o*
    :param values: candidate keys, checked in order
    :return: the first existing value, else *default*
    """
    for candidate in values:
        if candidate in o:
            return o[candidate]
    return default
|
d3c2d24a497823e3cea31d9517ccd1a66d299827
| 89,601
|
def distance_to(from_cell, to_cell):
    """
    Manhattan distance between two cells of a rectangular maze.
    """
    d0 = from_cell[0] - to_cell[0]
    d1 = from_cell[1] - to_cell[1]
    return abs(d0) + abs(d1)
|
3769d804baa4530035bdfdefe7654bba5e3aa813
| 89,604
|
def to_single_data(input):
    """Convert an input to a single bcbio data/world object.

    A one-element list/tuple (CWL single-sample case) is unwrapped;
    a dict (standard bcbio case) passes through unchanged.
    """
    if isinstance(input, (list, tuple)) and len(input) == 1:
        return input[0]
    assert isinstance(input, dict), input
    return input
|
f028949f478f2a20f754ae059d6f3d5cb48d4ff2
| 89,607
|
def enum(*sequential):
    """Create a simple enumeration type whose members equal their names."""
    members = {name: name for name in sequential}
    return type(str("Enum"), (), members)
|
c96f15378c6e6cf97d1d781beaba8ff099b6d1bb
| 89,608
|
def set_weight(courier_type):
    """Return the maximal carry weight for a courier of *courier_type*."""
    limits = {"foot": (2, 10), "bike": (5, 15), "car": (9, 50)}
    _, weight_max = limits[courier_type]
    return weight_max
|
7667aea6a01b938ccd93a6938373e9262381f3e4
| 89,609
|
def caesar_cipher(message: str, n: int) -> str:
    """Encrypt *message* with a Caesar shift of *n* places.

    Uppercase, lowercase and digit characters each wrap within their own
    range; every other character is passed through unchanged.

    @:param message -> The message to be encrypted.
    @:n -> The amount of places the message has to be shifted.
    @:return -> Encrypted Message.
    """
    shifted = []
    for char in message:
        if char.isupper():
            shifted.append(chr((ord(char) - 65 + n) % 26 + 65))
        elif char.islower():
            shifted.append(chr((ord(char) - 97 + n) % 26 + 97))
        elif char.isdigit():
            shifted.append(chr((ord(char) - 48 + n) % 10 + 48))
        else:
            shifted.append(char)
    return "".join(shifted)
|
790003b23a22ae00e40f452bbe9de0f85bfc0a54
| 89,611
|
def get_function_types(subcommand_params):
    """Return the set of 'function_type' values used across all subcommands."""
    return {subcommand["function_type"] for subcommand in subcommand_params}
|
c8ad557845dc69842037cdcf5575840ed4f50ec1
| 89,616
|
def _is_nak(msg):
"""Test if a message is a NAK from the modem."""
if hasattr(msg, "ack") and msg.ack.value == 0x15:
return True
return False
|
377eb6eed3fc160c8bba0b3e9c509f9b67e344ce
| 89,623
|
from functools import reduce
def get_nested_value(dictionary, *keys, default=None):
    """Follow *keys* down a nested dict and return the value found.

    :param dictionary: The nested dictionary.
    :type dictionary: dict
    :param default: The default value.
    .. tip::
        Be careful when supplying a default value. This will cover errors
        when specifying ``keys``.
    """
    key_path = list(keys)
    try:
        value = dictionary
        for step in key_path:
            value = value[step]
        return value
    except KeyError:
        if default is not None:
            return default
        raise
    except TypeError:
        if default is not None:
            return default
        raise KeyError("%s" % key_path[-1])
|
37ec58ecb22cc2d7938ef784923e22442a110cb5
| 89,627
|
import six
def filter_none(**kwargs):
    """Remove any entries from a dictionary where the value is None."""
    # Python 3: dict.items() replaces six.iteritems — six is unneeded here.
    return {k: v for k, v in kwargs.items() if v is not None}
|
93f470546b09f65906c2e2bac6bdd01396226ede
| 89,629
|
def _remove_bom_from_scalar(row, bom):
"""Remove a byte order marker (BOM) from a scalar"""
try:
return row.lstrip(bom)
except AttributeError:
return row
|
c332dd26b09fdac96a9a1edd43733e0e75cde57e
| 89,631
|
from datetime import datetime
import pytz
def get_timestamp(timezone):
    """Return the current time in the named timezone as an ISO 8601 string."""
    now = datetime.now(pytz.timezone(timezone))
    return now.isoformat()
|
f20cbb00a51a687328515dda25be6dcebbe01936
| 89,635
|
def create_node(_id, _label, _type, _shape=''):
    """Build a graph node record.

    Parameters:
        _id (str): unique id
        _label (str): label shown in graph (falls back to _id when falsy)
        _type (str): type of node according to schema_key_dict
        _shape (str): shape as visualized in graph
    """
    data = {
        'id': _id,
        '_label': _label if _label else _id,
        '_type': _type,
        '_shape': _shape,
    }
    return {'data': data, 'classes': ''}
|
40fd3e8457a1107b998383def10c4f79acd10d77
| 89,637
|
import torch
def cluster_utts(utts: torch.Tensor) -> torch.Tensor:
    """Assign a cluster id to each column of an [S][N] int64 tensor.

    Columns (examples) that are elementwise identical over the full length
    S share a cluster id; ids are dense, starting at 0, in order of first
    appearance. Because entire columns are compared, padding is handled
    implicitly as long as the padding id is consistent across examples.
    """
    _, num_examples = utts.size()
    assignment = torch.zeros(num_examples, dtype=torch.int64)
    assigned = torch.zeros(num_examples, dtype=torch.bool)
    next_id = 0
    for idx in range(num_examples):
        if assigned[idx]:
            continue
        # Boolean mask of all columns identical to column `idx`.
        same = (utts == utts[:, idx:idx + 1]).all(dim=0)
        assignment[same] = next_id
        assigned[same] = True
        next_id += 1
    return assignment
|
e4780f7aeae1f2d0b9b15d4e412594ea0703b513
| 89,645
|
def CalculateOverlapInIntervals(range_1, range_2):
    """Measure the overlap between two floating-point intervals.

    Args:
        range_1 (FloatRange): The first interval, with .lower/.upper bounds.
        range_2 (FloatRange): The second interval.
    Returns:
        (float): The maximum proportion of either interval that the other
        overlaps; 0.0 when disjoint, 1.0 when one interval is a single
        point covered by the other.
    """
    assert range_1.upper >= range_1.lower, (
        'Usage: ({}, {}) Lower bound must be lower than upper bound'.format(
            range_1.lower, range_1.upper))
    assert range_2.upper >= range_2.lower, (
        'Usage: ({}, {}) Lower bound must be lower than upper bound'.format(
            range_2.lower, range_2.upper))
    overlap = min(range_1.upper, range_2.upper) - max(range_1.lower, range_2.lower)
    if overlap < 0.0:
        # The intervals are disjoint.
        return 0.0
    width_1 = range_1.upper - range_1.lower
    width_2 = range_2.upper - range_2.lower
    if width_1 == 0.0 or width_2 == 0.0:
        # A single-point interval overlapped by the other; comparing to 0
        # with '==' here is exact and prevents division by zero below.
        return 1.0
    return max(overlap / width_1, overlap / width_2)
|
6fdb0a4c88cb711ccfaf466f02f3a7bf8da4c667
| 89,647
|
def choose(n, k):
    """Binomial coefficient C(n, k)."""
    if n < k:
        raise Exception("n cannot be less than k")
    if k < 0:
        raise Exception("k must be nonnegative")
    # Accumulate numerator and denominator separately so the final integer
    # division is exact, avoiding float precision loss for large numbers.
    numerator = 1
    denominator = 1
    for i in range(1, k + 1):
        numerator *= n + 1 - i
        denominator *= i
    return numerator // denominator
|
c86fa4d9b315d60243d6b4c58973d85e0c886c52
| 89,657
|
def test_ndim(request):
"""Test dimension."""
return request.param
|
34bc3db2834b3e4b97ff12716cb9430faab58792
| 89,658
|
import math
def center_crop(size, image):
    """Crop a centered ``size`` x ``size`` window from an H x W x C image.

    Args:
        size (int): size of the cropped height and width.
        image (array): the image to crop; must be at least ``size`` in
            both spatial dimensions.
    """
    height, width = image.shape[0], image.shape[1]
    y_offset = int(math.ceil((height - size) / 2))
    x_offset = int(math.ceil((width - size) / 2))
    cropped = image[y_offset:y_offset + size, x_offset:x_offset + size, :]
    assert cropped.shape[0] == size, "Image height not cropped properly"
    assert cropped.shape[1] == size, "Image width not cropped properly"
    return cropped
|
b85c567c4d6cdd7a96ae28e4e36d01b2fd82fc39
| 89,660
|
def _get_query_string(arg):
"""Return the query values given the first argument to a pymemcache command.
If there are multiple query values, they are joined together
space-separated.
"""
keys = ""
if isinstance(arg, dict):
arg = list(arg)
if isinstance(arg, str):
keys = arg
elif isinstance(arg, bytes):
keys = arg.decode()
elif isinstance(arg, list) and len(arg) >= 1:
if isinstance(arg[0], str):
keys = " ".join(arg)
elif isinstance(arg[0], bytes):
keys = b" ".join(arg).decode()
return keys
|
ddf49ba689a9ee5cc3103eebd8f2906e4524b339
| 89,662
|
def review_restaurant_name(review):
    """Return the reviewed restaurant's name (string), stored first."""
    name = review[0]
    return name
|
95111ce65ddbec8ac8c9ced3e885a605af1c44d7
| 89,667
|
def map_cap_to_opnames(instructions):
    """Group instruction opnames by the capability that enables them.

    Arguments:
    - instructions: a list containing a subset of SPIR-V instructions' grammar
    Returns:
    - A dict mapping each capability to the list of opnames it enables.
      Instructions without a 'capabilities' entry fall under '0_core_0'.
    """
    cap_to_inst = {}
    for inst in instructions:
        for cap in inst.get('capabilities', ['0_core_0']):
            cap_to_inst.setdefault(cap, []).append(inst['opname'])
    return cap_to_inst
|
45fea02c7a25a7d0e06f4aa3328fab530940093d
| 89,668
|
def is_transparent(im):
    """
    Detect if a PIL image has any transparent components.
    Based on https://stackoverflow.com/a/58567453
    """
    if im.mode == "P":
        # Palette image: transparent when any used color maps to the
        # palette index registered as transparent.
        transparent_index = im.info.get("transparency", -1)
        for _, palette_index in im.getcolors():
            if palette_index == transparent_index:
                return True
        return False
    if im.mode == "RGBA":
        alpha_min = im.getextrema()[3][0]
        return alpha_min < 255
    return False
|
f189dc1d4d80727d937f443aa9de60867db6fff5
| 89,671
|
import importlib
def _import_component(component_spec: str, force_component: bool = False):
"""
Import a module or module component from *spec*.
:param component_spec: String of the form "module_name" or "module_name:component_name" where
module_name must an absolute, fully qualified path to a module.
:param force_component: If True, *spec* must specify a component name
:return: the imported module or module component
"""
if ':' in component_spec:
module_name, component_name = component_spec.split(':', maxsplit=1)
else:
module_name, component_name = component_spec, None
if force_component and component_name is None:
raise ValueError('illegal spec, must specify a component')
if module_name == '':
raise ValueError('illegal spec, missing module path')
if component_name == '':
raise ValueError('illegal spec, missing component name')
module = importlib.import_module(module_name)
return getattr(module, component_name) if component_name else module
|
38822bd2d9072063d00f32f29e852c0158500338
| 89,674
|
import re
def market_text_filter(txt):
    """Strip a trailing PGP key block from Agora/Nucleus market text.

    Everything from the first 'BEGIN PGP' marker onward is removed.
    :param/return txt: String
    """
    marker = re.search(r'BEGIN PGP', txt)
    if marker is None:
        return txt
    return txt[:marker.start(0)]
|
86f7b8d454492385861a1734f8c761af71b36edf
| 89,679
|
import torch
def so3_metric(M):
    """Mean Frobenius deviation of M @ M^T from the 3x3 identity.

    Parameters
    ----------
    M : Tensor
        a (3,3,) or (N,3,3,) tensor
    Returns
    -------
    Tensor
        the (1,) metric tensor
    """
    gram = torch.matmul(M, torch.transpose(M, -1, -2))
    deviation = gram - torch.eye(3)
    return torch.mean(torch.norm(deviation, dim=(-2, -1), keepdim=True))
|
a79424a2162ab943689559a4f56b8dbe29d01020
| 89,680
|
import math
def Distribute(nEl, size, rank):
    """Distribute 'nEl' elements among 'size' containers.

    nEl is the number of elements to distribute
    size is total number of process
    rank is the ID of this process
    returns the indices (range or list) handled by `rank`
    """
    if size < 1:
        raise Exception('size cannot be zero')
    if size == 1:
        return range(nEl)
    if nEl < size:
        # Fewer elements than processes: the first nEl ranks get one each.
        return [rank] if rank < nEl else []
    base = int(math.floor(nEl / size))   # minimum elements per process
    leftover = nEl % size                # extras, given to the lowest ranks
    if rank < leftover:
        count = base + 1
        start = count * rank
    else:
        count = base
        start = (base + 1) * leftover + base * (rank - leftover)
    return range(start, start + count)
|
13fc0c76bb4129ba8e16cda81a9fb83f8e577fd5
| 89,681
|
def _minor_release(version):
"""Excludes the patch release from a version number, such that
only major and minor versions face strict matching."""
major_minor_version = version.rsplit('.', 1)[0]
if len(major_minor_version .split('.')) != 2:
raise ValueError("Version doesn't conform to `major.minor.patch` format.")
return major_minor_version
|
f6ac859ac8ac06b4337ef09d88ab834ea3f15a57
| 89,686
|
def is_empty_tile(ds):
    """A tile is empty when its mask sums to zero (no valid data)."""
    mask_total = ds['mask'].sum()
    return True if mask_total == 0 else False
|
cbfa04e661094848daa7e3844a74e2c8d01b8e68
| 89,692
|
def name_to_absolute(x):
    """Convert standard hg38 HLA name into ABSOLUTE naming.

    '-', '*' and ':' all become '_' and the result is lower-cased.
    """
    translation = str.maketrans("-*:", "___")
    return x.translate(translation).lower()
|
2a56eeb8bfec737fdb1cbeee36de2fb6cb9cce4a
| 89,693
|
def adjust_probabilities(p_list):
    """Rescale a list of probabilities so they sum to 1.

    Returns None for a missing or empty list; raises ValueError when the
    probabilities sum to zero (no valid rescaling exists).
    """
    if p_list is None or len(p_list) == 0:
        return None
    total = sum(p_list)
    if total == 1:
        return p_list
    if total == 0:
        raise ValueError('Probabilities may not sum to zero')
    return [probability / total for probability in p_list]
|
0c0cc515fa74b83a84405b0cd7f30d1933210ef7
| 89,695
|
def splitChunkAndSignature(chunk):
    """Separate the trailing signature from the rest of an IOTA tx chunk.

    Arguments:
        chunk {bytes} -- Everything inside the message field of an IOTA tx.
    Returns:
        bytes -- Everything except the trailing 64 bytes.
        bytes -- 64 bytes long signature.
    """
    data, signature = chunk[:-64], chunk[-64:]
    return data, signature
|
45a48784ccfcd36d4b0982191532e950464fd82f
| 89,696
|
def get_n_interaction(df, user_col='user_id', weight_dic=None):
    """Count (optionally weighted) interactions per user/reference pair.

    Returns a dataframe with columns: <user_col> | reference | n_interactions

    - Input:
        df -> pandas dataframe (needs <user_col> and 'reference' columns,
              plus 'action_type' when weights are supplied)
        user_col -> name of the user column
        weight_dic -> optional {action_type: weight}; when given, the
              weights are summed instead of simply counting rows
    """
    if weight_dic is None:  # fix: `is None`, not `== None` (PEP 8 / E711)
        counts = (
            df[[user_col, 'reference']]
            .groupby([user_col, "reference"])
            .size()
            .reset_index(name="n_interactions")
        )
        return counts
    # Map each action type to its weight, then sum weights per pair.
    weighted = df.replace({'action_type': weight_dic})
    weighted = weighted[[user_col, 'reference', 'action_type']]
    return (
        weighted.groupby([user_col, "reference"])['action_type']
        .agg('sum')
        .reset_index(name='n_interactions')
    )
|
5e7a6e683f111de3656f1097f898b1017a764d83
| 89,697
|
def get_new_field_item(field_update):
    """Return the (field_name, new_value) pair of a field update.

    ``field_update`` is ``(name, (old_value, new_value))``.
    """
    field_name = field_update[0]
    new_value = field_update[1][1]
    return (field_name, new_value)
|
efb8fc18182ce60ca24e6e8b1595a2125bb171c7
| 89,698
|
def docstring_template(desc, params, returns):
    """
    Returns a string with a specific formatting for a docstring
    Parameters
    ----------
    desc : str
        A short description at the top of the docstring
    params : dict
        A dictionary of parameters and string description
    returns : str
        A short description of expected object to return
    Returns
    -------
    docstring : str
    """
    # NOTE: the f-string's leading newline and 4-space indentation are
    # deliberate -- they are emitted verbatim into the generated docstring.
    docstring = f"""
    {desc}
    Parameters
    ----------
    {params}
    Returns
    -------
    {returns}
    """
    return docstring
|
8fb1dedca563db909c0cc272623675031473965d
| 89,702
|
def get_classes(hierarchy):
    """Walk a class hierarchy and return all of the classes.

    Tuples contribute their first element (the class); any other entry is
    treated as a nested sub-hierarchy and recursed into.
    """
    classes = []
    for entry in hierarchy:
        # Exact tuple check (not isinstance) so namedtuples etc. recurse.
        if type(entry) is tuple:
            classes.append(entry[0])
        else:
            classes.extend(get_classes(entry))
    return classes
|
4df86479a5a21546252a34f21b9ea4768fa9d140
| 89,704
|
def add(a: int, b: int):  # annotations are advisory, not enforced
    """Add two numbers.

    :param a: first number
    :param b: second number
    :return: the sum of the two numbers
    """
    return a + b
|
4a38756624a41bf5017ff07e87cea604f70f7dac
| 89,707
|
import pickle
def load_raw_data(file_name):
    """Load a previously pickled file into a variable and return it.

    Args:
        file_name (str): The absolute path to the requested file.
    Returns:
        object: The contents of the file.
    Raises:
        IOError
        pickle.PickleError
    """
    # A context manager guarantees the handle is closed on both the
    # success and error paths, replacing the manual open/close/except
    # bookkeeping; IOError and PickleError still propagate to the caller.
    with open(file_name, 'rb') as handle:
        return pickle.load(handle)
|
a6c4bf2b825b6b3bf7e34e5be2a52b820dae4897
| 89,710
|
def flatten_list_of_lists(list_of_lists):
    """
    Flatten one level of nesting.
    :param list_of_lists: List of lists
    :return: a single flat list with all the elements
    """
    flat = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
|
ed09dde3b72fce8c63aae124bcbcd520543ca69d
| 89,712
|
def validate_predefined_argument(argument_name: str, argument_value: object, argument_options: list) -> bool:
    """Validate that a predefined argument only uses allowed options.

    Args:
        argument_name (str): The name of the argument to validate.
        argument_value (object): Scalar or list of values to validate.
        argument_options (list): The argument's predefined options.
    Returns:
        bool: True when every value is valid; otherwise raises.
    """
    values = argument_value if isinstance(argument_value, list) else [argument_value]
    for value in values:
        if value in argument_options:
            continue
        raise Exception(f'Invalid {argument_name} argument. Please provide one of the following options:'
                        f'{str(argument_options)}')
    return True
|
1157ceb56e563701edf564c8ba685b0966320955
| 89,719
|
import math
import random
def generate_entities(num_entities=100):
    """Generate num_entities distinct random entity names for a synthetic knowledge graph."""
    alphabet = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
    # Smallest name length giving at least num_entities distinct samples
    # over an 18-symbol alphabet.
    name_len = int(math.log(num_entities, 18) + 1)
    entities = []
    while len(entities) < num_entities:
        candidate = "/entity_{}".format(''.join(random.sample(alphabet, name_len)))
        # Retry on collision so the result contains only unique names.
        if candidate not in entities:
            entities.append(candidate)
    return entities
|
6ccdf228dbfe6b04b002bdc648ed73d1c759f426
| 89,727
|
def s2n(s):
    """
    String to number: interpret the characters/bytes of *s* as a
    big-endian integer.  Returns 0 for empty input.

    The original used ``s.encode("hex")``, which only exists on
    Python 2 byte strings and raises LookupError on Python 3;
    ``int.from_bytes`` is the Python 3 equivalent.
    """
    if not len(s):
        return 0
    if isinstance(s, str):
        # Python 2 str was a byte string; latin-1 maps code points
        # 0-255 one-to-one onto bytes, matching the old behaviour.
        s = s.encode("latin-1")
    return int.from_bytes(s, "big")
|
a68e7e1b394a45893bd7d75093fb1854cf765afa
| 89,730
|
def number_to_letter(n):
    """Return the spreadsheet-style column label for a 1-based ordinal.

    1=A, 2=B, ..., 26=Z, then letters are appended Excel/Google Sheets
    style: 27=AA, 28=AB, ...  None (or 0) yields the empty string.
    """
    if n is None:
        n = 0
    letters = []
    while n > 0:
        # Bijective base-26: shift by one so 1..26 maps onto A..Z.
        n, remainder = divmod(n - 1, 26)
        letters.append(chr(ord('A') + remainder))
    return ''.join(reversed(letters))
|
14eafa5877bceec2772c1a067597a643a4e38186
| 89,733
|
def match_span(source_tree, source_lines):
    """
    Extract the source text covered by the given tree node from the original code.

    Parameters
    ----------
    source_tree : tree-sitter node object
        AST node whose span should be extracted.
    source_lines : list[str]
        Source code split into lines.

    Returns
    -------
    str
        The source text spanned by the node.
    """
    start_line, start_char = source_tree.start_point
    end_line, end_char = source_tree.end_point
    assert start_line <= end_line
    assert start_line != end_line or start_char <= end_char
    if start_line == end_line:
        # Whole span sits on a single line.
        return source_lines[start_line][start_char:end_char]
    # Trim the first and last lines to the span's columns, keep the
    # middle lines intact.
    span = source_lines[start_line:end_line + 1]
    span[0] = span[0][start_char:]
    span[-1] = span[-1][:end_char]
    return "\n".join(span)
|
de337605647fda5d36f40c7080421247796fe0eb
| 89,734
|
def KeepCol(state, command, *columns):
    """Keep columns

    Keep only specified columns, remove everything else.
    Example: KeepCol~column1~column2
    """
    selected = state.expand_columns(columns)
    state.log_info("Keep columns: " + (", ".join(selected)))
    frame = state.df()
    return state.with_df(frame.loc[:, selected])
|
568bdca7f064835b1ee043ef10b5d348ca06bcda
| 89,738
|
def resize_image(img, base_size):
    """Resize image `img` so that min(width, height) == base_size,
    preserving the aspect ratio.

    Parameters:
    -----------
    img: Image
        input image to resize
    base_size: integer
        minimal dimension of the returned image
    """
    width, height = img.size
    if width < height:
        # Width is the smaller side: pin it to base_size, scale height.
        target = (base_size, int(base_size * height / width))
    else:
        target = (int(base_size * width / height), base_size)
    return img.resize(target)
|
d0d08c4f322ba6d259f9993b1442f2101943fb92
| 89,741
|
def unique_list(inp_list):
    """
    Return a list of the unique values of inp_list, keeping the order of
    first occurrence (deterministic, unlike ``list(set(...))`` whose
    order depends on hashing).
    :usage:
        >>> unique_list(['a', 'b', 'c'] * 2)
        ['a', 'b', 'c']
    """
    # dicts preserve insertion order (Python 3.7+), so fromkeys
    # deduplicates while keeping the first occurrence of each element.
    return list(dict.fromkeys(inp_list))
|
0c4b476021a26c41fca683a919c999325069385d
| 89,743
|
def package_list_from_file(file):
    """List package names (without version specifiers or extras) from a package list file.

    Example: ``tqdm>=4.41.0`` -> ``tqdm``; ``pkg[extra]`` -> ``pkg``
    (``[`` marks a package with extras).

    Args:
        file: Path to the package list file.

    Returns:
        list[str]: The bare package names, one per non-empty entry.
    """
    # Characters that start a version specifier, extras marker, or comment.
    # Hoisted out of the loop: the original rebuilt this list per line.
    separators = ",=<>#["
    mocked_packages = []
    with open(file, "r") as fp:
        for ln in fp:  # iterate lazily instead of materialising readlines()
            cut_points = [ln.index(ch) for ch in separators if ch in ln]
            pkg = ln[: min(cut_points)] if cut_points else ln
            pkg = pkg.rstrip()
            if pkg:
                mocked_packages.append(pkg)
    return mocked_packages
|
e66e1406ef9ff68de329b7f0785a65875faeb1b6
| 89,745
|
def get_metafield(doc, metafield):
    """Fetch a metafield (e.g. _parent, _routing) from a search hit.

    ES 1.x returns metafields requested via 'fields' nested under the
    'fields' key; ES 2.x always returns them at the document root (like
    _id).  The root-level value takes precedence; missing yields None.
    """
    if metafield in doc:
        return doc[metafield]
    return doc.get('fields', {}).get(metafield)
|
e214ea706eb7ba0a744fea0cb95fbd6c1b24cb4e
| 89,746
|
def ParseLogLines(lines):
    """Parse log file lines produced by a profiled run.

    The first line is a /proc/maps-style mapping, e.g.::

        5086e000-52e92000 r-xp 00000000 b3:02 51276 libchromeview.so

    followed by a header line and then one whitespace-separated record
    per logged call (secs, msecs, pid:threadid, func address).

    Args:
        lines: lines from the log file produced by the profiled run.

    Returns:
        Tuple consisting of 1) an ordered list of the logged calls, each
        as a list of fields, 2) the virtual start address of the library,
        used to compute the offset of the symbol in the library, and
        3) the virtual end address.
    """
    mapping = lines[0]
    dash_index = mapping.find('-')
    space_index = mapping.find(' ')
    vm_start = int(mapping[:dash_index], 16)
    vm_end = int(mapping[dash_index + 1:space_index], 16)
    # Skip the mapping and header lines; every remaining line becomes
    # one record of whitespace-separated fields.
    call_lines = [line.strip().split() for line in lines[2:]]
    return (call_lines, vm_start, vm_end)
|
e2c06383a4fea5d4be42f525ce6110fc90df7b34
| 89,747
|
def is_stop(tag):
    """Check if an HTML tag is a stop.

    :param tag: A BeautifulSoup tag
    :return: True if the tag is a link to a "displayBusStopDetails" page, False otherwise
    """
    if not tag.has_attr("href"):
        return False
    return tag["href"].startswith("/displayBusStopDetails?")
|
7b650c27125aee906469e5176b1d3abbd1389639
| 89,749
|
def URLify(input_string):
    """Replace spaces in input_string with '%20', without using the
    replace() method of Python str objects.

    A single pass over the characters replaces the original
    repeated-slice splicing, which copied O(n) characters per space
    (O(n*k) total) and relied on fragile index bookkeeping — the original
    docstring itself flagged the slicing as suboptimal.

    Parameters
    ----------
    input_string : str
        String to process

    Returns
    -------
    str
        input_string, with spaces replaced by '%20'
    """
    # ''.join over a generator builds the result in O(n) without
    # quadratic concatenation.
    return ''.join('%20' if char == ' ' else char for char in input_string)
|
7e41cc8c63acc8c92a9af42733e5b8f960f1264e
| 89,754
|
import math
def square_root(num: int):
    """
    Return the square root of the arg `num`.
    """
    return math.sqrt(num)
|
de2686a3faae5f8d3254de856b8464201b856a22
| 89,757
|
def hasTag(tagName, typeOrPropertyObj):
    """Check whether the given object carries a tag with the given name.

    Keyword arguments:
    tagName -- name of the tag to look for
    typeOrPropertyObj -- type or property object to check
    """
    # Objects without a 'tags' attribute trivially have no tags.
    if not hasattr(typeOrPropertyObj, 'tags'):
        return False
    return any(tag.name == tagName for tag in typeOrPropertyObj.tags)
|
566fd38881b2512ba6644a54e0974b4777dbff58
| 89,764
|
def is_pull_request(issue):
    """Return True if the given issue dict represents a pull request."""
    # An issue is a PR when its 'pull_request' entry carries an html_url.
    html_url = issue.get('pull_request', {}).get('html_url', None)
    return bool(html_url)
|
276f5eeb363e073ed2572c8dcad30a26a1a2019d
| 89,768
|
from functools import reduce
import operator
def get_var(dataset, id_):
    """Given a dotted id, return the corresponding variable from the dataset."""
    # Walk the dataset one key at a time (equivalent to the reduce/getitem
    # fold over [dataset] + tokens).
    var = dataset
    for token in id_.split('.'):
        var = var[token]
    return var
|
fdcb1f90461d05eb354aa8372d55d7e42e0164bf
| 89,772
|
from pkg_resources import WorkingSet
def has_package(package: str) -> bool:
    """Check if the given package is available.

    :param package: Package name to search; hyphen-insensitive
    :return: Whether the given package name is installed to the current environment.
    """
    # Normalise hyphens to underscores on both sides so the comparison
    # is insensitive to the '-' vs '_' naming difference.
    target = package.replace('-', '_')
    return any(
        target == dist.project_name.replace('-', '_')
        for dist in WorkingSet()
    )
|
ec93815a0f4869c5b2f45bfde759ca46137c8eaf
| 89,774
|
import re
def find_os_compression_type(filename):
    """ Find if filename is OS compressed
        filename: the filename to check
        return: compression type (if any) else None. The compression type is
        returned as a string and can be any of:
        ['.Z', '.gz', '.tar.gz', '.zip']
    """
    # endswith() fixes the old regexes, whose unescaped '.' matched any
    # character (e.g. 'fileZ' wrongly matched '.*.Z$').
    # '.tar.gz' is tested before '.gz' so the more specific suffix wins,
    # matching the original check order.
    for suffix in ('.Z', '.tar.gz', '.gz', '.zip'):
        if filename.endswith(suffix):
            return suffix
    return None
|
e602560877084c7c4e9424c07baf6d432eb7530f
| 89,775
|
def running_gemini_api(context):
    """Report whether the context flags the gemini API pod as running."""
    return getattr(context, "is_gemini_api_running")
|
72840fa7487fa58a9415a98bec5e4e777ec05689
| 89,780
|
import torch
def _make_tensor(
data, cols: int, dtype: torch.dtype, device: str = "cpu"
) -> torch.Tensor:
"""
Return a 2D tensor with the specified cols and dtype filled with data,
even when data is empty.
"""
if not len(data):
return torch.zeros((0, cols), dtype=dtype, device=device)
return torch.tensor(data, dtype=dtype, device=device)
|
f7309eb835bad7708230fa504c0f8f1735031ef8
| 89,787
|
def latest_date_before(starting_date, upper_bound, time_step):
    """
    Find the latest result_date such that
        result_date = starting_date + n * time_step  (for some integer n)
        result_date <= upper_bound
    :type starting_date: pd.Timestamp
    :type upper_bound: pd.Timestamp
    :type time_step: pd.Timedelta
    :return: pd.Timestamp
    """
    current = starting_date
    # Step backwards until we are at or below the bound...
    while current > upper_bound:
        current -= time_step
    # ...then step forwards as long as another full step still fits.
    while upper_bound - current >= time_step:
        current += time_step
    return current
|
bf4a67a3dc80c81c6141a28bce73d6f2c113ccaa
| 89,792
|
def find_value_in_context(key, context):
    """
    Find and return the first value for a key-value pair with 'key' in the
    given context, following nested 'context' entries. If not present in
    any enclosing context, return None.
    """
    while key not in context:
        # dict.has_key() was removed in Python 3; 'in' is the equivalent.
        if 'context' in context:
            context = context['context']
        else:
            return None
    return context[key]
|
cd30f0039a03708f1e89ece006c20a3c1278315b
| 89,793
|
import random
def rnd(seq):
    """Generate a random index for a sequence."""
    # Scale a uniform float in [0, 1) up to the sequence length and
    # truncate, yielding an index in range(len(seq)).
    size = len(seq)
    return int(size * random.random())
|
d8bf8682f708aa56d36f6f92bbbf9691d334ee20
| 89,794
|
def generate_tap_stream_id(catalog_name, schema_name, table_name):
    """Generate the tap stream id as it appears in properties.json
    (``<catalog>-<schema>-<table>``)."""
    return '-'.join((catalog_name, schema_name, table_name))
|
65b2e024009372d327774f48470f092d31638e60
| 89,795
|
def service_item(service, status, openapi, endpoints):
    """Build the service item structure.

    If status=='OK' and openapi is empty then:
    * it is a REST X-Road service without a description;
    * the endpoints array is empty.
    If status=='OK' and openapi is not empty then:
    * it is an OpenAPI X-Road service with a description;
    * at least one endpoint must be present in the OpenAPI description.
    Any other status indicates a problem with the service.
    """
    # service[4] holds the service code; keyword form keeps the same
    # key insertion order as the original literal.
    return dict(
        serviceCode=service[4],
        status=status,
        openapi=openapi,
        endpoints=endpoints,
    )
|
c53b2a9b1f2791e0878eaae53af2d8826291b720
| 89,802
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.