content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def im_pares(lista):
    """Split the integers of *lista* into two lists: (evens, odds)."""
    evens = [n for n in lista if n % 2 == 0]
    odds = [n for n in lista if n % 2 != 0]
    return evens, odds
|
4c8171533efa6cb79d36f87b8460644a43a4751c
| 452,770
|
def time_format(timestamp):
    """Render *timestamp* as e.g. 'Jan 02, 2020 - 03PM'."""
    pattern = "%b %d, %Y - %I%p"
    return timestamp.strftime(pattern)
|
84896fb2295c7a957bb8bc09af8280772de2767c
| 193,734
|
def latest_date_before(starting_date, upper_bound, time_step):
    """
    Find the latest result_date of the form starting_date + n * time_step
    (n any integer) such that result_date <= upper_bound.
    :type starting_date: pd.Timestamp
    :type upper_bound: pd.Timestamp
    :type time_step: pd.Timedelta
    :return: pd.Timestamp
    """
    candidate = starting_date
    # Step backwards if we started beyond the bound.
    while candidate > upper_bound:
        candidate -= time_step
    # Step forwards while a whole step still fits under the bound.
    while upper_bound - candidate >= time_step:
        candidate += time_step
    return candidate
|
bf4a67a3dc80c81c6141a28bce73d6f2c113ccaa
| 89,792
|
def insertion_sort(arr):
    """Sort *arr* in place using insertion sort and return it."""
    for idx in range(1, len(arr)):
        current = arr[idx]
        pos = idx
        # Shift larger elements right until current's slot is found.
        while pos > 0 and current < arr[pos - 1]:
            arr[pos] = arr[pos - 1]
            pos -= 1
        arr[pos] = current
    return arr
|
f94da3a933fd24c81a16d90bd229170a8ac1e8a0
| 443,747
|
import fnmatch
def match_entry_to_list(pattern_list, entry):
    """
    Check a string against a list of filename patterns.
    Parameters:
        pattern_list - The list of filename patterns to check against
        entry - The string to check
    Returns:
        True if the string matches any of the filename patterns, False otherwise.
    """
    return any(fnmatch.fnmatch(entry, pattern) for pattern in pattern_list)
|
430e233b2c77bc686f8b357a4cd7b7d946f88208
| 536,754
|
import hashlib
def rfc6962_hash_leaf(leaf):
    """RFC6962 hashing function for hashing leaves of a Merkle tree.
    Arguments:
        leaf: A bytearray containing the Merkle tree leaf to be hashed.
    Returns:
        A bytearray containing the RFC6962 SHA256 hash of the leaf.
    """
    # RFC6962 prepends a 0x00 byte to leaf data (interior nodes use 0x01)
    # for second-preimage attack resistance.
    return hashlib.sha256(b'\x00' + bytes(leaf)).digest()
|
19589a6f96def8a9a000c932b38a97515383da5b
| 585,391
|
def TTPN_calc(item1, item2):
    """
    Compute the fraction item1 / (item1 + item2), the common form of
    TPR, TNR, PPV and NPV.
    :param item1: item1 in fractional expression
    :type item1: int
    :param item2: item2 in fractional expression
    :type item2: int
    :return: result as float, or the string "None" when the denominator is zero
    """
    try:
        return item1 / (item1 + item2)
    except ZeroDivisionError:
        return "None"
|
b68942f41e7d72d6b7617f9e0f8736e876806d83
| 425,406
|
def get_name_parts(au):
    """Split an author name into (last, first, initials).

    Fares Z. Najar => last, first, initials
    >>> get_name_parts("Fares Z. Najar")
    ('Najar', 'Fares', 'F.Z.')
    """
    parts = au.split()
    first = parts[0]
    # Tokens ending with '.' are treated as abbreviated (middle) names.
    middle = [x for x in parts if x[-1] == '.']
    middle = "".join(middle)
    # Non-abbreviated tokens after the first form the last name.
    last = [x for x in parts[1:] if x[-1] != '.']
    last = " ".join(last)
    initials = "{0}.{1}".format(first[0], middle)
    if first[-1] == '.':  # Some people abbreviate the first name and spell out the middle name
        # NOTE(review): assumes `last` then holds "<middle> <surname ...>";
        # a single-word `last` would make this split raise ValueError — confirm inputs.
        middle, last = last.split(None, 1)
        initials = "{0}.{1}.".format(first[0], middle)
    return last, first, initials
|
02a683c67c7a6458f3c085e150fbb452752980a0
| 273,462
|
def timeEstimateDays(periodType, timeToElapse):
    """
    Convert a time span to days: "days" passes through, "weeks" is
    multiplied by 7, anything else is treated as months of 30 days.
    """
    period = periodType.lower()
    if period == "days":
        return timeToElapse
    if period == "weeks":
        return 7 * timeToElapse
    return 30 * timeToElapse
|
422302c696f36e9bba76bde10559f44c08c1cbb3
| 325,730
|
def fwhm(lambda_, d, alpha1=1.3):
    """
    Nominal Full Width Half Maximum (FWHM) of a LOFAR station beam.
    :param lambda_: wavelength in meters
    :param d: station diameter.
    :param alpha1: tapering factor intrinsic to the station layout plus any
                   additional tapering used to form the station beam. For a
                   uniformly illuminated circular aperture alpha1 is 1.02 and
                   grows with tapering (Napier 1999); no electronic tapering
                   is presently applied to LOFAR station beamforming.
    :returns: the nominal FWHM
    """
    beam_width = alpha1 * lambda_ / d
    return beam_width
|
4dfe8718af7cf9912b31fdd55d6b6f4f8a131a51
| 624,951
|
def _move(shape, position, orientation):
"""
Rotate a shape into an orientation, then move it to a position. Return the
new shape.
"""
placed_shape = []
for point in shape:
rotated_point = orientation.rotate(point)
placed_shape.append((rotated_point[0] + position[0], rotated_point[1] + position[1], rotated_point[2] + position[2]))
return placed_shape
|
86bf26aa3a117cb7ef1413a9467e92cfd99bc2b8
| 441,987
|
def calculations_rain_module(basin, rain_intensity, timestep):
    """
    Volume of rain falling on a basin during one timestep.
    :param basin: Basin in which the rain is falling (must expose SurfaceArea)
    :param rain_intensity: amount of rain falling per hour [mm/hour]
    :param timestep: amount of seconds per timestep
    :return: volume of rain entering the basin in one timestep [m^3]
    """
    # mm/hour -> m/hour, seconds -> hours.
    depth_per_hour = rain_intensity / 1000
    hours = timestep / 3600
    return basin.SurfaceArea * depth_per_hour * hours
|
ed894972f08024bf3913ec3f933fbb07b95b4fca
| 632,292
|
import re
def snekify(line: str) -> str:
    """Lower-case the alphanumeric runs of *line* and join them with underscores."""
    tokens = re.findall(r'[A-Za-z0-9]+', line)
    lowered = [token.lower() for token in tokens]
    return '_'.join(lowered)
|
208fb56eb04f8f6fa5fe872da7e4aafc347fee52
| 500,003
|
def prepare_search(search_term):
    """Return a search term that the api can use to query the db
    :param search_term: string
    :return: search dict (empty when no term was given)
    """
    if not search_term:
        return {}
    return {"match": [{"_all": search_term}]}
|
8fb10ade615fc960a5c9dbadb710ea0d93be911c
| 374,060
|
def get_distinguished_name(queue_entry):
    """
    Return the first distinguished_name stored in the queue entry's
    sawtooth data payload.
    """
    return queue_entry["data"]["distinguished_name"][0]
|
07ccab6661c64a41b46a05989b5b8917a4f13a81
| 135,398
|
def format_env(**d):
    """Render keyword args as an env-var string: 'var1="val1" var2="val2" '.

    Note the trailing space after each pair.
    """
    return ''.join('{0}="{1}" '.format(key, value) for key, value in d.items())
|
d2861dc90d2d5f8d0f349a271426b3b6d85b424c
| 336,036
|
def get_board_score(board):
    """
    Sum all unmarked (non-None) numbers of the board, per the puzzle rules.
    :param board: List[List[str]]
    :return: int
    """
    return sum(int(num) for row in board for num in row if num is not None)
|
5a0abd5a8fdda15b8d03719ef6f8e63c9f0fed6e
| 617,475
|
def index_from(sequence, x, start_from=0):
    """ Collect all indices of *x* in *sequence* at or after *start_from*.
    :param sequence: a sequence
    :type sequence: List[object]
    :param x: an object to index
    :type x: object
    :param start_from: start point
    :type start_from: int
    :return: indices of the matched objects
    :rtype: List[int]
    """
    return [idx for idx in range(start_from, len(sequence)) if x == sequence[idx]]
|
8013215fe1ecf5486922d94fc5ae5c1cedc555c5
| 512,326
|
def crop_image_to_square(img):
    """Crop *img* to the largest square that fits inside it.
    The crop is anchored at the top-left corner.
    Args:
        img: image of type PIL image, e.g. PIL.JpegImagePlugin.JpegImageFile.
    Returns:
        Square image of same type as input image.
    """
    side = min(img.height, img.width)
    box = (0, 0, side, side)
    return img.crop(box)
|
d4633c17e826048688aa9157827005e40870ad35
| 219,458
|
def decode_str(string):
    """Convert a bytestring to a unicode string; no-op for strings.

    Python 3 distinguishes bytes from str, and file/OS operations often
    yield bytes. We normalise everything to unicode; objects without a
    ``decode`` method pass through unchanged (python2 compatible).
    :param string: Either a string or bytes.
    :type string: ``str`` or ``bytes``
    :return: A unicode string.
    :rtype: ``str``
    """
    decode = getattr(string, "decode", None)
    if decode is None:
        return string
    return decode("utf-8")
|
93550908fffbea174786e70471963ecf185f0ccf
| 332,539
|
import typing
import inspect
def method_names(cls: typing.Type) -> typing.List[str]:
    """Get a list of all method names of a Python class."""
    members = inspect.getmembers(cls, predicate=inspect.isfunction)
    return [name for name, _ in members]
|
6384cba8cc15eab7948af8c076e47b80ddf9cb5f
| 574,972
|
import base64
import six
def UrlSafeB64Decode(message):
    """Wrapper of base64.urlsafe_b64decode.
    Normalises the input to bytes and the output to str via six, so callers
    do not have to juggle the types themselves.
    Args:
        message: string or binary to decode
    Returns:
        decoded data in string format.
    """
    binary = six.ensure_binary(message)
    decoded = base64.urlsafe_b64decode(binary)
    return six.ensure_str(decoded)
|
f675c56f0bbd35661adfbea85135a9434fd7b107
| 703,925
|
def calculate(cart):
    """Return the total shipping cost for the cart."""
    return sum(line.item.shipping_cost * line.quantity
               for line in cart.get_lines())
|
4b6d9bd94ce3a5748f0d94ab4b23dab993b430e4
| 666
|
def _format_strings(the_string='', prefix='', suffix=''):
"""Small convenience function, allows easier logic in .format() calls"""
if the_string:
return '{0}{1}{2}'.format(prefix, the_string, suffix)
else:
return ''
|
ef3ac0f8d0972d8d562a763f6255b197951acc26
| 653,387
|
import random
def random_disjoint_interval(start, end, avoid_start, avoid_end):
    """
    Sample a value uniformly from [start, avoid_start] U [avoid_end, end].
    """
    gap = avoid_end - avoid_start
    # Draw from the contiguous interval with the gap removed, then shift
    # samples that land beyond avoid_start across the excluded region.
    sample = random.uniform(start, end - gap)
    if sample > avoid_start:
        sample += gap
    return sample
|
895b2c9e6d757cb0d0cce69e32650d62233d044c
| 78,832
|
def get_initiator_from_call_stack(call_stack):
    """Return the bottom element (last line) of the call stack.

    :param call_stack: newline-separated stack trace string
    :return: the final line of the stripped stack, or "" when the input is
             empty or not a string
    """
    # isinstance instead of `type(...) == str`: idiomatic, and also accepts
    # str subclasses.
    if call_stack and isinstance(call_stack, str):
        return call_stack.strip().split("\n")[-1]
    return ""
|
a65b42eae20401f6b1cfafb0c1c3b1ad724e0e3b
| 226,417
|
def os_folder_is_path(path_or_stream):
    """
    Is the given object a file path (i.e. a string) rather than a stream?
    :param path_or_stream: file path or stream, file/file-like object
    :return: True if `path_or_stream` is a file path
    """
    is_path = isinstance(path_or_stream, str)
    return is_path
|
23e374f3ce864c29b19ae279e9c1abaf98652506
| 500,126
|
from typing import Dict
def contents_append_notable_session_details(session) -> Dict:
    """Build the notable-session details dictionary for a session object.
    Args:
        session: session object (supports .get)
    Returns:
        Dict with the relevant notable session details
    """
    field_map = (
        ('SessionID', 'sessionId'),
        ('InitialRiskScore', 'initialRiskScore'),
        ('LoginHost', 'loginHost'),
        ('Accounts', 'accounts'),
    )
    return {out_key: session.get(src_key) for out_key, src_key in field_map}
|
4ab85c0b1331747de668802fc64e8e5a584eecc3
| 553,932
|
def make_withdraw(balance, password):
    """Return a password-protected withdraw function.
    >>> w = make_withdraw(100, 'hax0r')
    >>> w(25, 'hax0r')
    75
    >>> error = w(90, 'hax0r')
    >>> error
    'Insufficient funds'
    >>> error = w(25, 'hwat')
    >>> error
    'Incorrect password'
    >>> new_bal = w(25, 'hax0r')
    >>> new_bal
    50
    >>> w(75, 'a')
    'Incorrect password'
    >>> w(10, 'hax0r')
    40
    >>> w(20, 'n00b')
    'Incorrect password'
    >>> w(10, 'hax0r')
    "Your account is locked. Attempts: ['hwat', 'a', 'n00b']"
    >>> w(10, 'l33t')
    "Your account is locked. Attempts: ['hwat', 'a', 'n00b']"
    >>> type(w(10, 'l33t')) == str
    True
    """
    failed_attempts = []
    def withdraw(amount, input_word):
        # Mutates `balance` in the enclosing frame across calls.
        nonlocal balance
        # Three wrong passwords permanently lock the account.
        if len(failed_attempts) == 3:
            return "Your account is locked. Attempts: " + str(failed_attempts)
        if input_word != password:
            failed_attempts.append(input_word)
            return 'Incorrect password'
        if amount > balance:
            return 'Insufficient funds'
        balance -= amount
        return balance
    return withdraw
|
28b2114bbc1de49bb5ee94ae9a296827403e7fba
| 392,123
|
from typing import Any
def is_empty(data: Any) -> bool:
    """Check whether *data* counts as empty.
    Args:
        data (Any): value to test
    Returns:
        bool: True for None, the empty string, or the literal string 'null'
    """
    return data is None or data == '' or data == 'null'
|
015c109dbfa2bc72d21213d526db6a553b27aa19
| 81,269
|
def flatten_tree(tree: dict, full: bool = False) -> dict:
    """
    Flatten an execution tree to make it easier to read.
    Task trees are often a single node nested several levels deep. These trees may be collapsed
    into a list. The execution order is the same, but it's easier for a human to read.
    Before:
    - foo
      - bar
        - xoo
    After:
    - xoo
    - bar
    - foo
    Before:
    - foo
      - xar
      - bar
        - xoo
    After:
    - foo
      - xar
      - xoo
      - bar
    :param tree: Tree to flatten
    :param full: Flatten tree into single list
    :return: flattened task list
    """
    def flatten_node(node: dict) -> list:
        """
        Flatten a single node. Always return a list for consistency, even when returning a single
        node.
        :param node: tree node with a "dependencies" list
        :return: flattened node(s), dependencies first
        """
        # Copy so the caller's tree is never mutated.
        node = node.copy()
        num_dependencies = len(node["dependencies"])
        if num_dependencies == 0:
            # no dependencies: nothing to flatten, return as-is
            return [node]
        elif full or num_dependencies == 1:
            # flatten dependencies: flatten into single list that includes parent & child
            flattened = []
            for dependency in node["dependencies"]:
                flattened_child = flatten_node(dependency)
                flattened.extend(flattened_child)
            # clear dependencies, since they are now siblings
            # this node is added last since it runs after dependencies
            node["dependencies"] = []
            flattened.append(node)
            return flattened
        else:
            # multiple dependencies: do not flatten into parent.
            #
            # Any dependencies that are flattened need to be merged with other dependencies.
            # Dependency nodes should either be a single node, or a list of nodes
            dependencies = []
            for dependency in node["dependencies"]:
                flattened = flatten_node(dependency)
                dependencies.extend(flattened)
            node["dependencies"] = dependencies
            return [node]
    root = flatten_node(tree)
    if len(root) > 1:
        # if root's dependencies were flattened into it, then the returned list will have all of
        # those dependencies. Create a new root node to contain them all. This keeps the structure
        # consistent-ish for downstream consumers. They still have to special case this node, but
        # it should be a little simpler since all nodes are of a similar shape
        return {"name": None, "dependencies": root}
    else:
        # a single node, unpack it and return as root.
        return root[0]
|
77b133b80d70256643e22d1b778c1bdecb00badf
| 45,232
|
def get_scalebin(x, rmin=0, rmax=100, tmin=0, tmax=100, step=10):
    """
    Scale *x* from the measurement domain [rmin, rmax] onto the target
    domain [tmin, tmax], then bin it in steps of *step*.
    Returns a (label, index) pair: label is "[lo,hi)" for the bin containing
    the scaled value, or ">=tmax" with index one past the last bin when the
    scaled value falls beyond the target range.
    """
    scaled = (x - rmin) / (rmax - rmin) * (tmax - tmin) + tmin
    pos = 0
    for pos, lower in enumerate(range(tmin, tmax, step)):
        if scaled < lower + step:
            return "[%d,%d)" % (lower, lower + step), pos
    return ">=%d" % (tmax), pos + 1
|
75ad962a32f26515dc2ee1e9038947c62e7bbb27
| 477,525
|
def is_zip_path(img_or_path):
    """Return True when the path string references a member inside a zip archive."""
    marker = ".zip@"
    return marker in img_or_path
|
dd48f05b4d4b75124486ceb3527e5cd0c5379757
| 513,821
|
import json
def json_encode(data):
    """Serialize *data* into a JSON string."""
    encoded = json.dumps(data)
    return encoded
|
2c7b66448307f0ab3e516445148906f1fdd62057
| 530,283
|
def generate_percentile_metric(dataframe, fields_to_score, method="max", na_fill=.5, invert=False, pct=True):
    """Add a percentile-rank "<field>_Score" column for each field to score.

    :param dataframe: dataframe that will be returned with new scored fields
    :param fields_to_score: list of columns to score
    :param method: {'average', 'min', 'max', 'first', 'dense'}
        average: average rank of group
        min: lowest rank in group
        max: highest rank in group
        first: ranks assigned in order they appear in the array
        dense: like 'min', but rank always increases by 1 between groups
    :param na_fill: float — value used to fill null ranks (default .5)
    :param invert: boolean — rank descending so lower values score higher
    :param pct: boolean, default True — compute percentage rank of data
    :return: the input dataframe with the new score columns added
    """
    for field in fields_to_score:
        try:
            new_score = "{0}_Score".format(field)
            # ascending=not invert folds the two original branches into one.
            ranked = dataframe[field].rank(method=method, pct=pct, ascending=not invert)
            dataframe[new_score] = ranked.fillna(value=na_fill)
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Keep the best-effort behavior for real errors only.
        except Exception:
            print("WARNING:Could not score column {0}. Check input dataframe.".format(field))
    return dataframe
|
8210ee6516d6fc10f7f2065b57c5d1c769ef6b80
| 277,527
|
def soft_get(data: dict, set_name: str, tp: type):
    """
    Fetch a setting value from *data*, falling back to a built-in default
    when the key is missing or the value has the wrong type.
    Use when the setting is *not* required.
    :param data: dict with data
    :param set_name: setting name
    :param tp: expected value type
    :return: setting value
    """
    def default(val):
        defaults = dict(
            api_sleep=0.07,
            end_datetime='now',
            start_datetime='-2M',
            max_post_length=1024,
            image_generation=True,
            images_chance=0.333,
        )
        print(f"{val.upper()} value is being set from defaults!\n"
              "There is no such value in settings.yml, or it's incorrect")
        return defaults[val]
    if set_name not in data:
        return default(set_name)
    value = data[set_name]
    if type(value) != tp:
        return default(set_name)
    return value
|
3a5db68bde7aa2614dbc79306c2772ed2c2beeb4
| 152,686
|
def get_xcache_command(catchall, workdir, jobid, label, xcache_function):
    """
    Return the proper xcache command for either activation or deactivation.
    Command FORMAT: {'command': <command>, 'args': <args>, 'label': <some name>}
    :param catchall: queuedata catchall field (string).
    :param workdir: job working directory (string).
    :param jobid: PanDA job id (string).
    :param label: label (string).
    :param xcache_function: activation/deactivation function name (function).
    :return: command (dictionary); empty when xcache is not enabled.
    """
    if 'pilotXcache' not in catchall:
        return {}
    command = xcache_function(jobid=jobid, workdir=workdir)
    command['label'] = label
    command['ignore_failure'] = True
    return command
|
be825c021b1bf53dffcc05a04f56140e8796d62f
| 562,089
|
def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
    """
    Merge two dicts into a new one, starting from *starting_dict* and then
    applying *updater_dict*. On key collisions the updater's value wins.
    Neither input is mutated.
    :param starting_dict: base dict
    :param updater_dict: dict whose entries take precedence
    :return: merged dict
    """
    return {**starting_dict, **updater_dict}
|
b9df7f3bf7785c67198ec0f282e89d51f25d153a
| 629,140
|
def adjust_axis(ax, axis_width, tickwidth, major_ticklength,
                minor_ticklength):
    """
    Adjust the tick properties of a ``matplotlib`` axis: spine/tick widths,
    major/minor tick lengths, and inward-pointing ticks.
    Parameters
    ----------
    ax : ``matplotlib`` axis.
        The axis we're adjusting.
    axis_width, tickwidth : Floats.
        Desired width (thickness) of the axis spines and ticks.
    major_ticklength, minor_ticklength : Floats.
        Desired length of the major and minor ticks.
    Returns
    ---------
    ax : ``matplotlib`` axis.
        The axis with the properties adjusted.
    """
    # Thicken all four spines.
    for side in ('top', 'bottom', 'left', 'right'):
        ax.spines[side].set_linewidth(axis_width)
    # Point every tick inwards at the requested width, then set lengths.
    ax.tick_params(which='both', direction='in', width=tickwidth)
    ax.tick_params(which='major', length=major_ticklength)
    ax.tick_params(which='minor', length=minor_ticklength)
    return ax
|
9e9da67534339efe7cf95aef3976b8c0b76569b8
| 519,512
|
def find_tree(gi, found_lib, folders):
    """
    Look for a directory structure in the given library.
    Returns the id of the deepest folder of the tree; raises when any
    folder of the expected path is missing.
    """
    by_path = {folder['name']: folder for folder in gi.libraries.get_folders(found_lib)}
    path = ""
    last_f_id = None
    for name in folders:
        path += "/" + name
        if path not in by_path:
            raise Exception("Did not find folder '" + name + "'")
        print("Found folder " + name)
        last_f_id = by_path[path]['id']
    return last_f_id
|
5c1f2719ebfe6488dd7da0b5864b1851cb53d99d
| 113,617
|
def repair_ids(shared_state, original_id_to_label, label_to_id):
    """Relink an optimizer state to new parameter ids.

    Maps each old parameter id through its label to the new id, keeping the
    same state tensors. Useful when reusing an old model's optimizer state
    on a new model.
    """
    return {
        label_to_id[original_id_to_label[old_id]]: state
        for old_id, state in shared_state.items()
    }
|
871c488842bac61784f6f4d7653e3455589dcf91
| 148,661
|
def calculate_beta(
    r0: float,
    params: dict
) -> float:
    """
    Calculate beta from R0 and the recovery rate gamma.
    :param float r0: basic reproduction number
    :param dict params: dictionary of parameters (must contain "gamma")
    :return float: calculated beta
    """
    gamma = params["gamma"]
    return r0 * gamma
|
a0e05b0ff708be05f74878f8d5c80919b1a76410
| 407,879
|
def times(values):
    """
    Compute cpu totals from a /proc/stat-style value list.
    Parameters
    ----------
    values : list
        The ten cpu-time fields parsed from the stdout logs.
    Returns
    -------
    tuple
        (total, idle) cpu time.
    """
    user, nice, system, idle, io, irq, soft, steal, _, _ = values
    # iowait counts as idle; everything else that ran counts as busy.
    idle_time = idle + io
    busy_time = user + nice + system + irq + soft + steal
    return idle_time + busy_time, idle_time
|
74b7675a5854c757f3f3f2ddf53474b664e3d74b
| 32,614
|
def collect_key_values(key, data):
    """
    Collect the values of every entry matching *key* in a nested dictionary.
    Args:
        key (object): Dictionary key to search for
        data (dict): Nested data dict
    Returns:
        list: List of values for given key
    """
    found = []
    for current_key, value in data.items():
        if current_key == key:
            # Matching entries are collected as-is; we do not descend into them.
            found.append(value)
        elif isinstance(value, dict):
            found.extend(collect_key_values(key, value))
    return found
|
06e79c3c3b75db37533ff68366b851fce4a100a0
| 551,754
|
def with_lookup(old_fn, lookup):
    """
    Wrap a similarity function so it accepts lemmas instead of indices,
    resolving each lemma through *lookup* and returning 0 for unknowns.
    :param old_fn: function taking two indices and returning a similarity score
    :param lookup: mapping from lemmas to sets of indices
    :return: similarity function over lemmas
    """
    def sim_fn(lemma_a, lemma_b, *args, **kwargs):
        """Similarity of two lemmas; 0 when either lemma is unknown."""
        try:
            return old_fn(lookup[lemma_a], lookup[lemma_b], *args, **kwargs)
        except KeyError:
            # At least one lemma is missing from the lookup.
            return 0
    return sim_fn
|
73348869c09eec8e00a843ceaaa4bca452aa34ac
| 177,402
|
import secrets
def generate_password(character_set, length):
    """
    Pick *length* characters from *character_set* using the secrets module
    (cryptographically strong randomness).
    https://docs.python.org/3/library/secrets.html
    """
    # "complexity is the worst enemy of security" - Bruce Schneier.
    chosen = [secrets.choice(character_set) for _ in range(length)]
    return ''.join(chosen)
|
35056b5ffb44577abe077e1d5c0a9ec0b010c1b2
| 326,409
|
def readList_fromFile(fileGiven):
    """Read a list from the input file provided, one item per row.

    :param fileGiven: path of the file to read
    :return: list of lines with trailing newlines stripped
    """
    # FIX: use a context manager so the file handle is closed deterministically;
    # the original left the open() handle to the garbage collector.
    with open(fileGiven) as fh:
        return [line.rstrip('\n') for line in fh]
|
e89003bf74abcd805aa4d3718caec32a7aae4df6
| 654,319
|
def sieve(iterable, inspector, *keys):
    """Separates @iterable into multiple lists, with @inspector(item) -> k for k in @keys defining the separation.
    e.g., sieve(range(10), lambda x: x % 2, 0, 1) -> [[evens], [odds]]

    Raises KeyError if the inspector produces a key not listed in @keys.
    """
    buckets = {k: [] for k in keys}
    for item in iterable:
        k = inspector(item)
        if k not in buckets:
            raise KeyError(f"Unexpected key <{k}> found by inspector in sieve.")
        # FIX: reuse the computed key rather than calling inspector a second
        # time — the original re-invoked it, doubling the cost and risking a
        # different key than the one just validated.
        buckets[k].append(item)
    return [buckets[k] for k in keys]
|
6ebb76dfb3131342e08a0be4127fba242d126130
| 3,244
|
import torch
import random
def gen_input_mask(
        shape, hole_size, hole_area=None, max_holes=1):
    """
    * inputs:
        - shape (sequence, required):
                Shape of a mask tensor to be generated.
                A sequence of length 4 (N, C, H, W) is assumed.
        - hole_size (sequence or int, required):
                Size of holes created in a mask.
                If a sequence of length 4 is provided,
                holes of size (W, H) = (
                    hole_size[0][0] <= hole_size[0][1],
                    hole_size[1][0] <= hole_size[1][1],
                ) are generated.
                All the pixel values within holes are filled with 1.0.
        - hole_area (sequence, optional):
                This argument constraints the area where holes are generated.
                hole_area[0] is the left corner (X, Y) of the area,
                while hole_area[1] is its width and height (W, H).
                This area is used as the input region of Local discriminator.
                The default value is None.
        - max_holes (int, optional):
                This argument specifies how many holes are generated.
                The number of holes is randomly chosen from [1, max_holes].
                The default value is 1.
    * returns:
            A mask tensor of shape [N, C, H, W] with holes.
            All the pixel values within holes are filled with 1.0,
            while the other pixel values are zeros.
    """
    mask = torch.zeros(shape)
    bsize, _, mask_h, mask_w = mask.shape
    # Each sample in the batch gets its own independently sampled holes.
    for i in range(bsize):
        n_holes = random.choice(list(range(1, max_holes+1)))
        for _ in range(n_holes):
            # choose patch width: either sampled from a (min, max) pair or fixed
            if isinstance(hole_size[0], tuple) and len(hole_size[0]) == 2:
                hole_w = random.randint(hole_size[0][0], hole_size[0][1])
            else:
                hole_w = hole_size[0]
            # choose patch height (same convention as width)
            if isinstance(hole_size[1], tuple) and len(hole_size[1]) == 2:
                hole_h = random.randint(hole_size[1][0], hole_size[1][1])
            else:
                hole_h = hole_size[1]
            # choose offset upper-left coordinate, keeping the hole fully
            # inside hole_area when given, else fully inside the mask.
            # NOTE(review): assumes hole_w/hole_h fit within the constraint
            # area/mask; larger holes would make randint raise — confirm inputs.
            if hole_area:
                harea_xmin, harea_ymin = hole_area[0]
                harea_w, harea_h = hole_area[1]
                offset_x = random.randint(harea_xmin, harea_xmin + harea_w - hole_w)
                offset_y = random.randint(harea_ymin, harea_ymin + harea_h - hole_h)
            else:
                offset_x = random.randint(0, mask_w - hole_w)
                offset_y = random.randint(0, mask_h - hole_h)
            mask[i, :, offset_y: offset_y + hole_h, offset_x: offset_x + hole_w] = 1.0
    return mask
|
799e5cd0590a58012888707b2997b69992a45f83
| 635,119
|
def ParseAndObscureAddress(email):
    """Break the given email into username and domain, and obscure.
    Args:
      email: string email address to process
    Returns:
      A 3-tuple (username, domain, obscured_username).
      The obscured username is truncated the same way that Google Groups does it.
    """
    # partition tolerates addresses without '@' (domain becomes '').
    username, _, user_domain = email.partition('@')
    # Drop any '+tag' suffix before obscuring.
    base_username = username.split('+')[0]
    cutoff_point = min(8, max(1, len(base_username) - 3))
    return username, user_domain, base_username[:cutoff_point]
|
cbe5178b5b4508b629f6a37fd80e0ad8eb0bc78d
| 359,181
|
def createOverwriteItem(gc, parentFolderId, name):
    """
    Create an item, overwriting any same-named items that already existed.
    :param gc: Instance of GirderClient with the correct permissions.
    :param parentFolderId: The parent folder of the item.
    :param name: The name of the item to create.
    :returns: The newly created item.
    :rtype: dict
    """
    # Delete existing items of the same name before recreating.
    for existing in gc.listItem(parentFolderId, name=name):
        gc.delete('item/%s' % existing['_id'])
    return gc.createItem(parentFolderId, name)
|
917c7e544c9a4a1b859d2e76d760c706faae85b9
| 306,538
|
def multi_timescale_model(request) -> str:
    """Fixture that provides multi-timescale models.
    Returns
    -------
    str
        Name of the multi-timescale model.
    """
    model_name = request.param
    return model_name
|
7c538e397bca6a18194d088614db9d9be9323eae
| 392,163
|
def microbe_mortality_prob(basal_death_prob, death_rate, Tax_tolerance, Psi_fc, Psi):
    """
    Derive taxon-specific mortality probability as a function of water
    potential and drought tolerance.
    Parameters:
        basal_death_prob: 1D array; taxon-specific basal mortality prob
        death_rate: 1D array; taxon-specific mortality change rate
        Tax_tolerance: series; taxon-specific drought tolerance
        Psi_fc: scalar; water potential at field capacity
        Psi: scalar; daily water potential
    Returns:
        1D float32 array; taxon-specific mortality probability
    Reference:
        Allison and Goulden, 2017, Soil Biology and Biochemistry
    """
    if Psi >= Psi_fc:
        # No water stress: mortality stays at the basal rate.
        mortality_rate = basal_death_prob
    else:
        tolerance = Tax_tolerance.to_numpy(copy=False)
        # Drought stress scales mortality by (1 - tolerance) and by the
        # water-potential deficit (option 1 of the reference; the paper's
        # options 2 and 3 use exp(tolerance) weightings instead).
        stress = (1 - tolerance) * (Psi - Psi_fc) * death_rate
        mortality_rate = basal_death_prob * (1 - stress)
    return mortality_rate.astype('float32')
|
63bc1ec1be34050f32baa83c6512919cc4e70ed1
| 245,271
|
def Base_setTextValue(self, param):
    """Set an element's value attribute via JavaScript.

    Example step definition:
    - name: input username
      setTextValue:
        text: |
          multi line text1
          multi line text2
        id: elementid1
    """
    text = self.getvalue(param)
    if text is None:
        raise Exception("text not set: param=%s" % (param))
    element = self.findmany2one(param)
    # Assign through JS so multi-line values land in the element verbatim.
    self.driver.execute_script("arguments[0].value = arguments[1];", element, text)
    return self.return_element(param, element)
|
3030ed2241364002246f0c8c1fadf3c244658eff
| 41,048
|
import pkg_resources
def get_required(dist):
    """Return a set with all distributions that are required of dist
    This also includes subdependencies and the given distribution.
    :param dist: the distribution to query. Can also be the name of the distribution
    :type dist: :class:`pkg_resources.Distribution` | str
    :returns: a set of distributions that are required including the given one
    :rtype: set of :class:`pkg_resources.Distribution`
    :raises: class:`pkg_resources.DistributionNotFound`
    """
    d = pkg_resources.get_distribution(dist)
    allds = set([d])
    processed = set()
    reqs = set(d.requires())
    while reqs:
        newreqs = set()
        for r in reqs:
            dr = pkg_resources.get_distribution(r)
            allds.add(dr)
            # FIX: the original used `&` (intersection) with an empty set
            # here, which always yielded the empty set and silently dropped
            # every transitive subdependency.
            newreqs |= set(dr.requires())
        # Track already-resolved requirements so dependency cycles cannot
        # loop forever (the original's `newreqs - reqs` did not guarantee this).
        processed |= reqs
        reqs = newreqs - processed
    return allds
|
aeb18cb99af0bfc498d87b63dbdc131dad5bcafc
| 647,975
|
def view_logout(self, request):
    """ Handles the logout requests. """
    # Delegates entirely to the host object's logout_to().
    # NOTE(review): presumably returns a response/redirect — confirm in the
    # defining class, which is outside this view.
    return self.logout_to(request)
|
c28b02a93f8a2ccac40b9fca563293ab3b068f25
| 531,618
|
def escape_dn_for_filter(anything: str) -> str:
    """Escape an LDAP distinguished name for safe use inside a search filter.

    Distinguished names already have some special characters escaped or
    encoded (CN=x,OU=y,DC=z), so the generic escape function would
    double-escape the existing escape sequences. Here only the characters
    special to the filter syntax itself — '(', ')' and '*' — are hex-escaped
    (\\XX form). Numeric inputs pass through unchanged, as do purely
    alphanumeric strings (nothing to escape).
    """
    # Idiomatic single isinstance call instead of two chained ones.
    if isinstance(anything, (int, float)):
        return anything
    if anything.isalnum():
        return anything
    # Hex-escape the filter metacharacters; leave everything else untouched.
    return "".join(
        "\\%02x" % ord(char) if char in "()*" else char
        for char in anything
    )
|
5f7932ac8935f31426ea60edb73a3eb0502d6f96
| 575,509
|
def hamming_distance(string1, string2):
    """
    Count the differing bits between two equal-length byte strings,
    i.e. the number of '1' bits in xor(string1, string2).
    Raises ValueError for sequences of unequal length.
    """
    if len(string1) != len(string2):
        raise ValueError("Undefined for sequences of unequal length")
    return sum(bin(b1 ^ b2).count('1') for b1, b2 in zip(string1, string2))
|
8d97cd5a9f9674a21a8e5f07d12a6f9579fb2819
| 546,190
|
import struct
def mac2str(mac):
    """Convert a 6-byte mac address to a readable colon-separated hex string.
    Args:
        mac: 6 bytes mac address
    Returns:
        readable string such as '00:1a:2b:3c:4d:5e'
    """
    octets = struct.unpack('BBBBBB', mac)
    return ':'.join('%02x' % octet for octet in octets)
|
01a51ef8a787d9a4e2ee0262698ef481bcadb6ff
| 66,114
|
def pop_script(kwargs, script):
    """Pop a script from the parallel lists of scripts, threadsafe tasks and containers.
    Arguments:
    ----------
    kwargs : dict
        Keyword arguments extracted from [slurm] section of config file,
        with parallel 'scripts', 'threadsafe' and 'containers' lists.
    script : str
        Name of script.
    Returns:
    --------
    popped : bool
        Was the script popped?"""
    if script not in kwargs['scripts']:
        return False
    index = kwargs['scripts'].index(script)
    # Remove the same position from all three parallel lists.
    for key in ('scripts', 'threadsafe', 'containers'):
        kwargs[key].pop(index)
    return True
|
431c88cd3f3bb87d14a21e582e8507d28b9a3e42
| 268,635
|
def unicode_to_c_ustring(string):
    """Converts a Python unicode string to a C++ u16-string literal.
    >>> unicode_to_c_ustring(u'b\u00fccher.de')
    'u"b\\\\u00fccher.de"'
    """
    pieces = ['u"']
    for ch in string:
        code = ord(ch)
        # Non-BMP characters need the 8-digit \U escape; other non-ASCII
        # characters use the 4-digit \u escape; ASCII passes through.
        if code > 0xffff:
            pieces.append('\\U%08x' % code)
        elif code > 0x7f:
            pieces.append('\\u%04x' % code)
        else:
            pieces.append(ch)
    pieces.append('"')
    return ''.join(pieces)
|
2498487aa424ebf7da1825284be054d2699a5e6e
| 667,527
|
def stringformat(value, arg):
    """Format ``value`` using printf-style specifier ``arg``.

    ``arg`` follows Python %-formatting syntax with the leading "%"
    dropped (e.g. "05d"). Returns "" when the specifier does not apply
    to the value.
    """
    specifier = "%" + str(arg)
    try:
        return specifier % value
    except (ValueError, TypeError):
        return ""
|
056a4b24e582d2e7576f8344c80592f7f36147d0
| 477,652
|
def calcPurchasePrice(initialSharePrice, allotment):
    """Compute the total purchase price: initial share price times allotment."""
    total = initialSharePrice * allotment
    return total
|
dd13f55f3599cb586135e476c6cebcb5fca64c5c
| 260,986
|
def smoothstep(a, b, x):
    """Hermite (cubic-spline) ease between 0.0 and 1.0 as x moves from a to b.

    For x below a the result clamps to 0.0; for x at or above b it clamps
    to 1.0. In between, the curve slows down near both endpoints.
    """
    if x < a:
        return 0.0
    if x >= b:
        return 1.0
    t = float(x - a) / (b - a)
    return t * t * (3 - 2 * t)
|
c439470ec953def96cdd588a0d6ffc35e15fc9a3
| 488,638
|
def _ensure_tuples(items):
    """Ensure an iterable of positions is a list of position tuples.

    :param items: iterable of position-like sequences
    :return: list with every element converted to a tuple
    """
    # Renamed the parameter: the original shadowed the builtin ``list``.
    return [tuple(item) for item in items]
|
0cc1ccd95c2b561c705ece6a15723d6c56e136f2
| 491,633
|
def get_files() -> list:
    """Prompt the user repeatedly for file paths to compare.

    An empty input line ends the loop. Returns the list of entered paths.
    """
    prompt = "Enter the path of a file to compare (or enter to quit): "
    # iter(callable, sentinel) keeps calling input() until "" is entered.
    return list(iter(lambda: input(prompt), ""))
|
d47a95448f6b69acbc517e48f2862371e3f09208
| 631,543
|
def camel_to_snake(s: str) -> str:
    """Convert a CamelCase identifier to snake_case."""
    parts = []
    for ch in s:
        if ch.isupper():
            # Each uppercase letter starts a new word: prefix an underscore.
            parts.append("_")
            parts.append(ch.lower())
        else:
            parts.append(ch)
    # A leading capital produces a spurious leading underscore; strip it.
    return "".join(parts).lstrip("_")
|
e91cba6263cc1d868bc09137dda823462cfc484b
| 254,401
|
def interval_not_intersect_intervals(intervals, interval):
    """
    Determine whether ``interval`` overlaps none of ``intervals``.

    :param intervals: iterable of ChromosomeIntervals
    :param interval: one ChromosomeInterval
    :return: True if ``interval.intersection(t)`` is None for every t
    """
    # all() short-circuits on the first overlap instead of materializing
    # every intersection in a list as the original did.
    return all(interval.intersection(target) is None for target in intervals)
|
f4995f6129b301abd5ba63725454cefdc2574a40
| 485,177
|
def _attr_rem(attr):
    """Build the 'RemoveAttribute' operation dict for update_workspace_attributes().

    :param attr: name of the attribute to remove
    :return: dict describing the removal operation
    """
    return dict(op="RemoveAttribute", attributeName=attr)
|
69c013d88fcffbe429b4203687717f79c41ee061
| 458,636
|
import re
def format_footprint(footprint: str) -> str:
    """Normalize an AOI footprint to the quoted Sentinel Intersects() form.
    Args:
        footprint::str
            AOI footprint: a coordinate pair, WKT polygon, or an existing
            (possibly quoted) Intersects(...) call.
    Returns:
        val::str
            '"Intersects(<footprint>)"' suitable for the Sentinel Hub API.
    """
    intersects_re = re.compile(r"^\"?Intersects\((.+)\)\"?$")
    match = intersects_re.match(footprint)
    # Unwrap an existing Intersects(...) so we never double-wrap.
    inner = match.group(1) if match else footprint
    return f'"Intersects({inner})"'
|
9be11245c1dc87f389704b9048071cba0b97391d
| 230,247
|
import math
def csformula(nrows):
    """Return the fitted chunksize (a float) for an index of nrows elements.

    The fit passes through two anchor points: chunksize 2**12 at 10**6
    rows and 2**15 at 10**9 rows, i.e. 64 * 2**log10(nrows).
    """
    chunksize = 64 * 2 ** math.log10(nrows)
    return chunksize
|
0b7633fd7245433479f3af55a1b2962b93148b95
| 182,502
|
def organiser_email(meta):
    """Extract the organiser email from event metadata.

    :param meta: mapping whose 'organiser' entry may be a dict with an
        'email' key, or plain text (in which case no email is extracted)
    :return: the organiser email, or '' when unavailable
    """
    v = meta.get('organiser') or ''
    # isinstance (not type() ==) so dict subclasses are handled too.
    if isinstance(v, dict):
        return v.get('email', '')
    return ''
|
9e1a6e448bc471811860f71506ca0593c649e9d1
| 337,842
|
def maybe_call(obj, *args, **kwargs):
    """
    Return ``obj(*args, **kwargs)`` when obj is callable; otherwise return obj unchanged.
    """
    return obj(*args, **kwargs) if callable(obj) else obj
|
87945fc1059ae028f83d2de91bf9b87085aa2857
| 259,058
|
import functools
import time
def _rate_limit(func=None, per_second=1):
    """Limit number of requests made per second.

    Decorator for methods on objects that track ``self.last_request``.
    If the previous request was made less than ``1/per_second`` seconds
    ago, sleeps for the remainder of that interval before proceeding.

    :param func: the wrapped method (None when used as
        ``@_rate_limit(per_second=N)``)
    :param per_second: maximum number of calls allowed per second
    """
    if not func:
        # Called with arguments only: return a decorator with per_second bound.
        return functools.partial(_rate_limit, per_second=per_second)

    @functools.wraps(func)
    def wrapper(self, url, *args, **kwargs):
        min_interval = 1 / per_second
        if self.last_request is not None:
            delta = time.time() - self.last_request
            if delta < min_interval:
                # Bug fix: the original slept `1 - delta`, enforcing a full
                # one-second gap no matter what per_second was; only the
                # remainder of the allowed interval should be slept.
                time.sleep(min_interval - delta)
        self.last_request = time.time()
        return func(self, url, *args, **kwargs)
    return wrapper
|
019963de08ec2cc5b1a44f1554fbe6b5cde5be6f
| 83,499
|
def _trip_protection_to_integer(trip_protection: bool) -> int:
    """Map a tri-state flag to an int: True -> 1, False -> -1, None -> 0."""
    if trip_protection is None:
        return 0
    if trip_protection:
        return 1
    return -1
|
dfa07630b8e68535abcd4836aa169612eea3deb4
| 272,092
|
def format_album(index, data):
    """Return one formatted, newline-terminated line describing an album.

    ``data`` must provide the keys 'artist', 'album', 'year' and 'rating'.
    """
    return f"{index}. {data['artist']} - {data['album']} - {data['year']} - {data['rating']}\n"
|
c9b52ee10343d34be4dc81c160391e88a1dd6b16
| 528,765
|
def create_dict_keyed_by_field_from_items(items, keyfield):
    """Build a dict keyed by ``keyfield`` from an iterable of mappings.

    Falsy items and items lacking ``keyfield`` are skipped; on duplicate
    keys the later item wins.
    """
    keyed = {}
    for item in items:
        if item and keyfield in item:
            keyed[item.get(keyfield)] = item
    return keyed
|
a7478c5dc04d7e23801eecb5aaf85b7530d6bf79
| 682,062
|
import random
def init_population(pop_number, gene_pool, state_length):
    """Initialize a random population for a genetic algorithm.

    pop_number  : number of individuals in the population
    gene_pool   : list of possible gene values
    state_length: length of each individual
    """
    pool_size = len(gene_pool)
    return [
        [gene_pool[random.randrange(0, pool_size)] for _ in range(state_length)]
        for _ in range(pop_number)
    ]
|
c538ed82d6d5d65b65dc409da761f175b964121c
| 142,149
|
def count_args(func_el):
    """
    Count a function's named parameters from its XML (AST dump) element.

    Covers positional-only, standard, and keyword-only arguments; any
    unpacked *args/**kwargs are deliberately not counted.
    """
    arg_nodes = []
    for group in ('posonlyargs', 'args', 'kwonlyargs'):  # posonlyargs: python 3.8+
        arg_nodes.extend(func_el.xpath('args/arguments/%s/arg' % group))
    return len(arg_nodes)
|
36c1029e05a4f9a8d9e4a4fa8ebb307116436091
| 242,169
|
def check_coverage_threshold(pol, min_cov):
    """Annotate each position with whether it meets the coverage threshold.

    Adds a "<min_cov>X_Coverage" column of "Yes"/"No" to ``pol`` in place,
    then returns the percentage of rows whose total_bases is at least
    ``min_cov`` (0 when no row passes).
    """
    column = str(min_cov) + "X_Coverage"
    pol.loc[pol["total_bases"] >= min_cov, column] = "Yes"
    pol.loc[pol["total_bases"] < min_cov, column] = "No"
    passing = (pol[column] == "Yes").sum()
    if passing:
        return passing / len(pol) * 100
    return 0
|
d060206aa2df945c7927e903c5a96478b3bb39d9
| 577,348
|
import random
def rand_point(rows, cols):
    """Return a random (r, c) point with 0 <= r <= rows and 0 <= c <= cols.

    Note: both bounds are inclusive (random.randint semantics).
    """
    return random.randint(0, rows), random.randint(0, cols)
|
65bcc7d4453d34e4bbe230c71d513621b5960c83
| 462,743
|
from typing import List
import csv
import io
def csv_to_list(text: str) -> List[List]:
    """
    Parse CSV-formatted text (e.g. Windows "/FO CSV" output) into rows.

    :param text: CSV text with comma delimiters and double-quote quoting
    :return: list of rows, each a list of cell strings:
        [[r1c1, r1c2, ...], [r2c1, r2c2, ...], ...]
    """
    buffer = io.StringIO(text)
    rows = csv.reader(buffer, delimiter=',', quotechar='"')
    return [row for row in rows]
|
a52f285afe0df741fac5fcd9f2ea6560052c4a3d
| 505,991
|
from bs4 import BeautifulSoup
def get_wiki_from_spotlight_by_name(spotlight, name):
    """Given the spotlight output, and a name string, e.g. 'hong kong'
    returns the wikipedia tag assigned by spotlight, if it exists, else '-'.

    :param spotlight: HTTP-response-like object from the Spotlight service;
        only its ``.text`` attribute (HTML) is used here.
    :param name: surface form to look for (matched case-insensitively).
    :return: tuple ``(tag, actual_found)``; ``tag`` is the last path segment
        of the matched anchor's href, or '-' when nothing matches.
        ``actual_found`` is 1 on a match and 0 otherwise, since the
        function returns on the first hit.
    """
    actual_found = 0
    parsed_spotlight = BeautifulSoup(spotlight.text, 'lxml')
    # Pass 1: exact (case-insensitive) match on the anchor text.
    # NOTE(review): wiki_tag.string is None for anchors with nested markup,
    # which would make .lower() raise AttributeError — confirm inputs.
    for wiki_tag in parsed_spotlight.find_all('a'):
        if wiki_tag.string.lower() == name.lower():
            actual_found += 1
            return wiki_tag.get('href').split('/')[-1], actual_found
    # If nothing found, try to match based on prefixes, e.g. match the name Estonia to the tag for 'Estonian'
    for wiki_tag in parsed_spotlight.find_all('a'):
        if wiki_tag.string.lower()[:len(name)] == name.lower():
            actual_found += 1
            return wiki_tag.get('href').split('/')[-1], actual_found
    return '-', actual_found
|
911538367813fa6e80cd73c35f744d1055390248
| 659,040
|
import re
def titleize(name):
    """Titleize a course name or instructor, handling exceptions such as
    Roman numerals (II), ordinals (1st), Mc-prefixed surnames, and 's."""
    def upper_numeral(m):
        return 'I' + m.group(1).upper()

    def lower_ordinal(m):
        return m.group(1) + m.group(2).lower()

    def mc_name(m):
        return 'Mc' + m.group(1).upper()

    result = name.strip().title()
    result = re.sub(r'I(x|v|i+)', upper_numeral, result)   # Ii -> II, Iv -> IV, ...
    result = re.sub(r'(\d)(St|Nd|Rd|Th)', lower_ordinal, result)  # 1St -> 1st
    result = re.sub(r'Mc([a-z])', mc_name, result)          # Mcx -> McX
    return result.replace("'S", "'s")
|
b4eb58ec092d89d23d1e878a8b0de077ec17c551
| 678,624
|
def getattr_nested(obj, attrs):
    """Resolve a dot-separated chain of attribute accesses on ``obj``.

    Args:
        obj -- object to read attributes from
        attrs -- dot-separated accessor string, exactly as you would
            type it in code (e.g. 'x.y.z')

    Examples
    >>> A = namedtuple('A', 'x,y')
    >>> a = A(A(1, 2), A(3, 4))
    >>> getattr_nested(a, 'x.x')
    1
    >>> getattr_nested(a, 'y.x')
    3
    """
    target = obj
    for attr_name in attrs.split('.'):
        target = getattr(target, attr_name)
    return target
|
313cd7daea93a98a56e29deb60b749920a7b2320
| 505,463
|
def gcd(a, b):
    """gcd(a, b) returns the greatest common divisor of the integers a and b."""
    a, b = abs(a), abs(b)
    # Euclid's algorithm with tuple assignment instead of a temp variable.
    while a:
        a, b = b % a, a
    return b
|
1c97e04e39b4aff0f97b58e98b500965f58b9c66
| 652,326
|
def add_to_master_list(single_list, master_list):
    """Append every item of ``single_list`` to ``master_list``.

    :param single_list: List of dictionaries from the paginated query
    :type single_list: list
    :param master_list: Master list of dictionaries containing group information
    :type master_list: list
    :returns: ``master_list`` (mutated in place) with the appended data
    """
    # list.extend is the idiomatic, C-speed equivalent of the original
    # element-by-element append loop.
    master_list.extend(single_list)
    return master_list
|
4b4e122e334624626c7db4f09278b44b8b141504
| 705,370
|
def is_notebook() -> bool:
    """Check whether the current environment is a Jupyter notebook.

    Returns:
        bool: True only when the active IPython shell is ZMQ-based
        (Jupyter notebook or qtconsole); False for terminal IPython or
        plain Python.
    """
    try:
        shell_name = get_ipython().__class__.__name__  # type: ignore
    except NameError:
        # get_ipython only exists inside IPython environments.
        return False
    return shell_name == "ZMQInteractiveShell"
|
98702993b92585ea1916a7d78592079a6c1a2dc7
| 465,185
|
def geom_mean_long_sun(juliancentury: float) -> float:
    """Calculate the geometric mean longitude of the sun, in degrees [0, 360)."""
    mean_long = juliancentury * (36000.76983 + 0.0003032 * juliancentury) + 280.46646
    return mean_long % 360.0
|
7b9c4e95af16fbd86b83eff950858787eaa543bc
| 332,132
|
import struct
def mrt_block_unpack(packed_bytes):
    """Decode an MRT header block.

    The first 12 bytes are a network-order header (timestamp, type,
    subtype, content length); the content follows, possibly padded.
    """
    time_secs, mrt_type, mrt_subtype, content_length = struct.unpack(
        '!LHHL', packed_bytes[0:12])
    return {
        'time_secs': time_secs,
        'mrt_type': mrt_type,
        'mrt_subtype': mrt_subtype,
        # Trim any trailing padding beyond the declared content length.
        'content': packed_bytes[12:12 + content_length],
    }
060fb4e501f70f1739aa8b65dbd3986b126b1370
| 230,601
|
import re
def validator(ip):
    """
    Check whether *ip* is a valid dotted-quad IPv4 address.

    :param ip: string to validate
    :return: True if the string is four dot-separated decimal octets,
        each in the range 0-255; otherwise False
    """
    # Shape check: exactly four 1-3 digit groups separated by dots.
    if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip) is None:
        return False
    # Bug fix: the regex alone accepted out-of-range octets such as
    # "999.999.999.999"; enforce the 0-255 range explicitly.
    return all(int(octet) <= 255 for octet in ip.split("."))
|
f80add0ce1d8355da10390079b5db42ac513a327
| 263,743
|
def getBrackets(dumFunc, midVal):
    """Find bracketing intervals for the two roots of ``dumFunc``.

    ``midVal`` is assumed to lie strictly between the two roots; this
    routine marches left and right from ``midVal`` until the function
    changes sign between consecutive samples, yielding one bracket per
    root.

    Input:
        dumFunc -- function that changes sign at its roots
        midVal  -- value located between the two roots
    Returns: (lBrack, rBrack)
        lBrack, rBrack -- [low, high] intervals each containing one root
    """
    fac = 0.01  # relative step used to march away from midVal
    # March left: shrink lVal geometrically until the sign flips.
    midF = dumFunc(midVal)
    lVal = midVal * (1 - fac)
    lF = dumFunc(lVal)
    while (lF * midF > 0.):
        lVal *= (1. - fac)
        midF = lF
        lF = dumFunc(lVal)
    lBrack = [lVal, midVal]
    # March right: grow rVal geometrically until the sign flips.
    # Bug fix: the original shrank rVal with (1 - fac), walking back
    # toward midVal so the right-hand root could never be bracketed;
    # it must grow with (1 + fac).
    midF = dumFunc(midVal)
    rVal = midVal * (1 + fac)
    rF = dumFunc(rVal)
    while (rF * midF > 0.):
        rVal *= (1. + fac)
        midF = rF
        rF = dumFunc(rVal)
    rBrack = [midVal, rVal]
    return lBrack, rBrack
|
69e369953df555843dac2da362e0f25e2385f57a
| 340,564
|
def clientcompressionsupport(proto):
    """Return the compression methods supported by the client.

    Reads the protocol capabilities for a ``comp=`` entry; when none has
    been announced, falls back to the default of zlib and uncompressed.
    """
    prefix = b'comp='
    for capability in proto.getprotocaps():
        if capability.startswith(prefix):
            return capability[len(prefix):].split(b',')
    return [b'zlib', b'none']
|
f97a706681441fbd15718f6a7d24966a0bb5b27d
| 519,657
|
def combine_datasets(train_set, val_set):
    """
    Merge two 4-tuple datasets (inputs, labels, masks, sentences) into one
    by concatenating each component pairwise.
    """
    t_inputs, t_labels, t_masks, t_sentences = train_set
    v_inputs, v_labels, v_masks, v_sentences = val_set
    return (
        t_inputs + v_inputs,
        t_labels + v_labels,
        t_masks + v_masks,
        t_sentences + v_sentences,
    )
|
956d14535ec59c8fb05c573789f422867dc881e3
| 195,500
|
def get_key_from_dict(dictionary, key, group, required=True):
    """
    Fetch a value from a manifest dictionary.

    :param dictionary: Dictionary to read from
    :param key: Key to fetch
    :param group: Group this key belongs to, for error reporting
    :param required: whether a missing key is a fatal manifest error
    :return: the value, or None when the key is absent and not required
    :raises KeyError: when the key is absent and required
    """
    if key in dictionary:
        return dictionary[key]
    if required:
        raise KeyError ("Failed to generate manifest: {0} missing {1}".format (group, key))
    return None
|
81245795296c2fe6d21bb84f0cccfa197a966b24
| 10,972
|
def get_skin_cluster_influence_objects(skincluster):
    """
    Safe wrapper around OpenMaya.MFnSkinCluster.influenceObjects(), which
    raises RuntimeError when a skinCluster has zero influences.

    :param skincluster: A pymel.nodetypes.SkinCluster instance.
    :return: A list of pymel.PyNode instances (empty on failure).
    """
    influences = []
    try:
        influences = skincluster.influenceObjects()
    except RuntimeError:
        # Zero-influence clusters crash the underlying API call; treat
        # that as "no influences" rather than propagating.
        pass
    return influences
|
ebb686bc4ca718db104fccb08b4332de1df9d3d3
| 28,739
|
def _build_bool_type(var, property_path=None):
    """ Builds schema definitions for boolean type values.
    :param var: The boolean type value (unused; kept for signature
        symmetry with the other ``_build_*_type`` helpers)
    :param List[str] property_path: The property path of the current type
        (unused; kept for interface compatibility), defaults to None
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    # Dead code removed: the original normalized property_path to [] but
    # never used it — booleans need no path-dependent schema information.
    return {"type": "boolean"}
|
0be7e1ba38293d3912dbeee0b0a61fb6906875a0
| 181,920
|
import copy
def ApplyFunToRadii(fiber_bundle, fun):
    """
    Apply ``fun`` to the radius column of every fiber in the bundle.

    Parameters
    ----------
    fiber_bundle : [(,4)-array, ...]
        list of fibers; the last column holds each point's radius
    fun : function
        mapping applied to the radius column

    Returns
    -------
    res : [(,4)-array, ...]
        deep copy of the bundle with transformed radii (input untouched)
    """
    bundle_copy = copy.deepcopy(fiber_bundle)
    for fiber in bundle_copy:
        fiber[:, -1] = fun(fiber[:, -1])
    return bundle_copy
|
fd904747954dc7b894cf6e086c1843f98b9ece27
| 652,604
|
def upd_flag_by_other_flag(flag, other_flag):
    """
    Propagate 'o'utside markers from one flag list onto a copy of another.

    Returns a copy of ``flag`` where every position that is 'o' in
    ``other_flag`` has been set to 'o'; the input lists are not modified.
    """
    updated = flag.copy()
    for idx, marker in enumerate(other_flag):
        if marker == 'o':
            updated[idx] = 'o'
    return updated
|
8361b532ca52ee682d8a3da1e37ef2a0837bf8fd
| 409,318
|
def build_regex(name, email):
    """
    Build a regex string matching a name followed by an email.

    >>> build_regex("John Doe", "john@example.com")
    'John Doe.*john@example.com.*'

    :param name: Name of the person.
    :type name: str
    :param email: Email of the person.
    :type email: str
    :return: RegEx.
    :rtype: str
    """
    return f"{name}.*{email}.*"
|
93a88c7edbd30dd6530b9f2d26166ecbcb559b7a
| 517,461
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.