content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
import torch
def seq_continuous_group(xyz, sample_idx, nsample):
    """
    Group `nsample` consecutive point indices starting at each sampled index.

    Input:
        nsample: max sample number in local region
        sample_idx: the index generated by previous sample step, [B, npoint]
        xyz: all points, [B, N, 3] (only shape/device are used)
    Return:
        group_idx: grouped points index, [B, npoint, nsample]
    """
    B, N, _ = xyz.shape
    _, npoint = sample_idx.shape
    idx = torch.zeros((B, npoint, nsample), dtype=torch.long, device=xyz.device)
    for i in range(B):
        for j in range(npoint):
            # plain int so torch.arange accepts it regardless of device
            start = int(sample_idx[i, j])
            if (start + nsample) < N:
                # create the range on the target device so the copy is safe
                idx[i, j] = torch.arange(start, start + nsample,
                                         device=xyz.device)
            else:
                # window would run past the end: clamp to the last nsample points
                idx[i, j] = torch.arange(N - nsample, N, device=xyz.device)
    # Swap the first and the middle column so the anchor index comes first.
    # BUG FIX: previously `tmp = idx[:, :, mid]` was a *view*, so writing the
    # middle column overwrote tmp before it was used and the swap degenerated
    # into duplicating column 0. clone() snapshots the values first.
    mid = nsample // 2
    tmp = idx[:, :, mid].clone()
    idx[:, :, mid] = idx[:, :, 0]
    idx[:, :, 0] = tmp
    return idx
|
6dea4eb4ab014c0246ff6dda4e0ff1001119d761
| 336,080
|
import ipaddress
def is_internal(ip_address):
    """Return True when *ip_address* belongs to a private (internal) range."""
    addr = ipaddress.ip_address(ip_address)
    return addr.is_private
|
c5ef1d2cc2916edb22f65fe389cd2bd3daf8c60f
| 208,452
|
def _check_conversion(key, valid_dict):
"""Check for existence of key in dict, return value or raise error"""
if key not in valid_dict and key not in valid_dict.values():
# Only show users the nice string values
keys = [v for v in valid_dict.keys() if isinstance(v, str)]
raise ValueError('value must be one of %s, not %s' % (keys, key))
return valid_dict[key] if key in valid_dict else key
|
9f9054265b53efc9ee5d6aaf7aeb56e476d18dd8
| 72,351
|
def get_rel_path(path, base):
    """get relative path, e.g., get_rel_path('abc/de/fg', 'abc') => 'de/fg'
    """
    n = len(base)
    assert path[:n] == base
    remainder = path[n:]
    # drop a single separating slash, if present
    if remainder.startswith('/'):
        remainder = remainder[1:]
    return remainder
|
22752fda364ea599f26aaddd8ed2f25d348412c4
| 474,671
|
def isDescendantWidget(maybeParent, widget):
    """Return True if 'widget' is 'maybeParent' or a descendant of it.
    Widget parenthood is tested for Tk in this function.
    """
    if widget is maybeParent:
        return True
    # Recurse into each Tk child until the widget is found (or not).
    children = maybeParent.winfo_children()
    return any(isDescendantWidget(child, widget) for child in children)
|
3ef70f5cb2d9a8cf48caf6fc9fd17407523bd380
| 578,636
|
def bbox_sanity_check(img_size, bbox):
    """
    Clamp bounding-box coordinates so they lie inside the image.
    Args:
        img_size: (width, height) of the image
        bbox: [x1, y1, x2, y2] coordinates; modified in place
    Return:
        The clamped (or untouched) bbox, same object that was passed in.
    """
    img_width, img_height = img_size
    # top-left corner must not be negative
    bbox[0] = max(bbox[0], 0.0)
    bbox[1] = max(bbox[1], 0.0)
    # bottom-right corner must stay strictly inside the image
    bbox[2] = min(bbox[2], img_width - 1)
    bbox[3] = min(bbox[3], img_height - 1)
    return bbox
|
43bf7d415624f4cd7623afd153bd0fbe01426a19
| 180,633
|
def _dedup_preserving_order(list):
"""Given a list, deduplicate its elements preserving order."""
r = []
seen = {}
for e in list:
if e in seen:
continue
seen[e] = 1
r.append(e)
return r
|
299b1b750d7ba32ab34f3eb36bc8185041e511cc
| 284,307
|
import glob
def _GetSequentialFileName(base_name):
"""Returns the next sequential file name based on |base_name| and the
existing files."""
index = 0
while True:
output_name = '%s_%03d' % (base_name, index)
if not glob.glob(output_name + '.*'):
break
index = index + 1
return output_name
|
943583418b2bac1b8a42c61f5f33b0bc4f38f519
| 668,873
|
def _GetRelativePathOfChrootPath(chroot_path):
"""Gets the relative path of the chroot path passed in.
Args:
chroot_path: The chroot path to get its relative path.
Returns:
The relative path after '/mnt/host/source/'.
Raises:
ValueError: The prefix of 'chroot_path' did not match '/mnt/host/source/'.
"""
chroot_prefix = '/mnt/host/source/'
if not chroot_path.startswith(chroot_prefix):
raise ValueError('Invalid prefix for the chroot path: %s' % chroot_path)
return chroot_path[len(chroot_prefix):]
|
cd85e15e8f84ef1d32b51d955e9713f737ba9dbf
| 239,114
|
import torch
def encode(matched, priors, variances):
    """
    Encode ground-truth boxes relative to prior boxes for loss computation.

    Args:
        matched (tensor): Ground-truth coords per prior in xyxy form,
            shape [num_priors, 4].
        priors (tensor): Prior boxes in center-offset form, shape [num_priors, 4].
        variances (list[float]): Variances of the prior boxes.
    Returns:
        Encoded offsets (tensor), shape [num_priors, 4]: center deltas
        followed by log width/height ratios. (Landmark encoding was left
        out of this variant.)
    """
    priors = priors.clone()
    # offset between the matched box center and the prior center,
    # normalized by the variance-scaled prior size
    centers = (matched[:, 0:2] + matched[:, 2:4]) / 2
    g_cxcy = (centers - priors[:, 0:2]) / (variances[0] * priors[:, 2:4])
    # log-ratio of matched wh to prior wh, variance-scaled
    wh_ratio = (matched[:, 2:4] - matched[:, 0:2]) / priors[:, 2:4]
    g_wh = torch.log(wh_ratio) / variances[1]
    return torch.cat([g_cxcy, g_wh], 1)
|
d7edf0f6419ff44041e925db53bec2b3bc15956b
| 295,589
|
def _convert_snake_to_pascal(snake_case_string: str) -> str:
"""
Convert a string provided in snake_case to PascalCase
"""
return ''.join(word.capitalize() for word in snake_case_string.split('_'))
|
41fd25da7fa5b6f120fae63aafb5bed1438256bd
| 689,162
|
def counting_sort(array, k, key_func=lambda x: x, ascending=True):
    """Sort array using the counting sort algorithm (stable).

    Parameters
    ----------
    array : list
        List to be sorted; key_func applied to its elements must produce
        integers in the range 0 to k-1.
    k : int
        One greater than the maximum value key_func can produce.
    key_func : func, optional
        Maps an element of array to its integer key in [0, k).
    ascending : bool, optional
        If True, smallest to largest; otherwise the result is reversed.

    Returns
    -------
    list
        A sorted copy of the input (the input list is not modified).
    """
    items = list(array)
    # histogram of keys
    counts = [0] * k
    for item in items:
        counts[key_func(item)] += 1
    # turn the histogram into starting positions (exclusive prefix sums)
    running = 0
    for key in range(k):
        counts[key], running = running, running + counts[key]
    # scatter items into their slots, bumping the per-key cursor as we go
    out = [None] * len(items)
    for item in items:
        slot = counts[key_func(item)]
        out[slot] = item
        counts[key_func(item)] = slot + 1
    return out if ascending else out[::-1]
|
5c55c39bf5fe1431880657f80feb9b3f2f474645
| 185,039
|
def sort_objects_by_score(objects, reverse=True):
    """
    Put any set of objects in order from high score to low score.
    """
    # Negating the key flips the order while keeping the sort stable.
    factor = -1 if reverse else 1
    return sorted(objects, key=lambda obj: factor * obj['score'])
|
7da184e1bbbe8b4df03c5e313cd9264cfa4c7b1b
| 454,773
|
def api_argument_type(value):
    """
    A workaround for the Prowl API not accepting the string "0" as a valid
    argument.
    """
    # Appending a space keeps the value distinct from the literal "0".
    return value + ' ' if value == '0' else value
|
e6889907e534054fc18b289b9261ad6035cd215d
| 556,862
|
import mpmath
def interval_prob(x1, x2, a, b):
    """
    Compute the probability of x in [x1, x2] for the beta distribution.
    Mathematically, this is the same as
        beta.cdf(x2, a, b) - beta.cdf(x1, a, b)
    but when the two CDF values are nearly equal, this function will give
    a more accurate result.
    x1 must be less than or equal to x2.
    """
    if x1 > x2:
        raise ValueError('x1 must not be greater than x2')
    # Extra working precision avoids cancellation in the incomplete beta.
    with mpmath.extradps(5):
        lo, hi, alpha, beta = (mpmath.mpf(v) for v in (x1, x2, a, b))
        return mpmath.betainc(alpha, beta, lo, hi, regularized=True)
|
6bd34f64828165e3e48a810ac32a958300a38a33
| 606,903
|
def sort_by_size(L):
    """
    Return a copy of precinct list L, sorted into decreasing order by size.
    """
    # ascending sort of a copy, then reversed — input is left untouched
    return sorted(L)[::-1]
|
151dcd7c108494b3998b6e106c234d1c7dbff376
| 67,450
|
def to_hex(value):
    """Converts RGB tuple to hex string, e.g. (255, 0, 16) -> '#FF0010'.

    Fixes two bugs in the previous implementation: ``hex(i).lstrip('0x')``
    strips any leading '0'/'x' *characters* (so 0x0f lost its zero), and
    ``ljust`` padded on the wrong side (1 became '10' instead of '01').
    """
    return '#' + ''.join('%02X' % component for component in value)
|
f98cf0b3618ed2cfe29c389ae529e120f03ca484
| 456,592
|
def get_spi_flash(tree):
    """Get the SPI Flash node"""
    # the flash chip carries the jedec,spi-nor compatible; its parent is
    # the node we want
    matches = tree.match("jedec,spi-nor")
    if not matches:
        return None
    return matches[0].parent
|
51a1007cf62a799ace24c272d996b9fcaa4a5eb0
| 541,361
|
def smart_truncate(text, limit=100, suffix='...'):
    """
    Truncate *text* at the last word boundary before `limit` characters and
    append `suffix`. Text already within the limit is returned unchanged.
    Negative limits work via Python slicing semantics.
    :param text: <str>
    :param limit: <int>
    :param suffix: <str>
    :return: <str>
    """
    if len(text) <= limit:
        return text
    clipped = text[:limit]
    head = clipped.rsplit(' ', 1)[0]
    return head + suffix
|
4b95d3b200f9e54adbb681f3db9206b3c5a9d7e6
| 625,028
|
import torch
def random_z(num: int, min_height: float, max_height: float, device: str) -> torch.Tensor:
    """Returns sampled height of the goal object."""
    # uniform samples in [0, 1), linearly rescaled to [min_height, max_height)
    span = max_height - min_height
    samples = torch.rand(num, dtype=torch.float, device=device)
    return span * samples + min_height
|
d24b40cbbe08c1753fc4b9304fba7ba2e26373ec
| 83,010
|
def is_missing_atom_map(molecule):
    """
    Check whether any atom in *molecule* lacks a map index (map index 0).
    Returns True as soon as one unmapped atom is found.
    Parameters
    ----------
    molecule: oechem.OEMOl
    Returns
    -------
    bool
    """
    for atom in molecule.GetAtoms():
        if atom.GetMapIdx() == 0:
            return True
    return False
|
617b4fb51c777daf89d4ce0c8b6ba71752306dad
| 635,414
|
from typing import Any
def is_fixture(obj: Any) -> bool:
    """
    Returns True if and only if the object is a fixture function
    (it would be False for a Fixture instance,
    but True for the underlying function inside it).
    """
    if not hasattr(obj, "ward_meta"):
        return False
    return obj.ward_meta.is_fixture
|
b319c0b0da526142c541164694143411443dd1a1
| 652,977
|
def verify_input(log):
    """
    Loop until the user answers yes or no.
    *log* is the prompt printed so the user knows what they are agreeing to.
    Returns True for 'y', False for 'n' (case-insensitive).
    """
    while True:
        answer = input(log).lower()
        if answer == 'y':
            return True
        if answer == 'n':
            return False
        print("Please enter a valid response")
|
7714f52f49503ff67fce1ce15b585ca8e4c409ab
| 268,480
|
import hashlib
def compute_resource_hash(resource: str) -> str:
    """Calculate compute resource hash (SHA-1 of the UTF-8 bytes).
    :param str resource: resource
    :rtype: str
    :return: hash of resource
    """
    digest = hashlib.sha1(resource.encode('utf8'))
    return digest.hexdigest()
|
f3d4880519a86f39a6b3378b1e3b5353265ea86e
| 395,290
|
def dsfr_accordion_group(items: list) -> dict:
    """
    Returns a group of accordion items. Takes a list of dicts as parameters (see the accordeon
    tag for the structure of these dicts.)
    **Tag name**::
        dsfr_accordion_group
    **Usage**::
        {% dsfr_accordion_group data_list %}
    """
    context = {"items": items}
    return {"self": context}
|
ca1b459c159c4164b8f88013a6149cc242260e31
| 607,307
|
def create_annotationlist_id(manifest_info, canvas_id, annolist_idx, opts):
    """
    Return (uri, filename) for annotation list
    """
    # fall back to the manifest id when no URL prefix is configured
    prefix = opts['url_prefix'] or manifest_info['id']
    if opts['annolist_name_scheme'] == 'canvas':
        # derive the filename from the last path segment of the canvas id
        fn = canvas_id.split('/')[-1] + '-annolist.json'
    else:
        fn = f"annolist-{annolist_idx}.json"
    return prefix + '/' + fn, fn
|
7913611623c1f598d37e2f2f347412c61d186e4e
| 114,654
|
def _combine_ws(parts, whitespace):
"""Combine whitespace in a list with the element following it.
Args:
parts: A list of strings.
whitespace: A string containing what's considered whitespace.
Return:
The modified list.
"""
out = []
ws = ''
for part in parts:
if not part:
continue
elif part in whitespace:
ws += part
else:
out.append(ws + part)
ws = ''
if ws:
out.append(ws)
return out
|
501e3058a26b0ef9fa4cbfaf054866d17d9b2482
| 582,232
|
def hsl_to_rgb(h, s, l):
    """
    Convert an HSL color value to RGB, using the formulas from
    http://en.wikipedia.org/wiki/HSL_color_space.
    All of h, s, l are expected in [0, 1]; the returned r, g, b are in [0, 1].

    Parameters
    ----------
    h : double
        the hue
    s : double
        the saturation
    l : double
        the lightness

    Returns
    -------
    tuple
        the color in RGB format
    """
    if s == 0:
        # achromatic: every channel equals the lightness
        return l, l, l

    def channel(p, q, t):
        # piecewise-linear hue ramp for one channel
        t = t % 1
        if t < 1/6:
            return p + (q - p) * 6 * t
        if t < 1/2:
            return q
        if t < 2/3:
            return p + (q - p) * (2/3 - t) * 6
        return p

    q = l * (1 + s) if l < 0.5 else l + s - l * s
    p = 2 * l - q
    return (channel(p, q, h + 1/3),
            channel(p, q, h),
            channel(p, q, h - 1/3))
|
b0508f514c854331b7c26347f71d1580fb7d3b5e
| 240,630
|
def mag_to_flux(mag, zeropoint):
    """Convert a magnitude into a flux.
    We get the conversion by starting with the definition of the magnitude scale.
    .. math::
        m = -2.5 \\log_{10}(F) + C
        2.5 \\log_{10}(F) = C - m
        F = 10^{\\frac{C-m}{2.5}}
    :param mag: magnitude to be converted into a flux.
    :param zeropoint: zeropoint (in mags) of the magnitude system being used
    :return: flux that corresponds to the given magnitude
    """
    exponent = (zeropoint - mag) / 2.5
    return 10 ** exponent
|
e3fd5d7cd97fd97517f42ed31a385a8b8b90c694
| 690,089
|
def calculate_sensitivity(blast_results, target_accessions):
    """
    Calculate the sensitivity of the oligo (binding to target sequences).
    A "binding" hit requires 100% query coverage (qlen == length) and 100%
    percent identity against the target sequence.
    Sensitivity = TP / (TP + FN), where:
        TP = successfully amplified target accessions
        FN = target accessions that were not amplified
        TP + FN = total number of target accessions
    """
    # rows with full query coverage and perfect identity
    perfect = blast_results.loc[
        (blast_results['qlen'] == blast_results['length'])
        & (blast_results['pident'] == 100.0)
    ]
    amplified = set(perfect.loc[:, 'sacc'])
    # count amplified accessions that are actual targets
    hits = sum(1 for acc in amplified if acc in target_accessions)
    return hits / len(target_accessions)
|
11b517041bfd460f1b07936cfa0a5c8a27452ef0
| 446,829
|
import random
def convkey(x, left=False, right=False):
    """Turns a numerical salary value into a string preserving comparison."""
    assert not (left and right)
    # marker orders left-edge < middle < right-edge keys for equal values
    if left:
        marker = '0'
    elif right:
        marker = '9'
    else:
        marker = '5'
    # five random printable-ASCII characters break remaining ties
    noise = ''.join(chr(random.randrange(32, 127)) for _ in range(5))
    return "{:0>10.2f}".format(x) + marker + noise
|
20b70b136caeed484ddbaa3d0b7f7691b8f38318
| 669,924
|
def read_taxid_list(filename, _dict=None):
    """ Read a taxID list file.
    A taxID list file consists of three tab separated columns: 1. ID type,
    2. ID of sequence, 3. NCBI taxonomy ID for the sequence. It is headed by
    one line starting with the '#' char.
    Parameters
    ----------
    filename : str
        Path to the file containing the taxID list.
    _dict : dict
        Optional. Provide an existing dictionary into which parsed results
        should be added. Useful if the taxID list consists of several files.
    Returns
    -------
    A dict of dict. First dict's keys are the sequence types, e.g. "gi",
    "GeneID", "NC". Second level keys are the sequence IDs and their values are
    the according NCBI taxonomy IDs, or taxIDs for short.
    Raises
    ------
    ValueError
        If a line does not contain of exactly three tab delimited fields.
    """
    if _dict is None:
        _dict = dict()
    # context manager replaces the manual close()-before-raise bookkeeping;
    # the file is closed on both the success and the error path
    with open(filename, 'r') as f:
        f.readline()  # skip the single '#' header line
        for line in f:
            try:
                _type, accession, taxid = line.rstrip().split("\t")
            except ValueError:
                raise ValueError("Error parsing line '%s' of file '%s'" %
                                 (line, filename))
            _dict.setdefault(_type, {})[accession] = taxid
    return _dict
|
1c9748985de32f9be8866c122a80890adec21063
| 591,038
|
def syllableHelper(word):
"""
# syllableHelper is a function that is a building block for a function
# that counts syllables. It transforms a word by doing two things:
# (1) changing all vowels to the letter a
# (2) removing all consecutive duplicates
# parameter word should be a string--if not return boolean value False
# otherwise, return another string, with all vowels changed to the letter a,
# and consecutive duplicates removed
# examples:
# syllableHelper("by") produces "ba"
# syllableHelper("alien") produces "alan"
# syllableHelper("boot") produces "bat"
# syllableHelper("fruition") produces "fratan"
# Note: empty string is legal, and should return empty string
"""
if type(word)!=str:
return False
if word=='':
return ''
List=['a','e','i','o','u','y','A','E','I','O','U','Y']
I=len(word)
J=len(List)
j=0
i=0
lista=list(word)
result=''
for i in range(0,I):
for j in range(0,J-1):
if word[i]==List[j]:
lista[i]='a'
word=''.join(lista)
else:
j=j+1
for k in range(0,I):
if word[k-1]!=word[k]:
result+=word[k]
if word[0]=='a' and result[0]!='a':
result=word[0]+result
return result
|
dcf883d30ed21af7824253d5ad3e42e9409e5530
| 226,360
|
def genre_choices(entity_instance):
    """Choices for the possible movie genres"""
    genres = ['action', 'animation', 'comedy', 'drama',
              'sci-fi', 'war', 'thriller', 'family']
    # first entry is the empty "no genre" choice
    choices = [(None, '')]
    choices.extend((g, g.title()) for g in genres)
    return choices
|
961468e3dc36abd040a85c78b21d1988194ce835
| 434,065
|
def is_truncate(q):
    """
    True if `q` is a TRUNCATE table statement
    :param q:
    :return:
    """
    lowered = q.lower()
    return 'truncate' in lowered
|
c5def37dab064c36f0f28de6310d6ce265571e1d
| 67,773
|
import importlib
def can_import(module):
    """ Checks if <module> can be imported, returns ``True`` if it can be,
    ``False`` otherwise.
    To use with ``unittest.skipUnless`` for tests conditional on *optional*
    dependencies, which may or may be present but must still be tested if
    possible.
    """
    try:
        importlib.import_module(module)
        return True
    except ImportError:
        return False
|
072ab6d7945c02fa0e993abb5706b1c0dfb07c84
| 546,610
|
def is_hpp(file):
    """
    Return True if *file* has a C/C++ header extension (.hpp or .h).

    NOTE(review): despite the original wording ("header of .cpp"), .cpp
    files are NOT matched — only the two extensions below. A bare name
    like "hpp" (no dot) also matches, because split('.') then yields the
    whole string as the last element.
    """
    return file.split(".")[-1] in ["hpp", "h"]
|
46a241ec02272463505ac36491940497ee41c092
| 404,333
|
def get_readable_time(duration: int) -> str:
    """
    Format the time to a readable format.
    Parameters
    ----------
    duration : int
        Time in ms
    Returns
    -------
    string
        The time splitted to highest used time (minutes, hours, ...)
    """
    # successive divmods peel off ms, seconds, minutes; the rest is hours
    duration, ms = divmod(duration, 1000)
    duration, sec = divmod(duration, 60)
    hours, minutes = divmod(duration, 60)
    if hours != 0:
        return "%ih, %i minutes %is %ims" % (hours, minutes, sec, ms)
    elif minutes != 0:
        return "%i minutes %is %ims" % (minutes, sec, ms)
    elif sec != 0:
        return "%is %ims" % (sec, ms)
    else:
        return "%ims" % ms
|
079eb23abee2afd42f7ba65fddb2b80bd846809f
| 277,129
|
def to_bbox_str(llo: float, lla: float, mlo: float, mla: float) -> str:
    """
    Given (min_lon, min_lat, max_lon, max_lat) bounding box values, returns a string representation understood by
    Mapillary APIs.
    """
    return f"{llo},{lla},{mlo},{mla}"
|
9a9ba5e04fc62e29ab527d2cdd25af635c1aaa9b
| 382,038
|
def calculate_stress_ratio(stress_operating, stress_rated):
    """Calculate the operating electrical stress ratio of a device.
    Inputs can be floats, integers, or a combination.
    >>> calculate_stress_ratio(0.382, 1.29)
    0.2961240310077519
    >>> calculate_stress_ratio(1, 2)
    0.5
    >>> calculate_stress_ratio(0.382, 2)
    0.191
    >>> calculate_stress_ratio(1, 1.29)
    0.7751937984496123
    Rated stress must not be zero:
    >>> calculate_stress_ratio(0.382, 0.0)
    Traceback (most recent call last):
    ...
    ZeroDivisionError: float division by zero
    Stress inputs must not be strings:
    >>> calculate_stress_ratio(0.382, '3.2')
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for /: 'float' and 'str'
    :param stress_operating: the device's operating level of the stress.
    :param stress_rated: the devices's rated stress.
    :return: the ratio of operating stress to rated stress.
    :rtype: float
    :raise: TypeError if an input value is non-numerical.
    :raise: ZeroDivisionError if the rated stress is zero.
    """
    ratio = stress_operating / stress_rated
    return ratio
|
9e3d1b77f17267f1ba2549fa50fe4f48686bd69f
| 519,436
|
def _is_no_cache(request):
"""Returns True if the request should skip the cache."""
cache_control = request.headers.get('Cache-Control') or ''
return 'no-cache' in cache_control or 'max-age=0' in cache_control
|
bf8421a3f9a654a877cdf518aa9a4de532098f89
| 12,967
|
def get_host_context(resp_host):
    """
    Prepare host context data as per Demisto's standard.
    :param resp_host: response from host command.
    :return: Dictionary representing the Demisto standard host context.
    """
    os_scanner = resp_host.get('operatingSystemScanner', {})
    context = {
        'ID': resp_host.get('id', ''),
        'Hostname': resp_host.get('hostName', ''),
        'IP': resp_host.get('ipAddress', ''),
        'OS': os_scanner.get('name', ''),
    }
    return context
|
0f8b38cf88d6cbd3921bee9cc9d55d1d3eaadc2f
| 222,408
|
def read_last(file_name, n_lines=1):
    """
    Read the last *n_lines* lines of a file.
    Parameters
    ----------
    :param file_name: string
        Complete path of the file that you would like read.
    :param n_lines: int
        Number of trailing lines to return (default 1).
    :return last_lines: string
        The trailing lines joined into one string, original line endings
        preserved. On a missing/unreadable file an error-message string is
        returned instead of raising, matching the original best-effort API.
    """
    try:
        with open(file_name, mode='r') as infile:
            lines = infile.readlines()
    except IOError:
        # typo fixed ("IOEror"); sentinel-string API kept for compatibility
        return 'IOError in read_last_line: this file does not exist.'
    # readlines() keeps each line's trailing '\n'; joining with '' avoids
    # the doubled blank lines that '\n'.join() used to insert.
    return ''.join(lines[-n_lines:])
|
49ab2af4551c4d9d28e0db911cc7d846f866d9e8
| 68,256
|
def find_twin(device_id, device_list):
    """
    Locate the emulated twin of source device with identifier device_id.
    Parameters
    ----------
    device_id : str
        Identifier of original device for which the twin is located.
    device_list : list
        List of device dictionaries in project fetched by the API.
    Returns
    -------
    twin : dict
        Dictionary of device information for located twin.
        Returns None if no twin were found.
    """
    for device in device_list:
        # only emulated devices (name's last path segment starts with 'emu')
        short_name = device['name'].split('/')[-1]
        if not short_name.startswith('emu'):
            continue
        labels = device['labels']
        # the twin carries the source device's id in its labels
        if 'original_device_id' in labels and labels['original_device_id'] == device_id:
            print('-- Located twin [{}].'.format(labels['name']))
            return device
    return None
|
9bad722712069b86cd1912a7daadd81253f0f578
| 653,442
|
def validate_row_monotonicity(df, col, rank_col, idx_col="project",
                              increasing=True, msg=""):
    """
    Check whether row values for column(s) 'col' are monotonically increasing
    for each index in 'idx_col', when sorted by idx_col and rank_col.
    Example: check whether the new potential for each project monotonically
    increases with the modelling period.
    :param df:
    :param col: str or list of str, column(s) to check for monotonic increase
    :param rank_col: str, column to sort along before checking monotonicity
    :param idx_col: str, default 'project', index column; will check
        monotonicity for each idx in the index
    :param increasing: boolean, default True, whether to check for monotonic
        increase or decrease
    :param msg: str, default '', optional clarifying error message.
    :return: list of error strings, one per offending column
    """
    results = []
    cols = [col] if isinstance(col, str) else col
    for c in cols:
        df2 = df.dropna(subset=[c])
        group = df2.sort_values([idx_col, rank_col]).groupby(idx_col)[c]
        if increasing:
            # FIX: Series.is_monotonic was removed in pandas 2.0;
            # is_monotonic_increasing is the long-supported equivalent.
            invalids = ~group.apply(lambda x: x.is_monotonic_increasing)
            direction = "increase"
        else:
            invalids = ~group.apply(lambda x: x.is_monotonic_decreasing)
            direction = "decrease"
        if invalids.any():
            bad_idxs = invalids.index[invalids]
            print_bad_idxs = ", ".join(bad_idxs)
            results.append(
                "{}(s) '{}': {} should monotonically {} with {}. {}"
                .format(idx_col, print_bad_idxs, c, direction, rank_col, msg)
            )
    return results
|
9129a12eb3e6518fee9f7ee67d77f3796556f14b
| 350,458
|
import json
def _read_city_name(path: str) -> str:
"""read city name from json file in the log folder
Args:
path: path to the json city_name in the log folder
Returns:
city_name: city name of the current log, either 'PIT' or 'MIA'
"""
with open(path, "r") as f:
city_name = json.load(f)["city_name"]
assert isinstance(city_name, str)
return city_name
|
d758be33e25cf36260f97dbb23be4441c9f4ae75
| 633,252
|
def name_with_functools(args):
    """my docstring"""
    # Identity function. The name/docstring suggest it is a fixture used to
    # test decorator metadata (functools.wraps) elsewhere — NOTE(review):
    # nothing visible here uses functools, so the docstring text may be
    # asserted by tests and is deliberately left untouched.
    return args
|
1eed2bfed71c73cf288fe40798a031e9c876c28c
| 499,395
|
from typing import Dict
from typing import Any
def validObject(object_: Dict[str, Any]) -> bool:
    """
    Check if the Dict passed in POST is of valid format or not.
    (if there's an "@type" key in the dict)
    :param object_ - Object to be checked
    """
    return "@type" in object_
|
6c40ad1cef0a8f056d2e00c9a7632108bfd2f506
| 691,353
|
def yes_or_no(question):
    """Asks a yes or no question, and captures input. Blank input is interpreted as Y."""
    reply = str(input(question + ' (Y/n): ')).capitalize().strip()
    if not reply:
        # blank answer defaults to yes
        return True
    first = reply[0]
    if first == 'Y':
        return True
    if first == 'N':
        return False
    # anything else: re-ask until a recognizable answer arrives
    return yes_or_no("Please enter Y or N.")
|
f441e39b85dc7407bedce5c120aa9839f0a3064f
| 18,897
|
def _transform_summarized_rep(summarized_rep):
    """change around the keys of the summarized_representation dictionary
    This makes them more readable
    This function makes strong assumptions about what the keys look like
    (see PooledVentralStreams.summarize_representation for more info on this):
    a single string or tuples of the form `(a, b)`, `(a, b, c), or `((a,
    b, c), d)`, where all of `a,b,c,d` are strings or ints. We convert
    them as follows (single strings are untouched):
    - `(a, b) -> error_a_b`
    - `(a, b, c) -> error_a_scale_b_band_c`
    - `((a, b, c), d) -> error_a_scale_b_band_c_d`
    Parameters
    ----------
    summarized_rep : dict
        the dictionary whose keys we want to remap.
    Returns
    -------
    summarized_rep : dict
        dictionary with keys remapped
    """
    # NOTE(review): the docstring's mappings do not match the code below.
    # A plain 2-tuple (a, b) is rendered "error_scale_a_band_b", not
    # "error_a_b"; and for ((a, b, c), d) the template has only three
    # placeholders while four values are passed, so `d` is silently
    # dropped (str.format ignores extra positional args). Confirm which
    # behavior is intended before changing either.
    new_summarized_rep = {}
    for k, v in summarized_rep.items():
        if not isinstance(k, tuple):
            # plain string key: just prefix it
            new_summarized_rep["error_" + k] = v
        elif isinstance(k[0], tuple):
            # ((a, b, c), d): formats a, b, c — d is discarded (see NOTE)
            new_summarized_rep["error_scale_{}_band_{}_{}".format(*k[0], k[1])] = v
        else:
            if len(k) == 2:
                # (a, b): rendered as scale/band pair (see NOTE)
                new_summarized_rep["error_scale_{}_band_{}".format(*k)] = v
            else:
                # longer flat tuples: underscore-joined (elements must be str)
                new_summarized_rep['error_' + '_'.join(k)] = v
    return new_summarized_rep
|
acaaf9f4753ef8911e9354c2f7bd8ae27cc9b0f1
| 120,726
|
def getHeight(picture):
    """Return the height of a given picture (the second element)."""
    height = picture[1]
    return height
|
78f4af75b099a5481c80ccb6c16ba506fdee94f5
| 256,011
|
def read_pnm_header(infile, supported='P6'):
    """
    Read a PNM header, return width and height of the image in pixels.
    """
    fields = []
    # collect magic, width, height, maxval (comments after '#' are ignored)
    while len(fields) < 4:
        line = infile.readline()
        comment_start = line.find('#')
        if comment_start > -1:
            line = line[:comment_start]
        fields.extend(line.split())
        if len(fields) == 3 and fields[0] == 'P4':
            break  # PBM has no maxval field
    if fields[0] not in supported:
        raise NotImplementedError('file format %s not supported' % fields[0])
    if fields[0] != 'P4' and fields[3] != '255':
        raise NotImplementedError('maxval %s not supported' % fields[3])
    return int(fields[1]), int(fields[2])
|
6954f2b476f7fddec7b523cac0c77a2c63a954ab
| 126,962
|
def number_of_lottery_tickets(total_sales):
    """
    Calculate the number of lottery tickets to assign to a participant
    based on their sales, greedily consuming the largest sales tier first
    and capping the result at 15 tickets.
    Args:
        total_sales(int): total sales done by the participant.
    """
    remaining = int(total_sales)
    MAX_TICKETS = 15
    tier_tickets = {20000: 1, 30000: 2, 50000: 5}
    tiers = sorted(tier_tickets)  # ascending; popped from the largest end
    tickets = 0
    while remaining and tiers and tickets <= MAX_TICKETS:
        tier = tiers.pop()
        if remaining >= tier:
            multiples, remaining = divmod(remaining, tier)
            tickets += multiples * tier_tickets[tier]
    return min(tickets, MAX_TICKETS)
|
84802c36437466ddb3d70175e605de2ddac4f3a7
| 344,064
|
def onehot_to_int(seqs, axis=-1):
    """Translates one-hot sequences to integer sequences.

    The position of the maximum (the 1 in a one-hot encoding) along *axis*
    is the class index. Works on any array-like exposing ``argmax`` with an
    ``axis`` keyword (numpy arrays; NOTE(review): presumably also torch
    tensors via the ``axis`` alias — confirm intended backend).
    """
    return seqs.argmax(axis=axis)
|
409d9e4bfa0c4b2e3219cd79f56f36fdb653b5bc
| 537,730
|
import locale
def cell_format(data):
    """Formats the data to put in a table cell."""
    if isinstance(data, int):
        # integers: thousands separators per the current locale
        return locale.format_string("%d", data, grouping=True)
    if isinstance(data, float):
        # floats: two decimal places plus locale thousands separators
        return locale.format_string("%.2f", data, grouping=True)
    # anything else falls back to its string representation
    return str(data)
|
a91b1a6e14918d19f6100e25fa830ce005c51e1a
| 624,202
|
def str_to_int_array(string, base=16):
    """
    Converts a string to an array of integer values according to the
    base specified (int numbers must be whitespace delimited).\n
    Example: "13 a3 3c" => [0x13, 0xa3, 0x3c]
    :return: [int]
    """
    values = []
    for token in string.split():
        values.append(int(token, base))
    return values
|
febd897cb3ccd149dd539c8674c529d104382189
| 89,020
|
def work_grid(grid, fig):
    """Take a two dimensional grid, add subplots to a figure for each cell and do layer work.
    Parameters:
    -----------
    grid: a two dimensional grid of layers
    fig: matplotlib figure to draw on
    Returns:
    --------
    axes: a two dimensional list of matplotlib axes
    """
    nrows, ncols = len(grid), len(grid[0])
    axes = []
    for r, row_layers in enumerate(grid):
        axis_row = []
        for c, layer in enumerate(row_layers):
            # subplot positions are 1-based and count row-major
            ax = fig.add_subplot(nrows, ncols, ncols * r + c + 1)
            layer.work(ax=ax)
            axis_row.append(ax)
        axes.append(axis_row)
    return axes
|
f1cf6dc59e75f7d50c9aff8651c12ba6e9f5bee5
| 671,842
|
from typing import List
def checksum(digits: List[int]) -> int:
    """
    Calculate and return control digit for given list of digits based on
    ISO7064, MOD 11,10 standard.
    """
    remainder = 10
    for digit in digits:
        # add-mod-10, mapping 0 to 10 per the standard, then double mod 11
        remainder = (remainder + digit) % 10 or 10
        remainder = (remainder * 2) % 11
    control = 11 - remainder
    return 0 if control == 10 else control
|
8b54063d116c8fa3d52ab8405dbe222c504a7168
| 356,684
|
def depthpredicate(maxdepth):
    """Create a predicate that only descends the tree to a maximum depth.
    """
    # the iterator's repLength is its current depth in the tree
    return lambda it: it.repLength < maxdepth
|
d77332fe99c2c6ce2fd5911f612202257ee9d0d2
| 75,609
|
def solution(power: int = 1000) -> int:
    """
    Returns the sum of the digits of the number `2 ^ power`.
    >>> solution(1000)
    1366
    >>> solution(50)
    76
    >>> solution(20)
    31
    >>> solution(15)
    26
    """
    if not isinstance(power, int):
        raise TypeError("The parameter `power` should be of type int.")
    if power < 0:
        raise ValueError("The value of `power` should be greater than or equal to 0.")
    return sum(int(ch) for ch in str(2 ** power))
|
be99bb9aa22acfee37cd0998cab83044d8ae21b2
| 99,499
|
def _find_param_separator(tokens):
"""
Return the index of the param separator.
:param list tokens: list of tokens on the parameter line
:returns: integer index of the separator or :data:`None` if no
separator is found
:rtype: int
Different versions of sphinxcontrib-httpdomain/autotornado use different
renderings for HTTP parameters. For example ``name (type) -- text``
where the ``--`` might be a single hyphen or the unicode em-dash...
"""
idx = [i for i, v in enumerate(tokens) if v in ('\u2013', '--', '-')]
return idx[0] if idx else None
|
563d51034784be835becc14e5aeb23199d34f413
| 491,338
|
import json
def read_json_config(cfg):
    """Read a JSON configuration. First attempt to read as a JSON
    string. If that fails, assume that it is a JSON file and attempt
    to read contents from the file.
    Args:
        cfg: a config string or file path
    Returns:
        dict of config options
    """
    try:
        return json.loads(cfg)
    except ValueError:
        # Not a JSON string; treat it as a path to a JSON file.
        # FIX: the file handle was previously opened and never closed.
        with open(cfg, 'r') as cfg_file:
            return json.load(cfg_file)
|
2268297273dbfb468e0a8391981b4795702ba0b7
| 16,723
|
def crop(img, start_y, start_x, h, w):
    """
    Crop an image given the top left corner.
    :param img: The image (H x W x C array)
    :param start_y: The top left corner y coord
    :param start_x: The top left corner x coord
    :param h: The result height
    :param w: The result width
    :return: A copy of the cropped region.
    """
    end_y = start_y + h
    end_x = start_x + w
    region = img[start_y:end_y, start_x:end_x, :]
    return region.copy()
|
cb3a05989f1538bcec34102c33291d500a21c59d
| 701,133
|
def look_and_say(numbers):
    """
    Perform one look-and-say iteration: each maximal run of a repeated
    digit is collapsed into "<run length><digit>", e.g. '111' -> '31'
    and '1211' -> '111221'.

    :param numbers: string of digits
    :return: the look-and-say encoding of ``numbers``; '' for empty input
        (the original produced a spurious "0" in that case)
    """
    if not numbers:
        return ""
    parts = []
    digit = numbers[0]
    count = 0
    for c in numbers:
        if c == digit:
            count += 1
        else:
            # Run ended: emit "<count><digit>" and start a new run.
            parts.append(str(count) + digit)
            digit = c
            count = 1
    parts.append(str(count) + digit)  # flush the final run
    return "".join(parts)
|
0513cf11d1a3b2e0cc9978524e8f4eaf2d115bf9
| 236,662
|
def dot(a, b):
    """The dot product between vectors a and b."""
    total = 0
    for i in range(len(a)):
        total += a[i] * b[i]
    return total
|
5ba6e253a2408c79c9f1edd0e98a84904e966050
| 670,064
|
import re
def find_str_matched(strs, pat, regex=False, full=False, ind=False):
    """Find the strings that match the pattern from start
    Args:
        strs (sequence)
        pat (str)
        regex (bool) : if methods in the re package should be used
        full (bool) : if the full string should be matched
        ind (bool) : if set true, the indices of the matched strings are
            returned instead of the strings themselves
    Returns:
        list
    """
    # Pick one predicate up front instead of branching inside the loop.
    if regex:
        matcher = re.fullmatch if full else re.match
        def hit(s):
            return matcher(pat, s)
    elif full:
        def hit(s):
            return s == pat
    else:
        def hit(s):
            return s.startswith(pat)
    if ind:
        return [i for i, s in enumerate(strs) if hit(s)]
    return [s for s in strs if hit(s)]
|
c2c110031fef2852a5a08ec3f189af60ed2651ad
| 194,922
|
import hashlib
def make_pw_hash(name, pw, salt):
    """
    Generates a SHA-256 hash for a user's password.
    Args:
        name: String, user's nickname.
        pw: String, user's password.
        salt: String, auto generated secret word.
    Returns:
        Hex digest string of the hash.
    """
    # Python 3 fix: hashlib constructors require bytes; the original
    # passed a str and raised TypeError on every call.
    return hashlib.sha256((name + pw + salt).encode('utf-8')).hexdigest()
|
6d1f12d6e30dd5cf92fc1069ea26680862a02720
| 339,649
|
def remove_iam_binding(policy, member, role):
    """Removes binding from given policy.
    Args:
        policy: Policy.
        member: Account, e.g. user:joe@doe.com, serviceAccount:..., etc.
        role: Role
    Returns:
        True if binding was removed. False, if binding was not present in policy.
    """
    for binding in policy['bindings']:
        if binding['role'] != role:
            continue
        members = binding['members']
        if member not in members:
            continue
        members.remove(member)
        # Drop the whole binding once it has no members left.
        if not members:
            policy['bindings'].remove(binding)
        return True
    return False
|
384d1f3119017367d1047c45478c2264fb32d598
| 389,989
|
import time
def parse_time(epoch_time):
    """Convert epoch time in milliseconds to [year, month, day, hour, minute, second]."""
    t = time.gmtime(epoch_time / 1000)
    return [t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec]
|
1c7f1b14cd672b5c98497f9eeee9fcbf3dc3f72e
| 681,406
|
def find_no_of_isocurves(target_0, target_1, avg_layer_height=1.1):
""" Returns the average number of isocurves that can cover the get_distance from target_0 to target_1. """
avg_ds0 = target_0.get_avg_distances_from_other_target(target_1)
avg_ds1 = target_1.get_avg_distances_from_other_target(target_0)
number_of_curves = ((avg_ds0 + avg_ds1) * 0.5) / avg_layer_height
return max(1, int(number_of_curves))
|
9df65a3f77c59ec771edea6bddc13a8a35e9dcde
| 177,488
|
def check_api_key(tmdb) -> bool:
    """
    Checks for the presence of the TMDB API Key.

    Returns True when ``tmdb.api_key`` is a non-empty value, False
    otherwise. (The original expression
    ``False if not tmdb.api_key or "" else True`` relied on confusing
    precedence; the ``or ""`` operand was dead and the whole thing
    reduces to a truthiness test on the key.)
    """
    return bool(tmdb.api_key)
|
8a6c4c59448c761d548de3ba66b089beb814f839
| 34,040
|
def countSuccess(L, diff, hero):
    """
    Counts the number of successes for the roll.
    Parameters
    ----------
    L: int list
        Decreasing sorted list with values between 1 and 10.
    diff : int
        Difficulty of the roll. A dice counts as a success if its value is
        greater than or equal to the difficulty.
    hero : optional bool
        If the character is a hero, each 10 counts as two successes instead
        of one, but they are cancelled first by critical failures (ones).
    """
    success = 0
    for val in L:
        if val == 1:
            success -= 1                     # critical failure cancels a success
        elif val == 10:
            success += 2 if hero else 1      # heroes double their tens
        elif val >= diff:
            success += 1
    return success
|
342b0b661894117567709b930caf32bf8129e4d8
| 100,846
|
def get_external_user_uuid_list(messages):
    """Compiles the set of all unique external user (respondent) uuids from a list of messages.

    For external messages the sender is the respondent; for internal
    messages the first recipient is.
    """
    uuids = set()
    for message in messages:
        if message['from_internal'] is True:
            uuids.add(message["msg_to"][0])
        elif message['from_internal'] is False:
            uuids.add(message["msg_from"])
    return uuids
|
90d942dfb79f93522c31989920a166c9773b469d
| 283,401
|
def list_or_str(v):
    """ Wrap a single value in a list; lists and tuples pass through unchanged.
    """
    return v if isinstance(v, (list, tuple)) else [v]
|
ea2c76bc5b8e6522c30e6166426166eee457196a
| 181,359
|
import time
def get_timer() -> float:
    """Return the current value of a monotonic, high-resolution timer."""
    now = time.perf_counter()
    return now
|
6997d8e715d5962b0d7f1c7b28a612b0a7fcd6a5
| 376,038
|
import itertools
def det_values(*dicts):
    """Deterministically iterate over the values of each dict in `dicts`.

    Values within each dict are yielded in sorted-key order.
    """
    per_dict = ((d[key] for key in sorted(d)) for d in dicts)
    return itertools.chain.from_iterable(per_dict)
|
c6b33134ab787a60bfaa7f5d302082c800149145
| 424,086
|
def has_immunity_been_reached(_model):
    """
    Determine whether herd immunity has been reached after running a model.
    :param _model: a model run with no-intervention setting for testing herd-immunity
    :return: a boolean (True when incidence peaks at the first time point)
    """
    incidence = _model.derived_outputs["incidence"]
    return max(incidence) == incidence[0]
|
a43515bfd0d15e259a6972d090b4897b292f2830
| 164,445
|
def time_to_decimal(time):
    """
    Convert an h:m:s triple to the total number of seconds as a string.
    Parameters
    ----------
    time : array
        a time with hms format split by ':' (numeric components)
    Returns
    -------
    decimal_time : string
        the total seconds, truncated to an integer.
    Examples
    --------
    >>> time = [20, 17, 40.088]
    >>> time_to_decimal(time)
    '73060'
    """
    # Bug fix: the seconds term previously reused time[0] (hours) instead
    # of time[2] (seconds); the old doctest value '73040' encoded the bug.
    return str(int(time[0]) * 3600 + int(time[1]) * 60 + int(time[2]))
|
d10bd6b355d21c15ace53e416f74465c21e65849
| 172,248
|
def fatorial(n, show=False):
    """
    -> Compute the factorial of a number.
    :param n: the number whose factorial is computed.
    :param show: (optional) when truthy, print the expansion
        (e.g. ``5 x 4 x 3 x 2 x 1 = ``) while computing.
    :return: n! (1 for n <= 0).
    """
    # The original duplicated the whole loop for show==True/show==False and
    # silently returned None for any non-bool truthy `show`; a single loop
    # with normal truthiness fixes both issues.
    num = 1
    for i in range(n, 0, -1):
        num *= i
        if show:
            if i > 1:
                print(f'{i} x ', end='')
            else:
                print(f'{i} = ', end='')
    return num
|
ee63f9e94f6147b21a04480689c81b424e41f009
| 82,494
|
import typing
def bit_length_table(n: int) -> typing.List[int]:
    """Bit length table.
    Args:
        n (int): an unsigned integer.
    Returns:
        list[int]: bit lengths of 0 .. n-1.
    Complexity:
        time: O(N)
        space: O(N)
    """
    # int.bit_length() matches the DP recurrence length[i] = length[i>>1] + 1
    # (with length[0] == 0), so the table is a direct comprehension.
    return [i.bit_length() for i in range(n)]
|
7b1ccd6cc86d505f9adfdb8ecbdb0c47e7c8f8d6
| 424,631
|
def read(filepath):
    """
    Read the entire text contents from a file into a string.
    """
    with open(filepath, 'rt') as fin:
        return fin.read()
|
e84bcfaab66696265e6cb94025213ddc252a4243
| 151,102
|
import glob
def glob_slash(dirname):
    """Like regular glob but replaces \ with / in returned paths."""
    normalized = []
    for path in glob.glob(dirname):
        normalized.append(path.replace('\\', '/'))
    return normalized
|
673cbe6e6871d3413d66801fd7f5453613bd0868
| 140,986
|
from typing import Tuple
import math
def rot(
    x: float, y: float, deg: float, origin: Tuple[float, float] = (0, 0)
) -> Tuple[float, float]:
    """
    Rotate a point by `deg` degrees counter-clockwise around `origin`.
    This does floating-point math, so you may encounter precision errors.
    """
    theta = deg * math.pi / 180
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    # Translate to the origin, rotate, then translate back.
    dx = x - origin[0]
    dy = y - origin[1]
    return (dx * cos_t - dy * sin_t + origin[0],
            dx * sin_t + dy * cos_t + origin[1])
|
411c66982dcddf454372058595cc2c142b4d5fe7
| 665,010
|
def get_distance(distance_matrix, seq):
    """ Total distance travelled along a closed tour of cities.

    Sums the pairwise distances between consecutive cities in ``seq``,
    including the leg from the last city back to the first.
    Parameters
    ----------
    distance_matrix : [matrix]
        [matrix of euclidean distance for each pair of cities]
    seq : [list]
        [list of cities]
    Returns
    -------
    [float]
        [total distance around the tour]
    """
    total = 0
    n = len(seq)
    for idx, city in enumerate(seq):
        nxt = seq[(idx + 1) % n]  # wraps back to the first city
        total += distance_matrix[city][nxt]
    return total
|
344851b80134717bdb4cb957b6dde0c27592208f
| 336,475
|
import re
import inspect
def deduct_failures(test_results):
    """ Accumulate each failed tests and the points lost.

    Walks every failure and error recorded on a ``unittest`` result
    object and builds a list of deduction dicts with keys:
    ``summary`` (test method name plus its docstring, when present),
    ``trace`` (the failure message followed by the test's source code),
    and ``points`` (parsed from an ``@points = N`` marker in the test's
    docstring; 0 when the marker is absent).

    NOTE(review): assumes each failing test has a non-None
    ``_testMethodDoc`` -- ``re.search`` would raise TypeError on None.
    Confirm that all graded tests carry docstrings.
    """
    deductions = []
    for failure in test_results.failures + test_results.errors:
        # Each entry is a (test_case, traceback_string) pair.
        msg = failure[1]
        # Points are declared inside the test docstring as "@points = N".
        match = re.search(r'\@points\s*=\s*([0-9\.]+)', failure[0]._testMethodDoc)
        points = float(match.group(1)) if match else 0
        # Include the failing test's own source to aid grading review.
        source = ' '.join(inspect.getsourcelines(getattr(failure[0], failure[0]._testMethodName))[0])
        deduction = {'summary': '%s%s' % (failure[0]._testMethodName,
                                          ': ' + failure[0]._testMethodDoc
                                          if failure[0]._testMethodDoc else ''),
                     'trace': '%s\nsource:\n%s' % (msg, source),
                     'points': points}
        deductions.append(deduction)
    return deductions
|
4dadaf38c335c6a6b1b60b09ae0ff42696ab6672
| 424,742
|
def expandMinAndMax(val0, val1, minimumDiff, growRatio, minLimit, maxLimit):
"""Expand the image dimension range
Inceases the min/max of the range by growRatio while maintaining
the same center. Also, ensures the range is at least minimumDiff.
Finally, limits the increases to ensure values are still within
the entire range of the image (minLimit, maxLimit)
Args:
val0 (int): starting (minimum) value of the input range
val1 (int): ending (maximum) value of the input range
minimumDiff (int): mimimum size of the output range
growRatio (float): ratio (expected > 1) to expand the range by
minLimit (int): absolute minimum value of the output range
maxLimit (int): absolute maximum value of the output range
Returns:
(int, int): start, end of the adjusted range
"""
val0 = max(val0, minLimit)
val1 = min(val1, maxLimit)
diff = val1 - val0
center = val0 + int(diff/2)
minimumDiff = max(minimumDiff, int(diff*growRatio))
if diff < minimumDiff:
if (center - int(minimumDiff/2)) < minLimit: # left edge limited
val0 = minLimit
val1 = min(val0 + minimumDiff, maxLimit)
elif (center + int(minimumDiff/2)) > maxLimit: # right edge limited
val1 = maxLimit
val0 = max(val1 - minimumDiff, minLimit)
else: # unlimited
val0 = center - int(minimumDiff/2)
val1 = min(val0 + minimumDiff, maxLimit)
return (val0, val1)
|
99efa887b8efef8ef35ac5979b869fd59b45f3e7
| 585,089
|
import re
def vuepress_slugify(s: str) -> str:
    """Slugify implementation duplicated from vuepress."""
    # Applied in order: strip combining marks/control chars, replace
    # separators/punctuation with '-', collapse repeats, trim edge
    # dashes, prefix a leading digit with '_'.
    steps = (
        (r"[\u0300-\u036F\u0000-\u001f]", ""),
        (r"[\s~`!@#$%^&*()\-_+=[\]{}|\\;:\"'“”‘’–—<>,.?/]+", "-"),
        (r"-{2,}", "-"),
        (r"^-+|-+$", ""),
        (r"^(\d)", r"_\g<1>"),
    )
    for pattern, replacement in steps:
        s = re.sub(pattern, replacement, s)
    return s.lower()
|
cc9ad7d04d6cbc8999cc26ed25def08686e150e9
| 226,236
|
import re
def remove_ordinals(string):
    """
    Remove ordinal suffixes (st/nd/rd/th) that directly follow a digit.
    """
    pattern = re.compile(r"(?<=[0-9])(?:st|nd|rd|th)")
    return pattern.sub('', string)
|
0149d7616cb1327fff371f2fefd1d77d9d42fc30
| 162,917
|
def count_units(units):
    """Count total number of units.

    :param units: mapping of unit name -> dict containing a ``count`` key.
    :return: sum of all ``count`` values.
    """
    # Iterate .values() directly; the keys from .items() were unused (PERF102).
    return sum(unit['count'] for unit in units.values())
|
8aea92d1eb1dee5a0e64c2139e9dedd16ad46fdb
| 408,320
|
def find_rooms_at_least_far_as(distances, wanted_distance):
    """Count cells in the distances grid that are positive and at least wanted_distance."""
    count = 0
    for row in distances:
        for cell in row:
            # Non-positive cells (walls / origin) never count.
            if cell > 0 and cell >= wanted_distance:
                count += 1
    return count
|
31aa9562802d2fb65fb1c144d050279834ead88a
| 513,754
|
def lead(x, n = 1, default = None):
    """Return an array with each value replaced by the next (or further forward) value in the array.
    Arguments:
        x: a pandas Series object
        n: number of next values forward to replace each value with
        default: what to replace the n final values of the array with
    Example:
        >>> lead(pd.Series([1,2,3]), n=1)
        0    2.0
        1    3.0
        2    NaN
        dtype: float64
        >>> lead(pd.Series([1,2,3]), n=1, default = 99)
        0     2
        1     3
        2    99
        dtype: int64
    """
    shifted = x.shift(periods=-n, fill_value=default)
    return shifted
|
c7c41355008c6691a01bcae31130ab0469543480
| 691,695
|
def re_compare(str1: str, str2: str, regex) -> bool:
    """Return True if both strings match the given (compiled) regex."""
    return all(regex.match(s) is not None for s in (str1, str2))
|
613a0dfd40a0e9c81cde9c1fb5dd7eca128a0423
| 340,891
|
def _pickle_method(m):
"""
Method to allow object pickling.
Parameters
----------
m: `object` or None
Object pickle?
Returns
-------
attr: Attributes to pickle?
"""
# if m.im_self is None:
# return getattr, (m.im_class, m.im_func.func_name)
# else:
# return getattr, (m.im_self, m.im_func.func_name)
# @jacobic: + py3 support
if m.__self__ is None:
return getattr, (m.im_class, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
|
439b776375a0e8de05ee6950a388708280c4bfde
| 380,799
|
def get_place_names(data_labels_list, count=10):
    """Create a list of place-name labels from the labels tuple in data_labels_list.

    :param data_labels_list: sequence whose second element is a list of
        ``(name, ...)`` label tuples.
    :param count: number of leading labels to extract (default 10,
        matching the original hard-coded behavior).
    :return: list of the first ``count`` names.
    """
    labels = data_labels_list[1]
    # Generalized: the original hard-coded range(0, 10).
    return [labels[i][0] for i in range(count)]
|
b1355dc5cb7de3b58d9628d4cc6d6432cb775f45
| 374,237
|
def mcf_to_boe(mcf=0, conversion_factor=6):
    """Converts mcf to barrels of oil equivalent using the standard 6 mcf/boe conversion factor."""
    boe = mcf / conversion_factor
    return boe
|
e7f7b984ec0e537512cf2b926c72c25c83a3507b
| 47,675
|
def headers_to_dict(headers):
    """
    Converts a sequence of (name, value) tuples into a dict where if
    a given name occurs more than once its value in the dict will be
    a list of values. Header names are lower-cased.
    """
    result = {}
    for name, value in headers:
        key = name.lower()
        if key not in result:
            result[key] = value
        elif isinstance(result[key], list):
            result[key].append(value)
        else:
            # Second occurrence: promote the scalar to a list.
            result[key] = [result[key], value]
    return result
|
afe5473cd237a108e0fc0416d28404e4c85d34ac
| 571,470
|
def find_escape_floor(pac_mine, pac_their, scene):
    """
    Find an escape floor which will save our pac from being eaten.

    Chooses the direction that moves away from the opponent, preferring
    horizontal movement (the board is wider than it is tall).
    """
    escapes = scene['escape']
    my_x, my_y = pac_mine['position'][0], pac_mine['position'][1]
    their_x, their_y = pac_their['position'][0], pac_their['position'][1]
    dx = abs(my_x - their_x)
    dy = abs(my_y - their_y)
    if dx >= dy:
        # Horizontal (including diagonal ties) -> flee along x.
        direction = 'right' if my_x > their_x else 'left'
    else:
        # Vertical separation dominates -> flee along y.
        direction = 'down' if my_y > their_y else 'up'
    return escapes[direction]
|
d4f1783748df278e27c6265ff56aa67e5a95f1f2
| 611,260
|
import collections
def build_dataset(input_words, vocabulary_size):
    """ Build the appropriate datasets for the desired vocabulary size given a corpus of input words.

    Returns (corpus_data, reverse_vocab) where corpus_data maps every
    input word to its 1-based rank in the top-N vocabulary (0 for
    out-of-vocabulary words) and reverse_vocab maps rank -> word.
    """
    most_common = collections.Counter(input_words).most_common(vocabulary_size)
    # 1-based ranks, in descending frequency order; 0 is reserved for unknowns.
    vocab = {word: rank for rank, (word, _) in enumerate(most_common, start=1)}
    corpus_data = [vocab.get(word, 0) for word in input_words]
    reverse_vocab = {rank: word for word, rank in vocab.items()}
    return corpus_data, reverse_vocab
|
c0634c83d2554d357f43c60073931131eb199618
| 253,821
|
def proxify(scraped_urls, prefix):
    """
    Rewrite scraped URLs to go through the UW Library proxy so that
    all of them grant full access.

    The first 32 characters of each URL (the original host portion) are
    dropped and replaced by ``prefix``.
    """
    return [prefix + url[32:] for url in scraped_urls]
|
a9e66c33b1ee0fd9c7b754cf8c443acbb98232f4
| 657,147
|
def key_to_note(key, octaves=True):
    """Return a string naming the piano note that is `key` keys from A0.

    When `octaves` is True the octave number is appended (e.g. "A4").
    """
    names = ('a', 'a#', 'b', 'c', 'c#', 'd', 'd#', 'e', 'f', 'f#', 'g', 'g#')
    name = names[(key - 1) % 12].upper()
    if not octaves:
        return name
    # Octave boundaries fall at each C; the +8 offset aligns A0/B0 into octave 0.
    return name + str((key + 8) // 12)
|
338e6dbe970d058fd58875ca5af5342d558030b5
| 551,662
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.