content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def solve(task: str) -> int:
    """Sum every digit that equals the digit halfway around the list."""
    digits = task.strip()
    half = len(digits) // 2
    total = len(digits)
    # Empty input: the generator never runs, so no modulo-by-zero occurs.
    return sum(int(ch) for pos, ch in enumerate(digits)
               if ch == digits[(pos + half) % total])
|
ef8f7a524d66f6db03039d48e171be9dce2e1377
| 373,471
|
def get_lhc_sequence_filename_and_bv(beam: int, accel: str = 'lhc'):
    """Return the default sequence name, sequence filename and bv-flag.

    Args:
        beam (int): beam to use (2 and 4 get special handling).
        accel (str): accelerator name ('lhc' or 'hllhc').
    """
    # Only the plain LHC model carries the '_as-built' suffix.
    suffix = '_as-built' if accel.lower() == 'lhc' else ''
    bv_flag = -1 if beam == 2 else 1
    if beam == 4:
        # Beam 4 is modelled with its own sequence file but the beam-2 name.
        seq_file = f"lhcb4{suffix}.seq"
        seq_name = "lhcb2"
    else:
        seq_file = f"lhc{suffix}.seq"
        seq_name = f"lhcb{beam}"
    return seq_name, seq_file, bv_flag
|
1e4a9fec3efd4b59db012422175ed0b416c4729b
| 631,218
|
def get_boundingbox(face, width, height, scale=1.3, minsize=None):
    # Reference: https://github.com/ondyari/FaceForensics
    """
    Build a square bounding box around a dlib face detection.
    :param face: dlib face class
    :param width: frame width
    :param height: frame height
    :param scale: bounding box size multiplier to get a bigger face region
    :param minsize: set minimum bounding box size
    :return: x, y, bounding_box_size in opencv form
    """
    left, top = face.left(), face.top()
    right, bottom = face.right(), face.bottom()
    # Square side: the longer face edge, enlarged by `scale`.
    size_bb = int(max(right - left, bottom - top) * scale)
    if minsize and size_bb < minsize:
        size_bb = minsize
    center_x = (left + right) // 2
    center_y = (top + bottom) // 2
    # Clamp the top-left corner inside the frame.
    x1 = max(int(center_x - size_bb // 2), 0)
    y1 = max(int(center_y - size_bb // 2), 0)
    # Shrink the box if it would run past the right/bottom frame edges.
    size_bb = min(size_bb, width - x1, height - y1)
    return x1, y1, size_bb
|
6cd8e8c5ed834ba432054226f24ec2b8c1b2fe5c
| 150,656
|
import struct
def _pack_mac(mac_addr):
"""Pack a MAC address (00:00:00:00:00:00) into a 6 byte string."""
fields = [int(x, 16) for x in mac_addr.split(':')]
return struct.pack('!6B', *fields)
|
cd242e6327c71c96e1498327a7e14880f4977f03
| 257,447
|
def open_file(filename: str):
    """Read and return the entire contents of *filename* as text.

    The original left the file handle open; a ``with`` block guarantees it
    is closed even if the read raises.
    """
    with open(filename, 'r') as handle:
        return handle.read()
|
35cf180e9259a1fafb6c27511cdd7b0670f892e5
| 568,516
|
def get_unique(items):
    """Return the distinct elements of a list, sorted when comparable."""
    distinct = list(set(items))
    try:
        return sorted(distinct)
    except TypeError:
        # Mixed, unorderable types: fall back to arbitrary set order.
        return distinct
|
bbb7ed5a3eb0390e693233620e33f0353a13d219
| 254,941
|
def little_endian(num):
    """Return the hexadecimal little-endian (4-byte) representation of *num*
    as a space-separated string of 0x-prefixed bytes."""
    raw = num.to_bytes(4, "little")
    return " ".join("0x%X" % byte for byte in raw)
|
1d06921597604a1ce40f76dd8f46970cab4ea2ae
| 567,252
|
def parse_games_losses(r):
    """Parse the number of games a team has lost from a result record;
    missing values count as zero."""
    losses = r.get("wedVerloren", 0)
    return int(losses)
|
61f12cf213b40b3035dffa8ee2ef88f8fa99e2c5
| 589,417
|
def get_patches_per_dimension(dimension: int, size: int, stride: int) -> int:
    """
    Number of patches of the given size/stride that fit inside the dimension
    without running past its bounds. Requires size to be a stride multiple.
    """
    assert size % stride == 0
    if stride == size:
        overlap = 0
    else:
        overlap = size // stride - 1
    return dimension // stride - overlap
|
d70c76ac9465105b5c94878f13f722ab58c3040d
| 514,696
|
def interact_features(feature_df, interact_list, drop_original_columns=True):
    """
    Create pairwise interaction features (elementwise column products).

    :param feature_df: a pandas dataframe
    :param interact_list: list of lists or tuples with two strings each, representing columns to be interacted.
    :param drop_original_columns: (bool) if set to True, the interacted columns are dropped from the
        dataframe. Note: if set to True a column cannot appear in more than one interaction pair.
    :return: DataFrame with the interaction columns (original columns dropped when requested).
    """
    for feature_0, feature_1 in interact_list:
        if feature_0 not in feature_df.columns or feature_1 not in feature_df.columns:
            # Best-effort: skip pairs with missing columns rather than fail.
            # (Original message was garbled: "one the features ... do not exists".)
            print(f'Warning: one of the features: {feature_0}, {feature_1} does not exist in the data.')
            continue
        new_feature = feature_df[feature_0] * feature_df[feature_1]
        if drop_original_columns:
            feature_df = feature_df.drop(columns=[feature_0, feature_1])
        feature_df[feature_0 + '_X_' + feature_1] = new_feature
    return feature_df
|
97de16f1d9179d74fe2eaaa15e75ec903948814f
| 599,307
|
def _pretty_list(value: list, key_length: int) -> str:
"""Pretty prints a list. Automatically warps lines if line length of 88 is exceeded (-> black compatibility)."""
htchar = ' '
indent = 4
nlch = '\n' + htchar * indent
items = [repr(item) for item in value]
if (len(', '.join(items)) + key_length + 9) > 88:
return '[%s]' % (
nlch
+ htchar * indent
+ (',' + nlch + htchar * indent).join(items)
+ ','
+ nlch
)
return '[%s]' % (', '.join(items))
|
0bb251ad84e296308edb8d0fdd326a5cbaa809f4
| 269,158
|
def ravel_group_params(parameters_group):
    """Flatten a dict(group -> {k: p}) into a dict('group:k' -> p)."""
    flat = {}
    for group_name, group_params in parameters_group.items():
        for key, param in group_params.items():
            flat[f'{group_name}:{key}'] = param
    return flat
|
4a768e89cd70b39bea4f658600690dcb3992a710
| 2,847
|
def drop_cols(_df, drop_list):
    """Drop columns from dataframe supplied in drop_list, ignoring labels
    that are absent (best-effort, in place).

    Modern pandas raises KeyError — not ValueError — for missing labels,
    so both are swallowed to preserve the silent-skip contract.
    """
    try:
        _df.drop(drop_list, axis=1, inplace=True)
    except (KeyError, ValueError):
        pass
    return _df
|
0904c897af7beb05ffdde5e63882962f101c4cb6
| 496,488
|
import math
def hard_negative_mining(loss, labels, neg_pos_ratio=3):
    """
    Limit the ratio of negative to positive examples used during training.
    By default at most three negatives are kept per positive, chosen as the
    highest-loss (hardest) background priors.

    Args:
        loss (N, num_priors): the loss for each example.
        labels (N, num_priors): the labels.
        neg_pos_ratio: negatives kept per positive (num_neg / num_pos).

    Returns:
        Boolean mask selecting all positives plus the hardest negatives.

    NOTE(review): mutates ``loss`` in place (positive entries are set to
    -inf) — callers must not reuse the tensor afterwards.
    """
    pos_mask = labels > 0
    num_pos = pos_mask.long().sum(dim=1, keepdim=True)
    num_neg = num_pos * neg_pos_ratio
    loss[pos_mask] = -math.inf  # push positives to the bottom of the ranking
    # Double-argsort trick: `orders` is each element's rank in the
    # descending loss ordering.
    _, indexes = loss.sort(dim=1, descending=True)
    _, orders = indexes.sort(dim=1)
    neg_mask = orders < num_neg  # keep the num_neg highest-loss negatives
    return pos_mask | neg_mask
|
3b2e38ab2b0bbd9732fceafdfd023ea220b3c5eb
| 706,894
|
def jenkins_api_query_build_statuses(jenkins_url):
    """Construct the Jenkins (CI) API URL that lists build results."""
    return jenkins_url + "/api/json?tree=builds[result]"
|
eb8625ba17dfb6f630701f4356cbcce6a6d39265
| 627,674
|
from typing import Sequence
def convert_bitstring_to_int(bitstring: Sequence[int]) -> int:
    """Convert a little-endian bitstring to an integer.

    Args:
        bitstring (list): A list of integers, first bit least significant.
    Returns:
        int: The integer value of the bitstring.
    """
    # Reverse so the most significant bit comes first, then parse base-2.
    big_endian = "".join(str(bit) for bit in reversed(bitstring))
    return int(big_endian, 2)
|
a94fe0db9e89678e276ae7a426124cbeb5c495bf
| 53,408
|
def Explode(isc_string):
    """Explodes isc file into relevant tokens.

    Inputs:
        isc_string: String of isc file
    Outputs:
        list: list of isc file tokens delimited by brackets and semicolons
              ['stanza1 "new"', '{', 'test_info', ';', '}']

    Cleanup: removed the dead ``prev_char`` local the original tracked but
    never read.
    """
    str_array = []
    temp_string = []
    for char in isc_string:
        if char == '\n':
            # Newlines never appear in tokens and are not delimiters.
            continue
        if char in ('{', '}', ';'):
            # Flush any accumulated token, then emit the delimiter itself.
            token = ''.join(temp_string).strip()
            if token:
                str_array.append(token)
            str_array.append(char)
            temp_string = []
        else:
            temp_string.append(char)
    return str_array
|
e21f8f567f38c557b851c4a9bab08db01d421fb0
| 132,786
|
def transform_item(item):
    """Recursively transform a Crossref Metadata JSON value for BigQuery.
    :param item: a JSON value.
    :return: the transformed item.
    """
    if isinstance(item, list):
        return [transform_item(element) for element in item]
    if not isinstance(item, dict):
        return item
    transformed = {}
    for key, value in item.items():
        # BigQuery field names may not contain hyphens.
        key = key.replace("-", "_")
        if key == "date_parts":
            # Crossref nests date parts one level deep: [[y, m, d]].
            value = value[0]
            if None in value:
                # "date-parts" : [ [ null ] ] means no date at all.
                value = []
        elif key == "award" and isinstance(value, str):
            value = [value]
        transformed[key] = transform_item(value)
    return transformed
|
dbe3ffae5e2357efd6a028cc04a427aba3b6bf19
| 165,311
|
import math
def super_smoother(data, length):
    """Python implementation of the Super Smoother indicator created by John Ehlers.

    The filter coefficients depend only on ``length``, so they are computed
    once instead of being recomputed on every loop iteration as before.

    Arguments:
        data {list} -- list of price data
        length {int} -- period
    Returns:
        list -- super smoothed price data
    """
    # Fewer than three samples: only the zero-seeded warm-up values exist.
    if len(data) < 3:
        return [0] * len(data)
    # Loop-invariant coefficients (hoisted out of the loop).
    arg = 1.414 * 3.14159 / length
    a_1 = math.exp(-arg)
    # NOTE(review): original uses 4.44 here rather than `arg`; preserved
    # as-is to keep numerical behavior identical.
    b_1 = 2 * a_1 * math.cos(4.44 / float(length))
    c_2 = b_1
    c_3 = -a_1 * a_1
    c_1 = 1 - c_2 - c_3
    ssf = []
    for i, price in enumerate(data):
        if i < 2:
            ssf.append(0)
        else:
            ssf.append(c_1 * (price + data[i - 1]) / 2 + c_2 * ssf[i - 1] + c_3 * ssf[i - 2])
    return ssf
|
e392d6199b399e0dbc3fec1c4352a0d639d77bb0
| 520,560
|
import re
import string
def rm_word_all_punct(dfcol):
    """Remove words that are entirely punctuation from a Series of strings.

    Fix: the pattern is now a raw f-string; the original plain f-string
    contained ``\\s``, an invalid escape sequence (DeprecationWarning today,
    a SyntaxError in future Python versions).
    """
    punct = re.escape(string.punctuation)
    # A run of punctuation bounded by whitespace or string start/end.
    pattern = rf"((?<=\s)|^)([{punct}]+)((?=\s)|$)"
    return dfcol.str.replace(pattern, r'', regex=True)
|
2ad82922b01da11466e2b77ed9980ce83de117fe
| 544,405
|
def PatternStrToList(pattern):
    """Return a list of integers for the given pattern string.
    PatternStrToList('531') -> [5, 3, 1]
    """
    zero = ord('0')
    return [ord(ch) - zero for ch in pattern]
|
8cf6a03ac3d0e3cecd45c2f996614a7ebb0948c8
| 220,586
|
def is_annotation_size_unusual(annotation, minimum_size, minimum_aspect_ratio, maximum_aspect_ratio):
    """
    Decide whether an annotated object has an unusual size: too small, or an
    aspect ratio outside the accepted band (tested in both orientations).
    :param annotation: net.utilities.Annotation instance
    :param minimum_size: int, minimum width/height for a normal object
    :param minimum_aspect_ratio: float, lower bound on both w/h and h/w
    :param maximum_aspect_ratio: float, upper bound on both w/h and h/w
    :return: bool, True if object size is unusual, False otherwise
    """
    # Size check first — aspect ratio is never touched for tiny boxes.
    if annotation.width < minimum_size or annotation.height < minimum_size:
        return True
    ratio = annotation.aspect_ratio
    flipped = 1 / ratio
    if min(ratio, flipped) < minimum_aspect_ratio:
        return True
    return max(ratio, flipped) > maximum_aspect_ratio
|
d9f62e3600faeee0662d29aaba4d0d26a6b9252f
| 679,746
|
def qualify(path: str) -> str:
    """Prefix an absolute file path with the file:// scheme; leave
    everything else untouched."""
    return f"file://{path}" if path.startswith("/") else path
|
d84925b381913c8502b38e3e549cde39766e7926
| 400,267
|
from pathlib import Path
def filter_extensions(files, extensions):
    """Keep only the files whose suffix matches one of the extensions
    (given without the leading dot)."""
    wanted = {"." + ext for ext in extensions}
    return [name for name in files if Path(name).suffix in wanted]
|
2acd9f79efdef3a0855c78598dbd906e5eee2b21
| 89,070
|
def get_load_dur_curve_building(building, get_therm=True, with_dhw=False):
    """
    Returns load duration power curve of building object.
    Parameters
    ----------
    building : object
        Building object of pycity (should have apartment with power curves)
    get_therm : bool, optional
        Defines if thermal or electrical load duration curve should be used
        (default: True)
        True - Return thermal power
        False - Return electrical power
    with_dhw : bool, optional
        Defines if domestic hot water (dhw) should be included (only relevant,
        if get_therm == True).
        (default: False)
        True - Return space heating plus hot water power
        False - Return space heating power only
    Returns
    -------
    load_dur_curve : np.array
        Duration load curve array (power sorted in descending order)
    """
    if get_therm: # Thermal power
        power_curve = building.get_space_heating_power_curve()
        if with_dhw:
            # NOTE(review): += may add dhw into the array returned by the
            # building in place — confirm the getter returns a copy.
            power_curve += building.get_dhw_power_curve()
    else: # Electrical power
        power_curve = building.get_electric_power_curve()
    # Sort descending
    # NOTE(review): .sort() sorts in place; if the getter returned a shared
    # array, the building's stored curve is reordered too — verify.
    power_curve.sort()
    load_dur_curve = power_curve[::-1]
    return load_dur_curve
|
b7fb6424270d078c891bf049b6a487e874b59e02
| 321,687
|
def phase_LogLinear(phase, slope=0.04):
    """A log-linear phase function, roughly appropriate for cometary nuclei.
    An H-G phase function is likely a better approximation.

    Parameters
    ----------
    phase : float or array
        Phase angle (degrees)
    slope : float, optional
        The slope for the phase function. Default 0.04.

    Returns
    -------
    phi : float or array
        Phase function evaluated at phase
    """
    exponent = -0.4 * slope * phase
    return 10 ** exponent
|
1c5cdbf4a41387244d38a0fde368af3ecf224f52
| 26,169
|
def MakePublic(curve, sk):
    """
    Derive the public key (a curve point) from private scalar ``sk`` by
    multiplying the curve's generator point.
    """
    generator = curve.G
    return generator * sk
|
32c54ce6d858678327cbcf190a30ba76a015723c
| 448,949
|
def get_index_size_in_kb(opensearch, index_name):
    """
    Gets the size of an index in kilobytes
    Args:
        opensearch: opensearch client
        index_name: name of index to look up
    Returns:
        size of index in kilobytes
    """
    stats = opensearch.indices.stats(index_name, metric='store')
    size_bytes = stats['indices'][index_name]['total']['store']['size_in_bytes']
    return int(size_bytes) / 1024
|
a5a339d5055e66e007caa82f7a70cb2cf4d84e6a
| 661,374
|
def format_errors(errors):
    """Format serializer errors to conform to our messaging format (a list of
    messages, or a single message under 'success', 'info', 'warning', or
    'failure').
    :param errors: An error dictionary as produced by rest_framework serializers.
    :returns: A list of messages."""
    messages = []
    for field, field_errors in errors.items():
        for message in field_errors:
            if field == 'non_field_errors':
                # Non-field errors carry no field prefix.
                messages.append(message)
            else:
                messages.append('{}: {}'.format(field, message))
    return messages
|
f433c59bccf8576e0720308550f00be368887e5e
| 472,242
|
def get_nth_digit(N, n):
    """
    return the nth digit from an N digit number
    >>> get_nth_digit(12345, 3)
    4
    >>> get_nth_digit(12345, 7)
    Traceback (most recent call last):
    ...
    IndexError: string index out of range
    """
    digits = str(N)
    return int(digits[n])
|
25c01c14589fb091154e8509a84f98811946938f
| 700,747
|
import json
def load_json_data(json_data, encoding='utf-8'):
    """Load JSON contents from binary data.

    Parameters
    ----------
    json_data : bytes
        Binary data encoding JSON contents.
    encoding : str (optional, default 'utf-8')
        Encoding that was used.

    Returns
    -------
    contents : dict
        JSON contents.
    """
    decoded = json_data.decode(encoding)
    return json.loads(decoded)
|
be1e9d9a1feab3d07247ab2990c9f4bbf898f1da
| 76,275
|
def find_layer_idx(model, layer_name):
    """Looks up the layer index corresponding to `layer_name` from `model`.
    Args:
        model: The `keras.models.Model` instance.
        layer_name: The name of the layer to lookup.
    Returns:
        The layer index if found. Raises an exception otherwise.
    """
    for idx, layer in enumerate(model.layers):
        if layer.name == layer_name:
            return idx
    raise ValueError("No layer with name '{}' within the model".format(layer_name))
|
ab5c8bcde11e22aa0081a2bb60c9b99c324b6525
| 676,058
|
def get_late_roman(lang='both'):
    """Access Late Roman period productions.

    Parameters
    ----------
    lang : {'both', 'eng', 'cat'}
        The language you need for the output.

    Returns
    -------
    productions : dict, list, or None
        If lang='both' a dict mapping English to Catalan names; 'eng' or
        'cat' give a list in that language; anything else yields None.
    """
    p = {'Late Roman amphorae (LRA)': 'Late Roman amphorae (LRA)',
         'Terra Sigillata - Hispanic': 'Terra Sigillata - Hispànica',
         'Terra Sigillata - African A': 'Terra Sigillata - Africana A',
         'Terra Sigillata - African C': 'Terra Sigillata - Africana C',
         'Terra Sigillata - African D': 'Terra Sigillata - Africana D',
         'DSP Derivee de sigille paleochretienne': 'DSP Derivee de sigille paleochretienne',
         'Roman common ware': 'Ceràmica Comuna Romana',
         'Roman oil lamp': 'Llàntia romana',
         'Late Roman C': 'Late Roman C',
         'Late Roman cooking ware': 'Late Roman cooking ware'
         }
    if lang == 'both':
        return p
    if lang == 'eng':
        return list(p.keys())
    if lang == 'cat':
        return list(p.values())
    return None
|
e2407e0bba5b7c9b84eb0b4a0fec4e9b82a216b1
| 566,175
|
def shorten_titl(str_in, nout=5):
    """Shorten the title with *, so it can still be matched by glob.

    Keeps the first and last ``nout`` characters with '*' in between when
    the input is longer than ``2 * nout``.

    Fix: the original hard-coded 5 in the slices, silently ignoring the
    ``nout`` parameter for any other value.
    """
    if len(str_in) > nout * 2:
        return str_in[:nout] + '*' + str_in[-nout:]
    return str_in
|
19fd358ba94646f076e8795a86eba7568705c475
| 67,463
|
def dist_forward(distribution, x):
    """
    Forward pass with an arbitrary PyTorch distribution.
    Args:
        distribution: PyTorch base distribution which is used to compute the log probabilities of x.
        x: Input to compute the log probabilities of.
           Shape [n, d].
    Returns:
        torch.Tensor: Log probabilities for each feature.
    """
    # Trailing singleton dim lets the distribution broadcast over the layer
    # multiplicity: [n, d] -> [n, d, 1].
    expanded = x.unsqueeze(2)
    # Output shape: [n, d, multiplicity].
    return distribution.log_prob(expanded)
|
7072174b0f6e4abb99c45ea6deb5127ffc47ff9b
| 328,743
|
def sum(P):
    """Sum of the real vector P.

    NOTE: the name shadows the builtin `sum`; kept because callers use it.
    """
    total = 0.0
    for value in P:
        total += value
    return total
|
5dc09f517c24040e972552f83810387c01c7bb7f
| 197,228
|
def read_words(words_file):
    """ (file open for reading) -> list of str

    Return a list of all words (with trailing newlines removed) from the
    open file words_file.

    Precondition: Each line of the file contains a word in uppercase
    characters from the standard English alphabet.
    """
    return [line.rstrip('\n') for line in words_file]
|
29aa1e468e905cd69852d10f397a41136e87f515
| 407,017
|
def commute(self):
    """Switch states between `True` and `False`"""
    flipped = not self.flag
    self.flag = flipped
    return flipped
|
51570c04bd6da684cc66e222c8957eb908df2626
| 505,516
|
import requests
def get_json(name, *args, **kwargs):
    """Retrieve JSON from a (REST) API server after checking correct response."""
    response = requests.get(*args, **kwargs)
    # Fail loudly with the server's reason if the request was not OK.
    assert response.ok, "%s access failed: %s" % (name, response.reason)
    return response.json()
|
59dc53615b7ca6e48d0d9a83536ea9ca8d81577f
| 623,103
|
def map_quicktree_dirs(struct):
    """Maps ``struct.dirs`` to alphanumeric keys.

    ``struct.dirs`` is mapped in alphanumeric order. Keys are selected for
    assignment in alphanumeric order as well, and are looped over when the
    directory list is longer than the key set.

    :param QuickTreeStruct struct:
    :return: ``struct.dirs`` mapped to alphanumeric keys
    :rtype: dict[str, list[str]]
    """
    keys = "0123456789abcdefghijklmnopqrstuvwxyz"
    mapping = {key: [] for key in keys}
    for index, directory in enumerate(sorted(struct["dirs"])):
        mapping[keys[index % len(keys)]].append(directory)
    return mapping
|
311d7cc4f7e69f530b52b4078470bef10246e323
| 286,350
|
def is_magic_choice(attr_name: str) -> bool:
    """Determine iff attr_name meets magic choice formatting requirements.

    Helper for the MagicKind metaclass: decides whether an attribute name
    should be treated as a user defined magic-value choice.

    Args:
        attr_name: name of an attribute
    Returns:
        True iff attr_name is a valid upper-case python identifier that
        does not begin with an underscore.
    """
    if attr_name.startswith("_"):
        return False
    return attr_name.isupper() and attr_name.isidentifier()
|
b30ae7fdf439db85255dac70d41d7803b6d2d091
| 550,436
|
def polygon_to_lists(poly, swap_x_y=False, normalize=False, img_width=1, img_height=1, as_string=False):
    """
    Turns a polygon into two lists, one with xs and one with ys. Coordinates can be normalized.
    :param poly: the polygon to process, array of (x,y) pairs
    :type poly: np.ndarray
    :param swap_x_y: whether to swap x and y
    :type swap_x_y: bool
    :param normalize: whether to return normalized coordinates (requires image width and height parameters)
    :type normalize: bool
    :param img_width: the image width to use for normalizing the coordinates
    :type img_width: int
    :param img_height: the image height to use for normalizing the coordinates
    :type img_height: int
    :param as_string: whether to return the values as string or float
    :type as_string: bool
    :return: tuple of one list containing all xs and one containing all ys
    :rtype: tuple
    """
    xs = []
    ys = []
    for point in poly:
        x, y = (point[1], point[0]) if swap_x_y else (point[0], point[1])
        if normalize:
            x = x / img_width
            y = y / img_height
        if as_string:
            x, y = str(x), str(y)
        xs.append(x)
        ys.append(y)
    return xs, ys
|
64a51e07dbfb5420eb664e8cb9742e15b026fc2c
| 416,272
|
def prune_empty(row):
    """
    Return a copy of the row with all falsy-valued attributes removed
    (None, empty strings, zero, ...).
    """
    return {key: value for key, value in row.items() if value}
|
7d88b0c751838ac7a5424a7d44457302b8d45a81
| 542,296
|
def grab_color(ims, color=None):
    """Grab the R, G, or B channel from an RGB image stack, or return the
    input unchanged for any other color value (including None)."""
    channels = {"red": 0, "green": 1, "blue": 2}
    if color in channels:
        return ims[..., channels[color]]
    return ims
|
a5eb4a35c47775b4c62c4d2f3dac1ea7e74f0736
| 472,146
|
def filter_by_year(statistics, year, yearid):
    """
    Inputs:
        statistics - List of batting statistics dictionaries
        year - Year to filter by
        yearid - Year ID field in statistics
    Outputs:
        Returns a list of batting statistics dictionaries that
        are from the input year.
    """
    target = str(year)
    return [record for record in statistics if record[yearid] == target]
|
84adfe5ccf5f8633a622e057dd029db6cd37f44b
| 145,578
|
def key_number_to_mode_accidentals(key_number):
    """Converts a key number to number of accidentals and mode.

    Parameters
    ----------
    key_number : int
        Key number as used in ``pretty_midi``; an int in [0, 24).

    Returns
    -------
    mode : int
        0 for major, 1 for minor.
    num_accidentals : int
        Number of accidentals.
        Positive is for sharps and negative is for flats.

    Raises
    ------
    ValueError
        If ``key_number`` is not an int in [0, 24).
    """
    if not (isinstance(key_number, int) and 0 <= key_number < 24):
        # The original message was garbled ("is not a must be an int
        # between 0 and 24") and claimed an inclusive upper bound.
        raise ValueError(
            'Key number {} must be an int in the range [0, 24)'.format(key_number))
    pc_to_num_accidentals_major = {0: 0, 1: -5, 2: 2, 3: -3, 4: 4, 5: -1, 6: 6,
                                   7: 1, 8: -4, 9: 3, 10: -2, 11: 5}
    mode = key_number // 12
    pitch_class = key_number % 12
    if mode == 1:
        # Minor keys: look up the relative major, three semitones up.
        pitch_class = (pitch_class + 3) % 12
    return mode, pc_to_num_accidentals_major[pitch_class]
|
9d1fd21f5fb627f9218f4ccfbe556b962a05dbbf
| 485,144
|
def askHowManyLoops(game):
    """
    Prompt until the user enters an integer count of rounds to play {game},
    then return it.
    """
    prompt = f"\nHow many times would you like to play {game}? "
    while True:
        answer = input(prompt)
        try:
            return int(answer)
        except ValueError:
            print("Incorrect response, try again.")
|
9802bbf6fb38d7b4f46dfc64076b0fdcf99e22ea
| 217,721
|
def batch_files(pool_size, limit):
    """Split `limit` numbered file names into `pool_size` equal batches for
    processing by a multiprocessing Pool."""
    batch_size = limit // pool_size
    return [
        ['numbers/numbers_%d.txt' % j
         for j in range(batch * batch_size, (batch + 1) * batch_size)]
        for batch in range(pool_size)
    ]
|
509b909ffed6a2eb1f71798cdaa38ebe885cc9a6
| 558,745
|
def create_unbroadcast_axis(shape, broadcast_shape):
    """Creates the reduction axes for unbroadcasting.

    Args:
        shape: A list. The shape after the broadcast operation.
        broadcast_shape: A list. The original shape the array being
            unbroadcast had.
    Returns:
        A tuple of negative axes along which the array needs to be reduced
        so it fits back into the original shape.
    """
    axes = []
    for offset in range(1, len(broadcast_shape) + 1):
        # An axis must be reduced if it is missing from `shape` or was
        # broadcast up from a smaller size.
        if offset > len(shape) or broadcast_shape[-offset] > shape[-offset]:
            axes.append(-offset)
    return tuple(axes)
|
2758f1f1b993dfa7bdba10343cc9afde4cfcf38e
| 684,249
|
def get_atom_num(ndx_file: str) -> int:
    """Computes number of atoms in the particular index file.

    Args:
        :param str ndx_file: .ndx - index of the protein atoms of the current conformation.
    Returns:
        :return: number of atoms in the .ndx file.
        :rtype: int
    """
    with open(ndx_file, 'r') as index_file:
        index_file.readline()  # first line is the comment - skip it
        # Every remaining whitespace-separated token is one atom index.
        return len(index_file.read().split())
|
cad487b5a01318a3159ce4787beaab5355cb0dea
| 561,295
|
def parse_entry(entry):
    """Separate an entry string into its numeric value and a trailing
    tied/fixed parameter suffix, if present."""
    if entry.startswith('nan'):
        # 'nan' parses to float nan; anything after it is the parameter.
        return float(entry[:3]), entry[3:]
    # Walk back from the end to the last digit of the numeric part.
    pos = -1
    while not entry[pos].isdigit():
        pos -= 1
    if pos == -1:
        # Number occupies the whole entry; no parameter suffix.
        return float(entry), ''
    return float(entry[:pos + 1]), entry[pos + 1:]
|
04543a118a3859fbc4e7aff6887bdb787204cbf3
| 391,464
|
def make_kms_map(map_string):
    """Convert a comma-separated "key:value" string into a dict."""
    result = {}
    for entry in map_string.split(","):
        # Strip padding around each pair, then split into key/value.
        key, value = entry.strip().split(":")
        result[key] = value
    return result
|
10405d9e11f3ae7e262c0ceb4bedfa2f407a0ec0
| 687,991
|
def ret_reg_name(p):
    """
    Returns the name of the return register
    :param p: the project
    :return: the name of the return register
    """
    arch = p.arch
    return arch.register_names[arch.ret_offset]
|
4bc211ee7603d393a687d0f1db45c7d26a983acb
| 410,818
|
def error_is_missing_assignment_function(error):
    """
    Return True if the (type, value, traceback) triple describes a missing
    assignment function in the student's code: the error message names a
    missing 'exam' module attribute and the innermost frame is test_exam.py.
    """
    _, value, tb = error
    if "module 'exam' has no attribute" not in str(value):
        return False
    # Walk to the innermost traceback frame.
    while tb.tb_next:
        tb = tb.tb_next
    filename = tb.tb_frame.f_code.co_filename.split("/")[-1]
    return filename == "test_exam.py"
|
5221e40cbd595afb72e00c7a6562b7ea7f500fb0
| 249,031
|
import torch
def rerank_beams_v2(beams, scores):
    """
    Reorder each batch's beams from best (lowest score) to worst.
    beams: [batch, beam, len]
    scores: [batch, beam], negative logprobs
    """
    order = torch.argsort(scores, dim=1, descending=False)  # [batch, beam]
    gather_index = order.unsqueeze(-1).repeat(1, 1, beams.size(-1))
    return torch.gather(beams, dim=1, index=gather_index)
|
fd20cbeeda5404000e4ac2beac8b7fc57aec05e6
| 414,205
|
def null_out_formatter(feats, out_features, _out, _nullvalue):
    """Function which contains the out formatting. It has to deal with
    aggregated and point data.

    Currently a no-op pass-through: the features are returned unchanged and
    every other parameter is ignored (kept for interface compatibility).

    Parameters
    ----------
    feats: list of dicts [iss_i]{feats} or np.array (iss_i, feats_n)
        the features information.
    out_features: list of featurenames
        the featurenames. (unused)
    _out: optional str ['ndarray', 'dict']
        the type of output desired. (unused)
    _nullvalue: float
        the nullvalue desired for a variable. (unused)
    Returns
    -------
    feats_o: np.array(iss_i, feats_n) or list of dicts [iss_i]{feats}
        the formatted features.
    """
    return feats
|
bebdf988b2374f2149f0e1eee5a50cc796cbbd7c
| 402,240
|
import socket
from typing import List
def receive_data(*, from_socket: socket.socket,
                 from_timeout=2) -> bytes:
    """
    Centralised function to handle receiving one or more packet buffers from
    a TCP socket.

    Args:
        from_socket:
            Socket sending stream to this instance.
        from_timeout:
            Timeout (seconds) applied to from_socket before reading.
    Returns:
        Complete binary stream from socket
    """
    from_socket.settimeout(from_timeout)
    fragments: List[bytes] = []
    try:
        while True:
            stream = from_socket.recv(4096)
            fragments.append(stream)
            # A short read means the peer has no more buffered data.
            if len(stream) < 4096:
                break
    except socket.timeout:
        # Fix: socket timeouts raise socket.timeout, which only became an
        # alias of TimeoutError in Python 3.10 — the original
        # `except TimeoutError` missed them on older interpreters.
        pass
    return b''.join(fragments)
|
d6577ba5048fe86d33ce3657f4b9741f7324ca24
| 336,137
|
import json
def load_json(json_file):
    """
    Opens json-file and returns it as dictionary
    :param json_file: path to json-file
    :type json_file: string
    :returns: the content of the json file as dictionary
    """
    with open(json_file) as infile:
        return json.load(infile)
|
211623ba735fcc9bbc624814e9d1712eb9f156a1
| 676,527
|
def num_range(num):
    """
    Template-language helper: expose the integers 0..num-1 as a range so
    templates can loop over a numeric span.
    """
    return range(0, num)
|
7b66e4ffd264ea7b49850a9300c3a6c80282fce1
| 708,718
|
import shutil
def zip_up_dir(folder_to_zip, zip_endpoint):
    """
    Creates a .zip file from a directory.
    Args:
        folder_to_zip (string): The location of the directory to create a zip file from.
        zip_endpoint (string): The place to put the finished zip.
    Returns:
        string: The location of the final .zip file.
    """
    shutil.make_archive(zip_endpoint, 'zip', folder_to_zip)
    return f"{zip_endpoint}.zip"
|
58bdf50c474097b3f341b74c5ca63a4f1c9ec712
| 215,447
|
def dtype_to_field_type(ty):
    """Simple converter that translates Pandas column types to data types
    for Draco.

    Membership is tested with `in` (equality), not dict hashing, so pandas
    dtype objects that compare equal to these strings still match.
    """
    for names, field_type in (
        (("float64", "int64"), "number"),
        (("bool",), "boolean"),
        (("object",), "string"),
        (("datetime64[ns]",), "datetime"),
    ):
        if ty in names:
            return field_type
    raise ValueError(f"unsupported type {ty}")
|
9bfc3e29d5b482ad106b8a98703c99d63a6638fe
| 435,384
|
def difference(array, *lists):
    """Creates a list of list elements not present in the other lists.

    Args:
        array (list): List to process.
        lists (list): Lists to check.
    Returns:
        list: Difference of the lists.
    Example:
        >>> difference([1, 2, 3], [1], [2])
        [3]
    .. versionadded:: 1.0.0
    """
    if not lists:
        return array
    # Remove the first exclusion list, then recurse over the rest.
    remaining = set(array).difference(lists[0])
    return list(difference(remaining, *lists[1:]))
|
7b56db50e2844244fc2ce6d884d2ed26ac35471a
| 589,888
|
def get_nominal_genes(gene_to_vegasp, cutoff=0.05):
    """
    Return the set of genes whose VEGAS p-value is nominally significant
    at the cutoff threshold (p <= cutoff).

    Fix: the original called ``dict.iteritems()``, which does not exist in
    Python 3 and raised AttributeError; ``.items()`` is the replacement.
    """
    genes = {gene for gene, vegasp in gene_to_vegasp.items() if vegasp <= cutoff}
    return genes
|
0f117534ba62440efa81181916e0db0d3bcdfc60
| 466,908
|
def filter_contigs(all_contigs, ancient_contigs):
    """Keep only the contigs whose name appears in the ancient contig list.

    Args:
        all_contigs(dict): fasta dict of contigs, seqname as key, sequence as value
        ancient_contigs(list): list of ancient contig names
    Returns:
        (dict): ancient contigs, seqname as key, sequence as value
    """
    ancient = {}
    for header, sequence in all_contigs.items():
        # First header token minus the leading '>' is the contig name.
        name = header.split()[0][1:]
        if name in ancient_contigs:
            ancient[header] = sequence
    return ancient
|
671a5c9908b3c91023c98892f1ad4e4c6249f6f8
| 319,670
|
from typing import OrderedDict
def format_errors(err, mode=1):
    """From error dictionary *err*, returns a LaTeX-formatted string,
    after handling None entries.

    mode 1 returns a one-line LaTeX summary, mode 2 a multi-line plain-text
    table, mode 3 an OrderedDict of pre-formatted fields; None entries are
    rendered as dashes (modes 1-2) or blanks (mode 3).
    """
    # Reusable numeric format templates; blanks are used by mode 3.
    onedecimal = r"""{0:8.1f}"""
    twodecimal = r"""{0:8.2f}"""
    threedecimal = r"""{0:12.3f}"""
    fourdecimal = r"""{0:12.4f}"""
    shortblank = r"""{0:8s}""".format('')
    longblank = r"""{0:12s}""".format('')
    if mode == 1:
        me = ' ----' if err['me'] is None else '%+.2f' % (err['me'])
        stde = '----' if err['stde'] is None else '%.2f' % (err['stde'])
        mae = ' ----' if err['mae'] is None else '%6.2f' % (err['mae'])
        mape = ' ---- ' if err['mape'] is None else '%6.1f\%%' % (100 * err['mape'])
        mapbe = ' ---- ' if err['mapbe'] is None else '%6.1f\%%' % (100 * err['mapbe'])
        mapce = ' ---- ' if err['mapce'] is None else '%6.1f\%%' % (100 * err['mapce'])
        # NOTE(review): mapbe is formatted above but never used in the
        # template below (mape and mapce are) — confirm this is intended.
        text = """$\{%s; %s\}$ %s %s %s""" % \
            (me, stde, mae, mape, mapce)
        return text
    if mode == 2:
        sdict = OrderedDict()
        # Plain error statistics at 4 decimals.
        for lbl in ['pexe', 'nexe', 'maxe', 'mine', 'me', 'mae', 'rmse', 'stde']:
            sdict[lbl] = ' ----' if err[lbl] is None else fourdecimal.format(err[lbl])
        # Percentage statistics (pe/pbe/pce families), scaled by 100.
        for lbl in ['pexpe', 'nexpe', 'maxpe', 'minpe', 'mpe', 'mape', 'rmspe', 'stdpe',
                    'pexpbe', 'nexpbe', 'maxpbe', 'minpbe', 'mpbe', 'mapbe', 'rmspbe', 'stdpbe',
                    'pexpce', 'nexpce', 'maxpce', 'minpce', 'mpce', 'mapce', 'rmspce', 'stdpce']:
            sdict[lbl] = ' ----' if err[lbl] is None else threedecimal.format(100 * err[lbl])
        text = """nex: {nexe}{nexpe}{nexpbe}{nexpce}\n""" \
               """pex: {pexe}{pexpe}{pexpbe}{pexpce}\n""" \
               """min: {mine}{minpe}{minpbe}{minpce}\n""" \
               """max: {maxe}{maxpe}{maxpbe}{maxpce}\n""" \
               """m: {me}{mpe}{mpbe}{mpce}\n""" \
               """ma: {mae}{mape}{mapbe}{mapce}\n""" \
               """rms: {rmse}{rmspe}{rmspbe}{rmspce}\n""" \
               """std: {stde}{stdpe}{stdpbe}{stdpce}\n""".format(**sdict)
        return text
    if mode == 3:
        sdict = OrderedDict()
        # shortblanks changed from empty strings Aug 2015
        for lbl in ['pexe', 'nexe', 'maxe', 'mine', 'me', 'mae', 'rmse', 'stde']:
            sdict[lbl] = shortblank if err[lbl] is None else twodecimal.format(err[lbl])
        for lbl in ['pexpe', 'nexpe', 'maxpe', 'minpe', 'mpe', 'mape', 'rmspe', 'stdpe',
                    'pexpbe', 'nexpbe', 'maxpbe', 'minpbe', 'mpbe', 'mapbe', 'rmspbe', 'stdpbe',
                    'pexpce', 'nexpce', 'maxpce', 'minpce', 'mpce', 'mapce', 'rmspce', 'stdpce']:
            sdict[lbl] = shortblank if err[lbl] is None else onedecimal.format(100 * err[lbl])
        return sdict
|
b12dc4ba94dbe501a732da76649741cc42c6454b
| 289,991
|
from typing import List
def return_right_point(points_list: List[tuple]) -> tuple:
    """Pick the point with the greatest x coordinate from *points_list*.

    Points are (x, y) tuples; lexicographic tuple ordering compares x
    first, so ties on x resolve to the greater y (the upper point).
    Raises ValueError for an empty list (from ``max``).
    """
    rightmost = max(points_list)
    return rightmost
|
f36b5f1c15d18f8aa5110daeb3a521637ff760e0
| 561,444
|
def jaccard(words_1, words_2):
    """Return the Jaccard similarity coefficient of two word lists.

    ``words_1`` and ``words_2`` are lists of words (no duplicates within
    each list).  The coefficient is |intersection| / |union|.

    Returns 0.0 when both lists are empty (previously this raised
    ZeroDivisionError).

    Doctest:
    >>> words_1 = ['x', 'y', 'z', 'xyz']
    >>> words_2 = ['y', 'x', 'w']
    >>> jaccard(words_1, words_2)
    0.4
    """
    set_1, set_2 = set(words_1), set(words_2)
    union = set_1 | set_2
    if not union:
        # Both inputs empty: the union is empty, define similarity as 0.
        return 0.0
    return len(set_1 & set_2) / len(union)
|
0cc1c777f70360a4389558aef013116cc3bf50e7
| 675,242
|
def level_width(root, level):
    """Count the nodes on a given level of a binary tree.

    Level 1 is the root itself; children are reached through the
    ``sx`` (left) and ``dx`` (right) attributes.  An empty subtree
    contributes 0.
    """
    if root is None:
        return 0
    if level == 1:
        return 1
    return sum(level_width(child, level - 1) for child in (root.sx, root.dx))
|
3b5adbb9ef5eee6e7bdcf5ff80cbf4944e43225a
| 562,214
|
def getSlotInstance(
    slot_name,
    slot_io_name_2_dir_and_width : dict,
    external_io_name_2_dir_and_width : dict,
    inner_io_name_2_dir_and_width : dict):
    """
    Render the Verilog instantiation of one slot inside the wrapper.

    External IOs connect directly by name; inner IOs connect through
    ``_in``/``_out`` suffixed wires depending on their direction.
    Returns the instantiation as a list of source lines.
    """
    lines = [f'  (* black_box *) {slot_name} {slot_name}_U0 (']
    for io, dir_width in slot_io_name_2_dir_and_width.items():
        if io in external_io_name_2_dir_and_width:
            lines.append(f'    .{io}({io}),')
            continue
        # Every non-external IO must be an inner IO with a known direction.
        assert io in inner_io_name_2_dir_and_width
        if 'input' in dir_width:
            lines.append(f'    .{io}({io}_in),')
        else:
            assert 'output' in dir_width
            lines.append(f'    .{io}({io}_out),')
    # Close the port list: turn the trailing comma of the last port into ');'.
    lines[-1] = lines[-1].replace(',', ');\n')
    return lines
|
482848ada528eeed8955dea68e7d8876b14a838a
| 271,203
|
from pathlib import Path
from typing import List
def search_all_files(directory: Path) -> List[Path]:
"""
Search all compressed files in a directory
Args:
directory (Path): Directory to search.
Returns:
List[Path]: List of files ending in .gz in search directory.
"""
dirpath = Path(directory)
assert(dirpath.is_dir())
file_list = []
for file in dirpath.iterdir():
if file.is_file() and file.suffix == ".gz":
file_list.append(file)
else:
continue
return file_list
|
a9abb5150da9c5492f3acfd5b61609da889d49f6
| 111,410
|
import math
def calc_drag_force(v: float, d: float, Cd=0.5):
    """Drag force on a sphere: F = 0.5 * Cd * rho * v^2 * A.

    https://www.grc.nasa.gov/www/k-12/airplane/drageq.html

    :param v: Velocity
    :param d: Diameter
    :param Cd: Drag coefficient
    :return: The drag force, signed to oppose the direction of motion
        (air density fixed at 1.205 kg/m^3)
    """
    sign = -1 if v > 0 else 1
    cross_section = (d / 2) ** 2 * math.pi
    return sign * 0.5 * Cd * 1.205 * (v ** 2) * cross_section
|
1a2e1daefdc27f71d11cc22919c99dfec3bb3db1
| 457,204
|
def escape_xpath(string: str) -> str:
    """
    Produce an XPath string literal (enclosing quotes included) for *string*.

    XPath 1.0 literals have no escape sequences for ' or ", so a value
    containing both kinds of quote must be assembled with concat().
    See https://stackoverflow.com/a/6938681

    Use the result without adding quotes of your own, e.g.:
        dom.xpath(f"//title[text() = {se.easy_xml.escape_xpath(title)}]")
    """
    has_single = "'" in string
    has_double = '"' in string
    if not has_single:
        return f"'{string}'"
    if not has_double:
        return f'"{string}"'
    # Can't use f-strings here because f-strings can't contain \ escapes
    return "concat('%s')" % string.replace("'", "',\"'\",'")
|
f2ae2a5a14fe4f199a0c336e681344fff2a8c237
| 139,931
|
def replace_at(word, line, index):
    """ Overwrite part of *line* with *word*, starting at *index*.

    Characters are overwritten in place (not inserted); the output is
    truncated so it never exceeds the original line length.

    Arguments
    ---------
    word : str
        The text to copy into the line.
    line : str
        The line where the copy takes place.
    index : int
        The index to start copying.

    Returns
    -------
    result : str
        Line of text with the text replaced.
    """
    tail_start = index + len(word)
    patched = f"{line[:index]}{word}{line[tail_start:]}"
    return patched[:len(line)]
|
1587e97e4886d75d509ec6558aedd66759028b06
| 25,673
|
def repeat_word(word, num_repeats):
""" (str, int) -> str
Return word repeated num_repeats times.
>>> repeat_word('Marcia ', 3)
'Marcia Marcia Marcia'
>>> repeat_word('Hi', 0)
''
"""
return word * num_repeats
|
d5554f51f3f5154d54f706c6543f2f0684991872
| 314,656
|
def symbol_filename(name):
    """Turn a symbol name into a filesystem-safe filename.

    Each C++ scope separator ("::") becomes a double underscore.
    """
    return "__".join(name.split("::"))
|
e26966e133874c5704aa7232b4abb27d2ed286e7
| 652,127
|
def parse_line(text: str) -> str:
    """Parse one dictionary line into a word.

    Supported formats:
      * ``+word``                 -> ``word``
      * ``@``/``!``/``$`` lines   -> second tab-separated field, with any
        ``#`` comment suffix (and trailing whitespace) stripped.

    :raises ValueError: for empty/blank lines (previously an unhandled
        IndexError) or any other unsupported format.
    """
    text = text.rstrip()
    if not text:
        # A blank line is invalid input too; raise the documented error
        # instead of IndexError from text[0].
        raise ValueError("Invalid input: "+text)
    if text[0] == "+":
        return text[1:]
    if text[0] in ("@", "!", "$"):
        w = text.split("\t")[1]
        # Drop an inline "#" comment and the whitespace before it.
        if "#" in w:
            return w.split("#")[0].rstrip()
        return w
    raise ValueError("Invalid input: "+text)
|
44e8bd0defc071438aea15002d3e3c6838e61bfb
| 700,986
|
def _has_docker_file(repo, project_path):
"""Checks if project has a Dockerfile."""
return any(content_file.name == 'Dockerfile'
for content_file in repo.get_contents(project_path))
|
adb9eba9ad908811f8f5b04098ec6be44ef824f9
| 653,092
|
def dayAbbrevFormat(day: int) -> str:
    """
    Map a weekday number to its English three-letter abbreviation.

    Day 0 is Sunday; values outside 0-6 wrap around modulo 7.
    For example: dayAbbrevFormat(0) -> "Sun".
    """
    abbreviations = ("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")
    return abbreviations[day % 7]
|
9a306cfd24061eb5d0177508c528f7a91857acf5
| 497,761
|
import random
def cxSimulatedBinary(ind1, ind2, eta):
"""Executes a simulated binary crossover that modify in-place the input
individuals. The simulated binary crossover expects :term:`sequence`
individuals of floating point numbers.
:param ind1: The first individual participating in the crossover.
:param ind2: The second individual participating in the crossover.
:param eta: Crowding degree of the crossover. A high eta will produce
children resembling to their parents, while a small eta will
produce solutions much more different.
:returns: A tuple of two individuals.
This function uses the :func:`~random.random` function from the python base
:mod:`random` module.
"""
for i, (x1, x2) in enumerate(zip(ind1, ind2)):
rand = random.random()
if rand <= 0.5:
beta = 2. * rand
else:
beta = 1. / (2. * (1. - rand))
beta **= 1. / (eta + 1.)
ind1[i] = 0.5 * (((1 + beta) * x1) + ((1 - beta) * x2))
ind2[i] = 0.5 * (((1 - beta) * x1) + ((1 + beta) * x2))
return ind1, ind2
|
a056a041168b96d5469952d184c487c92e2d98c7
| 650,508
|
def get_prefrosh_and_adjacent(prefrosh_id, prefrosh_list):
    """Return [prefrosh, prev_id, next_id] for the entry matching
    *prefrosh_id*; a neighbor ID is None at either end of the list.
    Raises StopIteration when the id is not present (as before)."""
    for position, frosh in enumerate(prefrosh_list):
        if frosh['prefrosh_id'] == prefrosh_id:
            break
    else:
        raise StopIteration
    before = prefrosh_list[position - 1]['prefrosh_id'] if position else None
    after = (prefrosh_list[position + 1]['prefrosh_id']
             if position + 1 < len(prefrosh_list) else None)
    return [frosh, before, after]
|
b56199ad0bdb2ca532bdfcd456864ee59fdb90ba
| 73,645
|
def analytical_value_h_phi(distr, par, c):
    """ Analytical value of the Phi entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution.  Only 'uniform' is supported.
    par : dictionary
        Parameters of the distribution: par['a'], par['b'] for U[a,b].
    c : float, >=1
        Parameter of the Phi-entropy: phi = lambda x: x**c

    Returns
    -------
    h : float
        Analytical value of the Phi entropy.
    """
    if distr != 'uniform':
        raise Exception('Distribution=?')
    width = par['b'] - par['a']
    return 1 / width**c
|
0be52c4a254f08181f739415e6f04d117ca60910
| 306,244
|
import re
def filter_text(text):
    """ Filter the text: return the list of word-character runs (\\w+)."""
    word_pattern = re.compile(r'\w+', re.UNICODE)
    return word_pattern.findall(str(text))
|
f27017558d7b014efb0606e7dddc5fd2ea69c129
| 590,180
|
def token_precision_recall(predicted_parts, gold_set_parts):
    """
    Get the precision/recall for the given token.

    Matching is case-insensitive and multiset-aware: each gold subtoken
    can be matched at most once.  "***" and None predictions are skipped
    (treated as UNKs) but still count toward the precision denominator.

    :param predicted_parts: a list of predicted parts
    :param gold_set_parts: a list of the golden parts
    :return: precision, recall, f1 as floats; 0.0 for an empty
        predicted/gold list (previously a ZeroDivisionError)
    """
    remaining = [tok.lower() for tok in gold_set_parts]
    tp = 0
    for subtoken in predicted_parts:
        if subtoken == "***" or subtoken is None:
            continue  # Ignore UNKs
        lowered = subtoken.lower()
        if lowered in remaining:
            remaining.remove(lowered)
            tp += 1
    precision = tp / len(predicted_parts) if predicted_parts else 0.0
    recall = tp / len(gold_set_parts) if gold_set_parts else 0.0
    if precision + recall > 0:
        f1 = 2 * precision * recall / (precision + recall)
    else:
        f1 = 0.
    return precision, recall, f1
|
7799c1a789741fa926d62068518a53119554d1bb
| 486,782
|
def FieldChoicesFromEnum(enum):
    """
    Extracts (value, name) pairs from a protobuf enum for use as choices.

    Args:
        enum: A protocol buffer enum type.
    Returns:
        A list of (value, name) pairs, where name is the name of an enum
        entry and value is its associated integer value.
    """
    choices = []
    for entry_name, entry_value in enum.items():
        choices.append((entry_value, entry_name))
    return choices
|
2845f5556fe5e39c0d9c14600a0ad5f6eec41cd9
| 477,031
|
def mean(lyst):
    """Return the arithmetic mean of a list of numbers (0 for an empty list)."""
    if not lyst:
        return 0
    # Builtin sum() instead of a manual accumulator that shadowed `sum`.
    return sum(lyst) / len(lyst)
|
376fb0bb0335228eb963d31a779ee08017f66b56
| 531,138
|
def ProgressingPercentage(max_iter, i: int, next_step: int, step = 10):
"""
Function that shows the progressing percentage of an iterative process.
@param max_iter (int): Maximal number of interations
@param i (int): Current iteration
@param next_step (int): Next step of the percentage (set to 0 for the first iteration and then use the return parameter)
@param step (int, optional): Size of the step (should be a fraction of 100). Defaults to 10.
@returns int: The updated next step
"""
if i*100.0/(max_iter-1) >= next_step:
print("The progression is {}%".format(next_step))
return next_step + step
return next_step
|
41f98638f0eedd258fda14901bf31c04804bbaf6
| 208,505
|
def http_dump(fl):
    """Render an addinfourl-style response as a single HTTP message string.

    Joins the status line, the raw header lines and the stripped body
    with CRLF separators.
    """
    status_line = str(fl.code) + " " + fl.msg
    raw_headers = "".join(fl.headers.headers)
    body = fl.read().strip()
    return "\r\n".join([status_line, raw_headers, body])
|
b02b6eff172baed46d02a6b545d37306a05365ed
| 410,973
|
def read_vocab(file_path):
"""
Given a file, prepare the vocab dictionary where each line is the value and
(line_no - 1) is the key
"""
vocab = {}
with open(file_path, "r") as file_contents:
for idx, word in enumerate(file_contents):
vocab[idx] = word.strip()
return vocab
|
dd5efdf575e6b43dedbd1d4b6b474f3c6923e836
| 140,698
|
def parse_field(line, d):
    r""" Extract a "field = value" pair from *line* into dict *d*.

    Example input lines:
        Destination directory = "R:\speed-test\big_files\"
        Directories processed = 1
        Total data in bytes = 527,331,269

    Splitting is limited to the first " = ", so values that themselves
    contain " = " no longer raise ValueError.  Lines without the
    separator leave *d* untouched.

    Returns the (mutated) dict *d*.
    """
    if " = " in line:
        field, value = line.split(" = ", 1)
        d[field.strip()] = value.strip()
    return d
|
0feb32c13f7f1e4278d2e1ff5176c5d957c144df
| 414,530
|
def list_in_list(a, l):
    """Return the index of the first element of *l* equal to *a*, or -1
    when no element matches.

    Parameters
    ----------
    a : list()
        List to search for.
    l : list()
        List to search through.
    """
    for position, element in enumerate(l):
        if element == a:
            return position
    return -1
|
494d9a880bcd2084a0f50e292102dc8845cbbb16
| 4,280
|
def get_resource_timestamp(pefile_object):
    """
    Retrieves timestamps from resource directory entries, if available.

    :param pefile.PE pefile_object: pefile object.
    :return: TimeDateStamp of the resource directory header, or 0 when
        the PE has no resource directory or the field is absent.
    :rtype: int
    """
    # Sentinel (rather than a None default) keeps the exact semantics of
    # the original hasattr() check even if the attribute is set to None.
    missing = object()
    resource_dir = getattr(pefile_object, 'DIRECTORY_ENTRY_RESOURCE', missing)
    if resource_dir is missing:
        return 0
    return getattr(resource_dir.struct, 'TimeDateStamp', 0)
|
7e759f353476058d2dd2a0defd3b827fb862fec6
| 596,158
|
def find_max_weight(regions):
    """Find the maximal profile-entry weight in the given regions.

    Returns 0 for an empty sequence (and never less than 0, since the
    running maximum starts at 0).
    """
    heaviest = 0
    for region in regions:
        weight = region.profEntry.weight
        if weight > heaviest:
            heaviest = weight
    return heaviest
|
7c08f3334d1ec42400509df120dde95a1a05e26d
| 665,386
|
def split_box(low, high):
    """Split box into octants (lowest_corner, highest_corner) and return.

    The box is assumed cubic: the half edge length is derived from the x
    extent and added to every coordinate of *low* to locate the centre.
    Floor division keeps coordinates integral.  The octant order matches
    the original implementation.
    """
    half = (high[0] - low[0]) // 2
    centre = tuple(coord + half for coord in low)
    lx, ly, lz = low[0], low[1], low[2]
    hx, hy, hz = high[0], high[1], high[2]
    cx, cy, cz = centre
    return [
        (centre, high),
        (low, centre),
        ((lx, cy, lz), (cx, hy, cz)),
        ((lx, cy, cz), (cx, hy, hz)),
        ((cx, cy, lz), (hx, hy, cz)),
        ((lx, ly, cz), (cx, cy, hz)),
        ((cx, ly, lz), (hx, cy, cz)),
        ((cx, ly, cz), (hx, cy, hz)),
    ]
|
3d39d24a976455e45598cc6ef60cec3a8ac48388
| 486,614
|
import struct
def Fbytes(f):
    """
    Return the 4-byte native single-precision representation of float *f*.
    """
    packer = struct.Struct("f")
    return packer.pack(f)
|
117fb86216ad6983851923ac9dbd0196cc29b92d
| 12,466
|
def _decode_instance(encoded_data, decoded_objects, data_to_decode):
""" Decode a data structure
Args:
encoded_data (:obj:`dict`, :obj:`list`, or scalar): data structure with
encoded objects
decoded_objects (:obj:`dict`): dictionary that maps the unique ids of
encoded objects to dictionaries that represent the decoded objects
data_to_decode (:obj:`list`): list of tuples of data structures that still
need to decoded. The first element represents the data structure that
needs to be decoded. The second element represents the object that will
represent the decoded data structure.
Returns:
:obj:`dict`, :obj:`list`, or scalar: decoded data structure
"""
if isinstance(encoded_data, dict) and '__type' in encoded_data:
obj_type = encoded_data.get('__type')
obj = decoded_objects.get(encoded_data['__id'], None)
if obj is None:
obj = {'__type': obj_type}
decoded_objects[encoded_data['__id']] = obj
data_to_decode.append((encoded_data, obj))
elif isinstance(encoded_data, list):
obj = []
data_to_decode.append((encoded_data, obj))
elif isinstance(encoded_data, dict):
obj = {}
data_to_decode.append((encoded_data, obj))
else:
obj = encoded_data
return obj
|
8e9cb5502aded89cc04268b3098cff9e25fb1a91
| 40,422
|
import sqlite3
def get_aircraft_registrant(database, mode_s_code_hex):
"""
Looks up an aircraft's registrant's name based on the mode_s_code_hex
in the FAA's database. Example: FEDERAL EXPRESS CORP (a FedEx plane)
Parameters:
1 - database file path
2 - mode_s_code_hex (must be all uppercase string)
Returns:
The aircraft's registrant
"""
# Connect to the database
conn = sqlite3.connect(database)
select_statement = (
'SELECT '
'IFNULL(faa_master.name, "REG?") '
'FROM faa_master '
'WHERE faa_master.mode_s_code_hex LIKE "' + mode_s_code_hex + '%";')
results = conn.execute(select_statement)
# Get the one and only row of the results set
result = results.fetchone()
conn.close()
# In the event that no results at all are returned
if result is not None:
# Create string of registrant name with trailing whitespace removed
registrant = str(result[0]).rstrip()
else:
registrant = "REG?"
return registrant
|
5b07ae5bf762fbcc335fa58aad3358f2d2bddfa0
| 550,278
|
def create_model_identifier(name, version):
    """Get a compatible string as a combination of name and version
    (dots in the version become dashes)."""
    dashed_version = str(version).replace('.', '-')
    return f"{name}-{dashed_version}"
|
3aec457ff6836b93293b1adcb9e26a5429cfff09
| 32,751
|
def int_64bit(num):
    """Return `num` as a list of bytes of its 64-bit little-endian representation.

    :raises ValueError: if ``num`` is negative — signed numbers are not
        supported.  (Previously a bare ``assert``, which is stripped under
        ``python -O``.)  Values >= 2**64 raise OverflowError from
        ``int.to_bytes``.
    """
    if num < 0:
        raise ValueError("Signed numbers are not supported")
    return list(num.to_bytes(8, "little"))
|
76f2f7b094a3362f48b3a81f9677c2c6c8b231ca
| 160,636
|
def print_msg_closure(msg: str):
"""
Closure function that returns a function that prints a message.
:param str msg: Message to attach to print function.
"""
def printer():
print(msg)
return printer
|
4f142de4f7d2102269328641e443ae408171fe4e
| 75,123
|
import itertools
def _flatten(l):
"""Return a flattened shallow list.
Args:
l : list of lists
Returns:
list - concatenation of sublists of l
"""
return list(itertools.chain.from_iterable(l))
|
12d4386ffe3fd92081521d929a9712eadc383740
| 430,268
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.