content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def _extract_args(cmd_line, args):
"""
Extract specified arguments and sorts them. Also removes them from the
command line.
:param cmd_line: Array of command line arguments.
:param args: List of arguments to extract.
:returns: A tuple. The first element is a list of key, value tuples representing each arguments.
The second element is the cmd_line list without the arguments that were
extracted.
"""
# Find all instances of each argument in the command line
found_args = []
for cmd_line_token in cmd_line:
for arg in args:
if cmd_line_token.startswith("--%s=" % arg):
found_args.append(cmd_line_token)
# Strip all these arguments from the command line.
new_cmd_line = [
argument for argument in cmd_line if argument not in found_args
]
arguments = []
# Create a list of tuples for each argument we found.
for arg in found_args:
pos = arg.find("=")
# Take everything after the -- up to but not including the =
arg_name = arg[2:pos]
arg_value = arg[pos + 1:]
arguments.append((arg_name, arg_value))
return arguments, new_cmd_line
|
877ea4a56eaaa81f3b6ebbd6bd727adf91c6d614
| 598,193
|
def LongitudinalBounds(lng, num_tiles):
    """Return (west, east) bounds of the tile containing a longitude.
    Args:
      lng: (float) Longitude in degrees.
      num_tiles: (integer) Number of tiles in the tile grid.
    Returns:
      Tuple of (west, east) bounds in degrees.
    """
    # Wrap the longitude into the half-open interval [-180, 180).
    while lng < -180.0:
        lng += 360.0
    while lng >= 180.0:
        lng -= 360.0
    tile_width = 360.0 / num_tiles
    tile_index = int((lng + 180.0) / tile_width)
    west = tile_index * tile_width - 180.0
    east = west + tile_width
    return (west, east)
|
93d801df46d3f063d5a46eabcccd5254e1a9b991
| 201,565
|
from typing import List
def quality_string_to_array(quality_string: str) -> List[int]:
    """Convert a Phred+33 quality string to its integer values."""
    offset = 33  # Phred+33 encoding baseline.
    values = []
    for symbol in quality_string:
        values.append(ord(symbol) - offset)
    return values
|
ac92bf15ce0e00507d5a9081ab01c121d0aece09
| 373,741
|
def extract_commit_file_components(commit_files):
    """
    Gather per-file attributes into parallel lists, one per component.
    :param commit_files: List of database file objects
    :return: Dict mapping component name to the list of that component's
        values, in input order.
    """
    # (output key, source attribute) for each component we collect.
    fields = (
        ('patches', 'patch'),
        ('additions', 'additions'),
        ('changes', 'changes'),
        ('deletions', 'deletions'),
        ('filenames', 'filename'),
        ('statuses', 'status'),
    )
    components = {key: [] for key, _ in fields}
    for commit_file in commit_files:
        for key, attr in fields:
            components[key].append(getattr(commit_file, attr))
    return components
|
8d235c09317d7174dc6b3a9db3a12d8ecc989a45
| 138,351
|
def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
"""Converts the predicted label for evaluation.
There are cases where the training labels are not equal to the evaluation
labels. This function is used to perform the conversion so that we could
evaluate the results on the evaluation server.
Args:
prediction: Semantic segmentation prediction.
train_id_to_eval_id: A list mapping from train id to evaluation id.
Returns:
Semantic segmentation prediction whose labels have been changed.
"""
converted_prediction = prediction.copy()
for train_id, eval_id in enumerate(train_id_to_eval_id):
converted_prediction[prediction == train_id] = eval_id
return converted_prediction
|
66b6bae48e39b833408e2a3ec776c1386fdf28eb
| 586,950
|
def _remove_dupes(ls: list) -> list:
"""Remove duplicate values from a list."""
try:
# Try the more efficient method first
return list(set(ls))
except TypeError:
# Fall back to a slower method
ret = []
for v in ls:
if v not in ret:
ret.append(v)
return ret
|
ceabf915565736f72b748c8ad8ebaf06ea667a33
| 595,768
|
from typing import List
def cyclic_sort(nums: List[int]) -> List[int]:
    """
    Sort an array containing numbers from 1..n in place using cyclic swaps.
    Time Complexity: O(N)
    Space Complexity: O(1)
    Parameters
    ----------
    nums : List[int]
        Input array.
    Returns
    -------
    nums : List[int]
        The same list, sorted in place.
    """
    idx = 0
    while idx < len(nums):
        # A value v belongs at index v - 1.
        home = nums[idx] - 1
        if nums[idx] == nums[home]:
            # Already in its home slot (or a duplicate): move on.
            idx += 1
        else:
            nums[idx], nums[home] = nums[home], nums[idx]
    return nums
|
eff9fb949fb1afec872624096a2b6bcd52b3657e
| 526,269
|
import re
def slug(text):
    """
    Convert text to a "slug". Used by
    :py:attr:`redock.api.Container.ssh_alias`.
    :param text: The original text, e.g. "Some Random Text!".
    :returns: The slug text, e.g. "some-random-text".
    """
    # Collapse every run of non-alphanumerics to a single hyphen,
    # then drop hyphens left at either end.
    collapsed = re.sub('[^a-z0-9]+', '-', text.lower())
    return collapsed.strip('-')
|
a86da4d4bd6326b207c2fa216e0afdc874448215
| 514,070
|
def alt_bases_from_indices(alt_allele_indices, alternate_bases):
    """Get alt allele bases based on their indices.
    e.g. one alt allele: [0], ["C"] => "C"
    or with two alt alleles: [0,2], ["C", "TT", "A"] => "C-A"
    Args:
      alt_allele_indices: list of integers. Indices of the alt alleles for a
        particular example.
      alternate_bases: list of strings. All alternate alleles for the variant.
    Returns:
      str. Alt allele(s) at the indices, joined by '-' if more than 1.
    """
    # '-' rather than '/' so the result is safe to use in file paths.
    return '-'.join(alternate_bases[index] for index in alt_allele_indices)
|
5e27f8092ad754f33294e7bd551b9ecc469cfbdf
| 439,195
|
def try_int(val, default=None):
    """Coerce ``val`` to int, returning ``default`` when conversion fails."""
    try:
        result = int(val)
    except (ValueError, TypeError):
        result = default
    return result
|
8e60e701500c11197386e633470597891926a8c6
| 560,150
|
def get_user_roles_common(user):
    """Return the role stored on the user record in the database."""
    role = user.role
    return role
|
cf25f029325e545f5d7685e6ac19e0e09105d65a
| 704,544
|
def convert_value(value):
    """
    Convert an input string to its native type: "T"/"F" become booleans,
    values containing "." become floats, everything else is parsed as int.
    (Plain strings are handled separately in parse_card().)
    """
    if value == "T":
        return True
    if value == "F":
        return False
    if "." in value:
        return float(value)
    return int(value)
|
7eb0444f6ad8c2b22127e9119c7fb8f08ebf1322
| 301,247
|
def calculate_time_duration(start, end):
    """
    Calculate how long passed between two times.
    Args:
        start(datetime.datetime): the start time
        end(datetime.datetime): the end time
    Returns:
        duration(dict): dict containing the duration split into integer
        days, hours, minutes and seconds
    """
    total_seconds = (end - start).total_seconds()
    days, remainder = divmod(total_seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    # divmod on floats yields floats; previously only 'seconds' was cast,
    # so days/hours/minutes leaked out as floats. Cast every component.
    duration = {
        'days': int(days), 'hours': int(hours),
        'minutes': int(minutes), 'seconds': int(seconds)}
    return duration
|
e09b8dc34f2c4228e5d17ca10ba56beae61767ef
| 294,092
|
def json_decomment(json, prefix='#', null=False):
    """
    Remove any JSON object member whose name begins with 'prefix'
    (default '#') and return the result. If 'null' is True, replace
    the prefixed items with a null value instead of deleting them.
    """
    if isinstance(json, dict):
        result = {}
        for key, value in json.items():
            if key.startswith(prefix):
                # Commented-out member: keep it as None, or drop it.
                # (The old code had a bare `next` expression here — a
                # no-op, not a `continue` — so dropping was accidental.)
                if null:
                    result[key] = None
            else:
                result[key] = json_decomment(value, prefix=prefix,
                                             null=null)
        return result
    elif isinstance(json, list):
        return [json_decomment(item, prefix=prefix, null=null)
                for item in json]
    else:
        # Scalars pass through untouched.
        return json
|
cf26288369994ab00b1afdcb2da22423b9598377
| 105,100
|
def ident(x):
    """Identity function: return the argument unchanged.
    Named 'ident' because 'id' is a Python built-in that does
    something different.
    """
    return x
|
249393e8ce774e4b2f915af2fa145943174c7ff9
| 139,685
|
def glyphNameToFileName(glyphName, glyphSet):
    """Default algorithm for making a file name out of a glyph name.
    This one has limited support for case insensitive file systems:
    it assumes glyph names are not case sensitive apart from the first
    character:
    'a'     -> 'a.glif'
    'A'     -> 'A_.glif'
    'A.alt' -> 'A_.alt.glif'
    'A.Alt' -> 'A_.Alt.glif'
    'T_H'   -> 'T__H_.glif'
    'T_h'   -> 'T__h.glif'
    't_h'   -> 't_h.glif'
    'F_F_I' -> 'F__F__I_.glif'
    'f_f_i' -> 'f_f_i.glif'
    """
    def _mark(piece):
        # Suffix pieces containing uppercase with "_" so names that
        # differ only by case map to distinct file names.
        return piece + "_" if piece != piece.lower() else piece

    if glyphName.startswith("."):
        # some OSes consider filenames such as .notdef "hidden"
        glyphName = "_" + glyphName[1:]
    parts = glyphName.split(".")
    if "_" in parts[0]:
        # Compound name: check each underscore-separated piece on its own.
        parts[0] = "_".join(_mark(piece) for piece in parts[0].split("_"))
    else:
        # Single name.
        parts[0] = _mark(parts[0])
    # Resolve additional, period-separated parts, like alt / Alt.
    parts[1:] = [_mark(piece) for piece in parts[1:]]
    return ".".join(parts) + ".glif"
|
3230b7ac0f185e02ecb4fcf12b55af0847f7243e
| 594,821
|
import ast
def attr_list(root, func_name):
    """
    Extract attribute names that are *loaded* inside the function or
    method named ``func_name`` of an ast tree, skipping attributes that
    share a name with any function defined in the tree (method calls).
    :param root: root node of ast tree
    :param func_name: name of the function
    :return: a list of attribute names
    """
    # Names of every function defined anywhere in the tree.
    function_names = sorted(
        {node.name for node in ast.walk(root) if isinstance(node, ast.FunctionDef)}
    )
    attributes = []
    for node in ast.walk(root):
        if not (isinstance(node, ast.FunctionDef) and node.name == func_name):
            continue
        for child in ast.walk(node):
            if (
                isinstance(child, ast.Attribute)
                and isinstance(child.ctx, ast.Load)
                and child.attr not in function_names
            ):
                attributes.append(child.attr)
    return attributes
|
bf757a6ce5fe9f9c704bb33cdd1cbdbc1877fac7
| 439,829
|
def build_speech_response(title, ssml_output, plain_output):
    """Build a speech JSON response dict from a title, SSML output, and
    plain-text output."""
    response = {}
    response['outputSpeech'] = {'type': 'SSML', 'ssml': ssml_output}
    response['card'] = {
        'type': 'Simple',
        'title': title,
        'content': plain_output,
    }
    # In this app, the session always ends after a single response.
    response['shouldEndSession'] = True
    return response
|
2d38b9d0d8a261c6011eec3416227e972329de86
| 695,454
|
def remove_comment(string_entry):
    """
    Strip .ini-style comments from a single line: everything from the
    first '#' or ';' onward is removed. The remaining text is returned
    as-is (surrounding whitespace is not trimmed).
    """
    result = string_entry
    for marker in '#;':
        # partition() returns the whole string unchanged when the
        # marker is absent, so no membership test is needed.
        result, _, _ = result.partition(marker)
    return result
|
a924c31f40b9a37da1748fc170f605d178511cbc
| 482,976
|
def flux_to_total(cube):
    """Convert a flux (per-second quantity) cube to a total per time interval.
    Multiplies the data by each time interval's length in seconds and
    rewrites the units accordingly ('s-1' dropped, or 'W' -> 'J').
    """
    assert 'days' in str(cube.coord('time').units)
    bounds = cube.coord('time').bounds
    seconds_per_day = 60 * 60 * 24
    interval_seconds = (bounds[:, 1] - bounds[:, 0]) * seconds_per_day
    cube.data = cube.data * interval_seconds
    unit_str = str(cube.units)
    assert ('s-1' in unit_str) or ('W' in unit_str), 'input units must be a flux per second'
    if 's-1' in unit_str:
        cube.units = unit_str.replace('s-1', '')
    elif 'W' in unit_str:
        cube.units = unit_str.replace('W', 'J')
    return cube
|
ca46a98bb235c30f49b2e46c7e54326f56eb9d9a
| 501,568
|
def generate_checksum(csum_type, file_path):
    """
    Generate a checksum of a file using a hashlib-compliant constructor.
    :param csum_type: the hashlib compliant checksum type
    :param file_path: the absolute path to the resource being hashed
    :return: hexdigest of the checksum, or None when file_path is None
    """
    digest = csum_type()
    if file_path is None:
        print("ERROR: file_path is None")
        return None
    chunk_size = 1024 * 1024  # 1 MB chunks bound memory for large files.
    with open(file_path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
|
ceb4bf894f90494546fde4674cfb5de618d0354d
| 323,406
|
def bytescl(img, bottom, top):
    """
    Linearly rescale a pixel image so its values span [bottom, top].
    Keyword arguments:
    img -- Original pixel image (array with .min()/.max()).
    bottom -- Lower limit of the rescaled image.
    top -- Upper limit of the rescaled image.
    Output(s):
    scl_img -- Scaled image with new limits bottom(min) - top(max).
    """
    span = img.max() - img.min()
    scl_img = (top - bottom) * (img - img.min()) / span + bottom
    return scl_img
|
e9b62e982920165be0a6897fb7f152c32737a14b
| 73,514
|
def is_validated(user):
    """Is this user record validated?
    An account counts as validated when its `validated` field is True,
    or when the field is missing entirely (accounts created before the
    "account validation option" was enabled).
    """
    validated = user.get("validated", True)
    return validated
|
c1ddfc52a62e71a68798dc07e7576a4ae42aa17f
| 1,562
|
def land_to_index(num):
    """
    Convert a 1-based land sequential number to (ilat, ilon) indices on
    a 720-column grid (row-major ordering).

    :param num: 1-based sequential cell number, 720 cells per row
    :return: (i, j) tuple of integer row and column indices
    """
    # Use floor division throughout: plain '/' yields floats under
    # Python 3, which cannot be used as array indices.
    if num % 720 == 0:
        # Exact multiple of 720 means the last column of its row.
        j = 719
        i = num // 720 - 1
    else:
        # Normal columns.
        j = num % 720 - 1
        i = num // 720
    return (i, j)
|
61b867643e9a60c69b6babc722d7377b561441d0
| 216,846
|
import re
import logging
def extract_name(resume_segments):
    """
    Find a name line within the resume's contact-info segment.
    :param resume_segments: Dictionary containing segmented resume data
    :type resume_segments: Dictionary
    :return: the first line that looks like a name, '' when no line
        matches, or None on error.
    :rtype: str
    """
    try:
        contact_lines = resume_segments['contact_info']
        name_pattern = re.compile(r"^([A-Za-z\u00E9-\u00F8\.-][\s]*)+$")
        for line in contact_lines:
            if name_pattern.match(line):
                return line
        return ''
    except Exception as e:
        logging.error('Issue parsing name:: ' + str(e))
        return None
|
60583d1b2fc5ffbe75f0d6b0d3b23b581dd0bc3a
| 507,730
|
def is_target_platform(ctx, platform):
    """
    Determine if the platform is a target platform rather than a
    configure/platform-generator pseudo-platform.
    :param ctx: Context (unused here, kept for the caller's interface)
    :param platform: Platform to check
    :return: True if it is a target platform, False/falsy if not
    """
    if not platform:
        # Preserve the falsy value itself (None, '', ...), matching the
        # short-circuit behavior of `platform and ...`.
        return platform
    return platform not in ('project_generator', [])
|
7c48e886f15beba0f2350aee4a6b1a8aef408537
| 686,595
|
def fancy_prediction(classifier, input_str: str) -> str:
    """
    Format the prediction results of a single input string.

    Every label known to the classifier is listed with the probability
    that it matches the input string, followed by the input text itself.

    Parameters
    ----------
    classifier:
        The trained fasttext classifier used to predict the input string
    input_str: str :
        The input string to predict

    Returns
    -------
    str
        The well-formatted results of the prediction on the given input string.
    """
    predictions = classifier.predict_proba(
        [input_str], k=len(classifier.labels)
    )
    # Pad every label to the longest one so the probabilities line up.
    longest_label_len = max(map(len, classifier.labels))
    # Collect the pieces and join once rather than += in a loop;
    # also removes the previously commented-out truncation code.
    lines = []
    for curr_string_prediction in predictions:
        for label, prob in curr_string_prediction:
            lines.append(f"{str(label).ljust(longest_label_len)}: {prob}\n")
    lines.append("Text:\n")
    lines.append(input_str)
    return "".join(lines)
|
8d4fdec8682f094058db5d267acfd7c732957a54
| 146,074
|
def absolutify(request, path):
    """Build the absolute URL of ``path`` using the request object."""
    absolute = request.build_absolute_uri(path)
    return absolute
|
ea2064fe80e577635447ccf58c07414da0a4b5a8
| 169,009
|
def get_x(vx, vy, x1, y1, y_ref):
    """
    Helper function for draw_lines.
    Given a direction vector (vx, vy) and a point (x1, y1) on a line,
    return the x coordinate where the line reaches y_ref.
    """
    slope = vy / vx
    intercept = y1 - slope * x1
    return (y_ref - intercept) / slope
|
fccc197a7521c486f69116355ee1535698fdd146
| 438,339
|
import io
import base64
def send_bytes(writer, filename, mime_type=None, **kwargs):
    """
    Run ``writer`` against an in-memory BytesIO and package the bytes in
    the dict format expected by the Download component.
    :param writer: a writer that can write to BytesIO
    :param filename: the name of the file
    :param mime_type: mime type of the file (optional, passed to Blob in the javascript layer)
    :return: dict of content (base64 encoded) and metadata used by the Download component
    """
    buffer = io.BytesIO()
    # Some pandas writers close the IO themselves; disarm close() while
    # the writer runs, then restore and invoke the real one.
    real_close = buffer.close
    buffer.close = lambda: None
    writer(buffer, **kwargs)
    payload = buffer.getvalue()
    real_close()
    encoded = base64.b64encode(payload).decode()
    return dict(content=encoded, filename=filename, mime_type=mime_type, base64=True)
|
4c497a0d43e58580d0f17840146b22825037ec24
| 604,469
|
def calculate_closest_pattern_dictionary(distances_dictionary):
    """
    Return the key of the fix point with the smallest distance.
    :param distances_dictionary: a dictionary where each key is the index
        of a fix point and the value is the distance to that fix point
    :return: the key whose distance value is minimal
    """
    closest = min(distances_dictionary, key=lambda key: distances_dictionary[key])
    return closest
|
5a2e42ae8b85e129ff97bc19bb839423ed6a1355
| 568,589
|
def format_as_string(string: str):
    """
    Wrap a string in double quotes and green ANSI color codes
    (see https://stackoverflow.com/a/287944/5299750).
    :param string: to be printed
    """
    green = '\033[32m'
    reset = '\033[0m'
    return green + '"' + string + '"' + reset
|
4ba55f4b35bf55e91085c49724d7dce81a8ed12a
| 640,902
|
def check_rank(shape, required_rank):
    """Return True iff ``shape`` is a tuple whose length equals the
    expected rank; any non-tuple input is False."""
    return isinstance(shape, tuple) and len(shape) == required_rank
|
f38afd67c132aa713429e376a63eb0dd26dbe863
| 114,509
|
def is_abstract_class(cls):
    """ Tell whether the given class is abstract, i.e. it still has
    unimplemented abstractmethods or abstractproperties from its bases.
    """
    abstract_methods = getattr(cls, "__abstractmethods__", False)
    return bool(abstract_methods)
|
d89acf617b30e78c8eda7b91b6453044425f38dd
| 105,097
|
def json_elements_to_weight_fractions(elements):
    """Format element weight fractions from the Elements data.

    :param elements: iterable of dicts with 'Element' (alphabetic symbol)
        and 'WeightFraction_whole' (numeric) keys
    :return: list of "<symbol> <fraction to 6 decimals>" strings
    :raises ValueError: if an element symbol is not purely alphabetic
    """
    results = []
    for element in elements:
        symbol = element["Element"]
        # Raise explicitly instead of `assert`, which is stripped when
        # Python runs with -O and would silently skip validation.
        if not symbol.isalpha():
            raise ValueError(f"invalid element symbol: {symbol!r}")
        results.append(f"{symbol} {element['WeightFraction_whole']:.6f}")
    return results
|
e297f485dfab68c056b5af5deda1a802a3298650
| 456,718
|
import re
from datetime import datetime
def format_date(row):
    """ Extract the millisecond unix timestamp embedded in the string and
    convert it to a datetime object; best if used with map.
    :param row: row from a dataframe """
    text = str(row)
    # First signed integer found in the text is taken as the timestamp.
    numbers = re.findall(r'-?\d+\.?\d*', text)
    millis = int(numbers[0])
    return datetime.fromtimestamp(millis / 1000)
|
b9c3fc5445e7e9985b62ace145faece86576dd25
| 63,144
|
import torch
def _make_input(t, requires_grad=False, device=torch.device('cpu')):
    """Make zero inputs for AE loss.
    Args:
        t (torch.Tensor): input
        requires_grad (bool): Option to use requires_grad.
        device: torch device
    Returns:
        torch.Tensor: zero input.
    """
    # NOTE(review): torch.autograd.Variable is deprecated; on modern
    # PyTorch it returns a plain Tensor with requires_grad set — confirm
    # the minimum supported torch version before modernizing this call.
    inp = torch.autograd.Variable(t, requires_grad=requires_grad)
    # Reduce to a scalar sum. Despite the docstring's "zero input", the
    # value is zero only when t itself is all zeros — presumably what
    # callers pass in; verify against the AE loss call sites.
    inp = inp.sum()
    # Move the resulting scalar to the requested device.
    inp = inp.to(device)
    return inp
|
c65c4ba243d786471b810d48af2251fff94c5d5f
| 26,997
|
def None_Omit(Input_Str):
    """
    Replace every occurrence of the text "None" with the quoted form
    "\"None\"".
    :param Input_Str: input string
    :type Input_Str : str
    :return: modified string as str
    """
    return Input_Str.replace("None", '"None"')
|
978d4786d30c4ceba88185acb76ff5e84d7ab664
| 207,551
|
def delete_nucleotides(seq, start, n):
    """Drop ``n`` characters from ``seq`` beginning at index ``start``."""
    head = seq[:start]
    tail = seq[start + n:]
    return head + tail
|
3bcfd9dd0cb7c55c0bfdca8b9b901e170b4a23b6
| 328,479
|
def order_columns(pks, columns):
    """
    Order columns so primary-key columns come first (keeping input
    column order), followed by the non-primary-key columns.
    :param pks: primary key name list
    :param columns: list of column dicts with a "name" entry
    :return: primary key columns + non primary key columns ordered
    """
    # A set membership test also fixes two defects of the old nested
    # loop: all columns were dropped when ``pks`` was empty, and non-pk
    # columns were duplicated when the last pk name repeated in ``pks``.
    pk_names = set(pks)
    pk_list = [c for c in columns if c.get("name") in pk_names]
    non_pk_list = [c for c in columns if c.get("name") not in pk_names]
    return pk_list + non_pk_list
|
01326edb0af00c5009cbb86b85496cd3806f33f0
| 163,164
|
def invert_dictionary(dictionary):
    """Invert a dictionary, swapping keys and values.
    .. note::
        With unique keys and unique values the inversion is perfect;
        when values repeat, later keys overwrite earlier ones.
    Args:
        dictionary (dict): A dictionary
    Returns:
        dict: inverted dictionary
    """
    inverted = {}
    for key, value in dictionary.items():
        inverted[value] = key
    return inverted
|
46ced17c91ae61f50fa6204acdfbbb92561f6c6a
| 151,272
|
def normalize_raw(raw_line: str) -> str:
    """Replace each hard tab with exactly four spaces.

    The replacement is written as ``' ' * 4`` so the documented
    four-space width is explicit and cannot be silently mangled.
    """
    return raw_line.replace('\t', ' ' * 4)
|
fb52105035a97d328a6027226cb921544b4897f2
| 374,158
|
import re
def replace_titles(targets, predictions):
    """Replace @...(...)...@ title spans with a __unk__ token in both
    the targets and predictions lists."""
    pattern = re.compile(r"\@([^@]*\([^@]*\)[^@]*)\@")
    def scrub(text):
        return pattern.sub("__unk__", text)
    return ([scrub(t) for t in targets], [scrub(p) for p in predictions])
|
e7d4635d497793077f6366f8b4457d5e168e8f4b
| 53,090
|
def divide_clusters(clustered_reads, ref_names):
    """
    Split the output of cluster_txs into ref/non-ref groups per cluster,
    based on whether each transcript's name is in ref_names.
    Only transcripts with at least one intron interval are considered,
    and clusters lacking members of either group are discarded.
    Returns a dict mapping cluster IDs to [ref_txs, non_ref_txs].
    """
    ref_names = set(ref_names)
    divided = {}
    for chrom, clusters in clustered_reads.items():
        for cluster_id, tx_list in clusters.items():
            spliced = [tx for tx in tx_list if len(tx.intron_intervals) > 0]
            ref = [tx for tx in spliced if tx.name in ref_names]
            iso = [tx for tx in spliced if tx.name not in ref_names]
            if ref and iso:
                divided[cluster_id] = [ref, iso]
    return divided
|
8757dddd991c327d74bb42b28e753bbe212d8d9c
| 229,283
|
def getFirstPlist(textString):
    """Gets the next plist from a set of concatenated text-style plists.
    Returns a tuple - the first plist (if any, else "") and the
    remaining string."""
    header = '<?xml version'
    footer = '</plist>'
    start = textString.find(header)
    if start == -1:
        # No plist start marker at all.
        return ("", textString)
    end = textString.find(footer, start + len(header))
    if end == -1:
        # Unterminated plist.
        return ("", textString)
    end += len(footer)
    return (textString[start:end], textString[end:])
|
19de59d42661488ad254a7afa8aded4f3f17bf1a
| 28,682
|
import hashlib
def hash_buffer(buffer):
    """Compute the MD5 digest of a readable byte buffer, in 1 MiB chunks."""
    chunk_size = 1024 * 1024  # 1 MiB
    digest = hashlib.md5()
    # iter() with a sentinel stops as soon as read() returns b"".
    for block in iter(lambda: buffer.read(chunk_size), b""):
        digest.update(block)
    return digest.digest()
|
1d40d956052d96485fce0236fe1e3c07e58543f1
| 504,541
|
def get_l2_cols(names):
    """
    Extract column names related to Layer 2: those containing "RtGAM"
    but not "RtGAMSE".
    Parameters
    ----------
    names : list
        list with column names
    Returns
    -------
    list
        list containing only Layer 2 column names
    """
    return [name for name in names if "RtGAM" in name and "RtGAMSE" not in name]
|
76627db426d081ec4f094e02dc3848194083eb1a
| 305,412
|
import torch
def latin_hypercube(n_pts, dim, dtype=None, device=None):
    """Latin hypercube sample with a random perturbation inside each cell."""
    denom = float(2 * n_pts)
    X = torch.zeros(n_pts, dim, dtype=dtype, device=device)
    # Stratified cell centers along one axis: (2k + 1) / (2 n_pts).
    centers = (1.0 + 2.0 * torch.arange(0.0, n_pts, dtype=dtype, device=device)) / denom
    for d in range(dim):
        # Independently shuffle the center locations per dimension.
        X[:, d] = centers[torch.randperm(n_pts)]
    # Jitter each point uniformly within its cell.
    X += (-1.0 + 2.0 * torch.rand(n_pts, dim, dtype=dtype, device=device)) / denom
    return X
|
036c4c6585ed10e09390a49fb765e0332987c9b8
| 612,028
|
def spoiler(text):
    """Wrap text in Discord spoiler markers ("||...||")."""
    return "||{}||".format(text)
|
26066d6371d2ee14fd39e37e9068f74bfd16e43b
| 291,201
|
def crop_to_bottom_half(image):
    """Return only the bottom half of the image."""
    width, height = image.size
    # Crop box is (left, upper, right, lower); upper starts halfway down.
    return image.crop((0, height / 2, width, height))
|
7e8f7e106e483afdf26242de0963e1033bdf9720
| 639,062
|
def _get_grid_uniongrid_elements(merged_grid, grid):
"""
Get the mapping between a grid and the merged grid
Parameters
----------
merged_grid : NumPy Array of size [num_merged_elements x 2]
Merged pseudo-grid, i.e., as obtained with merge_grids()
grid : NumPy Array of size [num_grid_elements x 2]
Pseugo grid, i.e.: high_grid, mortar_grid, or low_grid.
Returns
-------
elements : List of length num_merged_elements
Containing the local elements of the grids overlapping the merged grid,
at the a interval.
Credits
-------
The follwing piece of code was modified from:
www.geeksforgeeks.org/find-intersection-of-intervals-given-by-two-lists/
Author: Sarthak Shukla (Indian Institute of Information Technology Nagpur)
"""
# First, convert the FEM-like 1D grids to lists
arr1 = merged_grid.tolist()
arr2 = grid.tolist()
# Initialize pointers
i = j = 0
# Length of lists
n = len(arr1)
m = len(arr2)
# Elements list
elements = []
# Loop through all intervals unless one of the interval gets exhausted
while i < n and j < m:
# Left bound for intersecting segment
l = max(arr1[i][0], arr2[j][0])
# Right bound for intersecting segment
r = min(arr1[i][1], arr2[j][1])
# If the segment is valid, append the element to the list
if l < r:
elements.append(j)
# If i-th interval's right bound is
# smaller increment i else increment j
if arr1[i][1] < arr2[j][1]:
i += 1
else:
j += 1
return elements
|
71d41c9ad6d67a2c08b9878d3d86acdbe1ec5137
| 536,984
|
def is_alpha(char: str) -> bool:
    """
    Check whether a string consists solely of ASCII English letters.
    Args:
        char (str): string to test
    Returns:
        Boolean: True when every character is an ASCII letter
    """
    try:
        return char.encode('ascii').isalpha()
    except (UnicodeEncodeError, AttributeError):
        # Non-ASCII text (encode fails) or a non-string input (no
        # .encode) is simply "not alpha". Narrowed from a bare `except`
        # so unrelated bugs are no longer silently swallowed.
        return False
|
6a506e04377fcfed0310ff406ec692bd79b72f4d
| 445,863
|
def snaps_to_hydroobject(gdf, datamodel, method, tolerance=0.001, dtype=bool):
    """
    Flag which geometries snap to a HydroObject branch within tolerance.
    Parameters
    ----------
    gdf : ExtendedGeoDataframe
        ExtendedGeoDataFrame, typically a layer in a HyDAMO datamodel class
    datamodel : HyDAMO
        HyDAMO datamodel class
    method : str, options: 'intersecting', 'overal', 'centroid', 'ends'
        Method used to determine the nearest hydroobject
    tolerance : numeric
        Tolerance used to snap to the hydroobject
    dtype : dtype, optional
        Dtype to assign to the result series. The default is bool.
    Returns
    -------
    Pandas Series
        Default dtype is bool; True where the geometry snapped.
    """
    gdf.snap_to_branch(datamodel.hydroobject, snap_method=method, maxdist=tolerance)
    # A non-NaN branch_offset means the geometry snapped to a branch.
    snapped = gdf.branch_offset.notna()
    return snapped.astype(dtype)
|
6f0a82ce5a043fb26e67438bcca8a9e2ab2e5f56
| 625,127
|
def make_response(resp):
    """ Turn a TWS response dict into the Flask `resp, code` form per
    http://flask.pocoo.org/docs/0.10/quickstart/#about-responses.
    Responses without an 'errorMsg' pass through unchanged.
    """
    if 'errorMsg' not in resp:
        return resp
    # Error 162 pertains to "Historical data request pacing violation";
    # it (and a missing code) maps to 429, anything else to 400.
    code = 429 if resp['errorCode'] in (None, 162) else 400
    return resp, code
|
a6d8493cbcf0ff858114cec8032c46bbbff6a4e2
| 129,288
|
import re
def _name(user_filename):
"""Parse base name from user_filename
Can't assume the file separators will be understood so have to
parse out the name manually.
Will need to be uniquely named by sirepo.server, but not done
yet.
Args:
user_filename (str): Passed in from browser
Returns:
str: suitable name
"""
# crude but good enough for now.
m = re.search(r'([^:/\\]+)\.\w+$', user_filename)
return m.group(1) if m else user_filename
|
a71cbd0362a6aa511f261ea35d0ef6507436b7e6
| 494,551
|
def make_dataset_header(data, file_format, aminoacids):
    """Create the dataset header for NEXUS files from ``#NEXUS`` to ``MATRIX``.
    Parameters:
        data (namedtuple): with necessary info for dataset creation.
        file_format (str): TNT, PHYLIP, NEXUS, FASTA, MEGA
        aminoacids (boolean): If ``aminoacids is True`` the header will show
            ``DATATYPE=PROTEIN`` otherwise it will be ``DNA``.
    """
    datatype = 'PROTEIN' if aminoacids else 'DNA'
    if file_format == 'MEGA':
        # MEGA uses a fixed two-line header.
        return "#MEGA\n!TITLE title;"
    if file_format in ['NEXUS', 'PHYLIP', 'FASTA']:
        header = """
#NEXUS
BEGIN DATA;
DIMENSIONS NTAX={0} NCHAR={1};
FORMAT INTERLEAVE DATATYPE={2} MISSING=? GAP=-;
MATRIX
""".format(data.number_taxa, data.number_chars, datatype)
    else:  # file_format: TNT
        molecule_type = "prot" if aminoacids else "dna"
        header = """
nstates {0};
xread
{1} {2}""".format(molecule_type, data.number_chars, data.number_taxa)
    return header.strip()
|
5b7a4110be7bc2b06c96e7ed12b057e92a8170df
| 371,679
|
def net2list(net_root):
    """
    Flatten a net into a list of ops in topological (BFS) order.

    Traversal starts from the children of ``net_root``; the root's own
    op is not included (matching the original traversal).

    :param net_root: node with an ``ops`` dict of child nodes, each
        child carrying an ``op`` and its own ``ops`` dict
    :return: list of ops in breadth-first order
    """
    from collections import deque
    # dict.items()/.values() work on both Python 2 and 3; the previous
    # ``iteritems()`` call raised AttributeError under Python 3.
    queue = deque(net_root.ops.values())
    op_list = []
    while queue:
        # deque.popleft() is O(1), unlike the old O(n) list re-slicing.
        node = queue.popleft()
        op_list.append(node.op)
        queue.extend(node.ops.values())
    return op_list
|
824b7b9a46f72d44a8efed73c9ed0e1d658bacb3
| 67,642
|
def sum_entries(hist_data, default=True):
    """Recursively total the entries of a histogrammar histogram.
    Sometimes hist.entries gives zero as answer; this function recurses
    into sub-histograms so it always works.
    :param hist_data: input histogrammar histogram
    :param bool default: if false, do not trust the histogram's own
        entries count; recurse instead (excludes nans, of, uf).
    :return: total sum of entries of histogram
    :rtype: int
    """
    if default:
        count = hist_data.entries
        if count > 0:
            return count
    # The histogram's own count is unreliable (or default=False):
    # total the entries of its sub-histograms instead.
    total = 0
    if hasattr(hist_data, "bins"):
        # Loop over all counters and integrate over y (=j).
        for key in hist_data.bins:
            total += sum_entries(hist_data.bins[key])
    elif hasattr(hist_data, "values"):
        # Loop over all counters and integrate over y (=j).
        for sub_hist in hist_data.values:
            total += sum_entries(sub_hist)
    elif hasattr(hist_data, "entries"):
        # Leaf: only count histogrammar.Count() objects.
        total += hist_data.entries
    return total
|
b3979ec88fa84a646c38ed65e9fd096afe30f896
| 401,245
|
import re
def split_problematic_endpoints_line(line):
    """
    Split a host line containing more than one ":" — i.e. several
    concatenated endpoints, for example 10.99.184.69:900010.37.170.125:9006
    — into a list of correct endpoint strings, assuming every port has
    the same number of digits as the last one.
    Args:
        ``line``: the problematic line containing more than one endpoint string.
    Returns:
        the split list of endpoint strings.
    """
    # Port width is inferred from whatever follows the last colon.
    port_len = len(line.strip().split(":")[-1])
    endpoints = []
    start = 0
    for match in re.finditer(':', line):
        end = match.start() + port_len + 1
        endpoints.append(line[start:end])
        start = end
    return endpoints
|
24e5cf08074df9527014ae8d83110c1e264debc1
| 102,683
|
def cf_contains(element: str, string: str) -> bool:
    """Casefold (aka 'strong `lower()`') substring containment test.
    Args:
        element: The shorter string to be tested for containment in `string`.
        string: The larger string.
    Returns:
        Caseless test of whether the larger string contains the shorter string.
    """
    needle = element.casefold()
    haystack = string.casefold()
    return needle in haystack
|
7a60459383df8eb33526cca164f49ed151a7ec6e
| 348,439
|
def prompt_for_yes_no(prompt_string, default=None):
    """Prompt until the user answers yes or no; return True or False.
    An empty answer returns ``default`` when one is supplied."""
    answers = {'yes': True, 'ye': True, 'y': True,
               'no': False, 'n': False}
    if default is not None:
        assert isinstance(default, bool), "Use the desired boolean return value for parameter 'default'"
        # Empty input falls back to the default answer.
        answers[''] = default
    while True:
        reply = input(prompt_string).strip().lower()
        if reply in answers:
            return answers[reply]
        print("Please respond with 'yes' or 'no'\n")
|
6ecbe68c011263923f326c02e5f00080c19d6045
| 207,121
|
def flatten(nested_iterable):
    """
    Flatten a nested iterable into a list.
    Parameters
    ----------
    nested_iterable : list or tuple
        an iterable which can contain other iterables
    Returns
    -------
    flattened : list
    Examples
    --------
    >>> flatten([[0, 1], [2, 3]])
    [0, 1, 2, 3]
    >>> flatten([[0, 1], [[3], [4, 5]]])
    [0, 1, 3, 4, 5]
    """
    flattened = []
    for item in nested_iterable:
        if isinstance(item, (tuple, list)):
            # Recurse into nested lists/tuples.
            flattened.extend(flatten(item))
        else:
            flattened.append(item)
    return flattened
|
3af943cf303497572ad980b775cd4c7c8b3616e9
| 542,662
|
def flatten(list_of_list):
    """Concatenate the sub-lists of ``list_of_list`` into one flat list."""
    result = []
    for sub_list in list_of_list:
        result.extend(sub_list)
    return result
|
28ed36fdbca737585d310687780e85e82ce29527
| 359,006
|
import random
def roll_past(goal):
    """
    Returns: The score from rolling a die until passing goal.
    Starts with a score of 0 and repeatedly rolls a die, adding each
    result to the score; stops once the score passes goal. Rolling a 1
    at any point ends the game immediately with a score of 0.
    Parameter goal: The target goal to hit
    Precondition: goal is an int > 0
    """
    score = 0
    while score < goal:
        roll = random.randint(1, 6)
        if roll == 1:
            # A 1 wipes out the game.
            return 0
        score += roll
    return score
|
b92c196a5c3d0084dbbd3887ef29a1b60926d53d
| 553,816
|
def get_middle_indexes(lst):
    """
    Return the index(es) of the middle element(s) of ``lst``.

    For an even-length list the two central indexes are returned; for an
    odd-length list the single central index.  Lists of length <= 2 have
    no interior middle, so ``[None]`` is returned.
    """
    n = len(lst)
    if n <= 2:
        return [None]
    if n % 2 == 0:
        # use floor division: the old true division (n / 2) returned floats,
        # which cannot be used as list indexes
        return [n // 2 - 1, n // 2]
    return [n // 2]
|
06164efb2b575e582acefd590dffbff44869ef6c
| 64,288
|
import math
def coriolis_frequency(lat_deg):
    """
    Compute the Coriolis factor for a latitude.

    :param lat_deg: float deg
    :return: float hr**-1 coriolis factor
    """
    # Earth's angular velocity: one full rotation (2*pi rad) per 24 hours
    angular_speed = 2.0 * math.pi / 24
    return 2.0 * angular_speed * math.sin(math.radians(lat_deg))
|
846a04d0891f8d311bc2e89c66da70631c96b8f0
| 109,782
|
def shorten_name(name):
    """Strip the parkour suffix from a room name.

    Rooms starting with "*" drop "#parkour"; all others drop "-#parkour".
    Only the first occurrence is removed.
    """
    if name[0] == "*":
        marker = "#parkour"
    else:
        marker = "-#parkour"
    return name.replace(marker, "", 1)
|
ea5b095e5f7677ec4a160d3c68d3b17aa36287c1
| 507,388
|
def user_can_delete_greylisting(user, greylisting):
    """
    Decide whether a user may remove a guest from the greylist.

    Allowed when the user added the entry themselves, or holds the
    'PartyList.can_delete_any_greylisted_guest' permission.

    Arguments:
        user: User
        greylisting: GreylistedGuest

    Returns: bool
    """
    if greylisting.addedBy == user:
        return True
    return user.has_perm('PartyList.can_delete_any_greylisted_guest')
|
652d03b9e11cc5a54446a38a2c0adb2d0726b0a6
| 177,287
|
def find_foh_coeff(count_begin, count_final, o_array):
    """
    Find the foh coefficient for the linear fly-trap model: the fraction of
    flies transitioning from "on the trap" to "in the trap, but hidden"
    during a time step.

    Arguments:
        count_begin: number of flies in trap at the start of the trial
        count_final: number of flies in trap at the end of the trial
        o_array: array of "on trap" counts per time step

    Returns:
        foh coefficient (float)
    """
    net_gain = count_final - count_begin
    return net_gain / float(o_array.sum())
|
c9718b07a36f1b0ac06ceb878f4f4b79a1553e7b
| 219,533
|
def interactive_strategy(game) -> int:
    """Ask the user for a move in ``game`` and return it as an int."""
    # `game` is unused here; the move comes straight from the user
    return int(input("Enter a move: "))
|
c2327d241f85ce69997655d0d414cccecd0fc512
| 270,985
|
def get_grading_dict(grader_dict):
    """
    Invert grader_dict: map each grader (Student object) to the list of
    Submission objects they will grade.

    Parameters
    ----------
    grader_dict : dict.
        Maps a Submission object to a list of graders (Student objects).

    Returns
    -------
    grading_dict : dict.
        Maps a grader (Student object) to a list of Submission objects.
        grading_dict = { Student object: [ Submission objects ] }
    """
    grading_dict = {}
    for submission, graders in grader_dict.items():
        for grader in graders:
            # setdefault replaces the separate `not in ....keys()` test +
            # manual list initialisation of the original
            grading_dict.setdefault(grader, []).append(submission)
    return grading_dict
|
ba8d24d13d6497ce1000989e071eea6f5d6759b8
| 310,241
|
import re
def hostname_from_message_id(message_id):
    """
    Get the hostname value from this message ID header.

    Jenkins message IDs embed the FQDN, e.g.
    "ID:ceph-jenkins.example.com-44193-1511460447792...".

    :param message_id: STOMP header string
    :returns: hostname string, eg. "ceph-jenkins.example.com", or None if we
              found no FQDN in the message ID.
    """
    # raw string: '\d' in a plain string is an invalid escape sequence
    # (DeprecationWarning today, SyntaxError eventually)
    r = re.match(r'ID:(.+)-\d+-', message_id)
    if r:
        return r.group(1)
    return None
|
1243700a4bd1eea6f41b054caaf751f3f381e529
| 397,498
|
import requests
def get_report_bytes(report_data):
    """Download the report described by ``report_data`` and return it as bytes."""
    urls = report_data["urls"]
    if len(urls) > 1:
        # not sure when this would ever be the case, need to investigate
        raise NotImplementedError("Report contains multiple urls")
    response = requests.get(urls[0])
    return response.content
|
7f8cb581909000c9dd3136911ea0130cd7cc3c65
| 455,480
|
def isplaceholder(sym):
    """
    Check whether a symbol in a structure is a placeholder for a
    representation: any string starting with "#".

    Non-strings are never placeholders.  Unlike the previous
    ``sym[0] == "#"`` check, this does not raise IndexError on the
    empty string.
    """
    return isinstance(sym, str) and sym.startswith("#")
|
fc67e60eff6de0851b851975c0b78b4cfd1a66bb
| 379,431
|
def lerp(x, x0,x1,y0,y1):
    """
    Linearly interpolate ``x`` from the range [x0, x1] to [y0, y1].

    see https://en.wikipedia.org/wiki/Linear_interpolation

    Args:
        x: Value to be interpolated
        x0: Lower range of the original range.
        x1: Higher range of the original range.
        y0: Lower range of the targeted range.
        y1: Higher range of the targeted range.

    Returns:
        float: interpolated value.
    """
    # force float arithmetic for all operands
    x, x0, x1, y0, y1 = (float(v) for v in (x, x0, x1, y0, y1))
    return y0 + (x - x0) * (y1 - y0) / (x1 - x0)
|
978b7570899b1b88a2b365055eaa4da6be83f453
| 66,234
|
import collections
import six
def _unflatten_dict(flat_dict, prefixes, delimiter="/"):
"""Unflattens a dictionary into a dict of dicts by one level.
Args:
flat_dict: The dictionary to unflatten.
prefixes: The string keys to use for the unflattened dictionary. Keys in the
flat_dict which do not begin with a prefix are unmodified.
delimiter: The value used to delmit the keys in the flat_dict.
Returns:
The unflattened dictionary.
"""
unflat_dict = collections.defaultdict(dict)
for key, value in six.iteritems(flat_dict):
parts = key.split(delimiter)
if len(parts) > 1:
prefix = parts[0]
if prefix in prefixes:
suffix = key[len(prefix + delimiter):]
unflat_dict[prefix][suffix] = value
else:
unflat_dict[key] = value
else:
unflat_dict[key] = value
return unflat_dict
|
9aca3fe0bc72101c0b70864629a58671341bddcd
| 453,684
|
def read_file(filepath):
    """Read ``filepath`` as UTF-8 text and return its entire contents."""
    with open(filepath, mode='rt', encoding='utf-8') as handle:
        contents = handle.read()
    return contents
|
f089b0beead174048bebb5e3e54624d252bb641a
| 129,887
|
import re
def simplify_text(text: str) -> str:
    """
    Remove filler words from a problem statement and normalise phrasing.

    E.g.
        text = "Find value of x."
        simplified_text = "Find x."

    NOTE(review): the substitution order below is significant — several of
    the later patterns assume earlier removals and the whitespace collapse
    have already happened.  Do not reorder.
    """
    # leading space so patterns anchored on ' word ' can match at the start
    text = " " + text
    # words/phrases removed outright (many are regex fragments, matched
    # case-insensitively below)
    nonsense = ['GRIDIN', 'Suppose', 'below', 'exact', 'shown', 'composite', 'total', 'decimal', ' all ']
    nonsense += [' the ', ' this ', ' that ', ' its ', ' as (an |a |)', ' each ', ' then ', ' such ', 'given']
    nonsense += ['information', 'calculator', 'proportion']
    nonsense += ['exactly', 'respectively']
    nonsense += ['refer to', 'at right', ]
    nonsense += ['Express your answer in degrees.']
    nonsense += ['find in degrees.']
    nonsense += ['(square|) meters', '(square|) centimeters', '(square|) millimeters',
                 '(square|) inches', '(square|) feet', '(square|) units', '(square|) miles']
    nonsense += ['value of', 'variable of', 'variable']
    nonsense += ['solve problem', 'your answer']
    nonsense += ['in (diagram|figure)', 'use figure', 'use areas', 'use (an |a )']
    nonsense += ['area A of the shaded region is']
    nonsense += ['unless otherwise stated']
    nonsense += ['find indicated', 'find measure\.']
    nonsense += ['with side lengths indicated']
    for word in nonsense:
        pattern = re.compile(word, re.IGNORECASE)
        text = pattern.sub(" ", text)
    # collapse runs of whitespace left behind by the removals
    text = re.sub(r'[\s]+', r' ', text)
    # replace some phrases with standard wording
    text = re.sub(r'area A of shaded region is', r'', text)
    text = re.sub(r'(cm|m|ft|in)\^2', r'', text)
    # many question stems are normalised to the single verb "Find"
    text = re.sub(r'what is|solve for|determine|solve|what it|how long is|Which polynomial best represents', r'Find',
                  text, flags=re.IGNORECASE)  # NOTE: must pass flags=, not a 4th positional arg
    text = re.sub(r'to, find', r'to find', text)
    text = re.sub(r'Find \\angle', r'find m \\angle', text, flags=re.IGNORECASE)
    text = re.sub(r'measure of m', r'measure of', text, flags=re.IGNORECASE)
    text = re.sub(r'polygon figure', r'polygon', text)
    text = re.sub(r'express ', r'find ', text, flags=re.IGNORECASE)
    text = re.sub(r'figure (?=[A-Z]+)', r'', text)  # e.g. "figure ABCD" -> "ABCD" (case 428)
    text = re.sub(r' an ', r' ', text)
    text = re.sub(r' of a ', r' of ', text)
    text = re.sub(r'is equal to', r'=', text)
    text = re.sub(r'(?<=\d) long(?![a-z])', r' ', text)
    text = re.sub(r'in terms of [a-z] and [a-z]', r' ', text)
    text = re.sub(r' is (?:an|a) (?=[a-z\d]{2})', r' is ', text)
    text = re.sub(r'triangle \\triangle', r'\\triangle', text, flags=re.IGNORECASE)
    text = re.sub(r'(m |)\\overrightarrow', r'', text)
    # AB \cong EF -> AB = EF (congruence of segments rewritten as equality)
    text = re.sub(r'(?<=[A-Z][A-Z]) \\cong (?=[A-Z][A-Z][\s\,\.\?])', r' = ', text)
    text = re.sub(r', and ', r' and ', text)
    text = re.sub(r'preimeter', r'perimeter', text)
    text = re.sub(r' (?=[\,\.\?])', r'', text)
    text = re.sub(r' side length ', r' side ', text)
    text = re.sub(r'triangle \\triangle', r'triangle', text)
    text = re.sub(r'(?<=\\angle [A-Z]{3}) \\cong (?=\\angle [A-Z]{3})', r' = ', text)
    text = re.sub(r'(?<=\\angle [A-Z\d]) \\cong (?=\\angle [A-Z\d])', r' = ', text)
    # one-off correction of a known bad item in the source data (problem 977)
    text = re.sub(r'BY = CZ = AX = 2.5 diameter of \\odot G = 5', r'BY = CZ = AX = 2.5, diameter of \\odot G = 5',
                  text)
    text = re.sub(r'=', r' = ', text)
    text = re.sub(r'[\s]+', r' ', text)
    text = text.strip()
    return text
|
765659c251ec72ddc41f0a70fd8f2230b22bb36d
| 233,736
|
import six
def _uniform_iterator(sequence):
"""Key, value iteration on a dict or list."""
if isinstance(sequence, dict):
return six.iteritems(sequence)
else:
return enumerate(sequence)
|
203b8e9d04b8f052279cba177a6b628ba305936f
| 520,025
|
from typing import Sequence
from typing import Union
import pathlib
from typing import Optional
import collections
def find_common_prefix(
    paths: Sequence[Union[str, pathlib.Path]]
) -> Optional[pathlib.Path]:
    """
    Find the common prefix of two or more paths.

    ::

        >>> import pathlib
        >>> one = pathlib.Path('foo/bar/baz')
        >>> two = pathlib.Path('foo/quux/biz')
        >>> str(find_common_prefix([one, two]))
        'foo'

    Returns None when no prefix is shared by every path.

    :param paths: paths to inspect
    """
    # Count how often each path (and each of its ancestors) appears; a
    # prefix common to all inputs must appear len(paths) times.
    tally: collections.Counter = collections.Counter()
    for raw in paths:
        candidate = pathlib.Path(raw)
        tally.update([candidate])
        tally.update(candidate.parents)
    shared = [p for p, seen in tally.items() if seen >= len(paths)]
    shared.sort(key=lambda p: len(p.parts))
    # the deepest shared ancestor is the common prefix
    return shared[-1] if shared else None
|
40c90feb637972ab77ba0ac120f52fc1892eb34a
| 249,661
|
def try_parse_int(candidate):
    """
    Convert ``candidate`` to int; return None when conversion fails.

    Example:
        >>> try_parse_int("15")
        15
        >>> print(try_parse_int("a"))
        None

    Args:
        candidate: The candidate to convert.

    Returns:
        The converted candidate if convertible; otherwise None.
    """
    try:
        result = int(candidate)
    except (ValueError, TypeError):
        return None
    return result
|
6ed382c8268a0aa1eb6f4a26222d09d36538183d
| 500,247
|
def get_form_field_type(field):
    """
    Return the widget type string of the given form field.

    Helpful when rendering form fields manually (i.e. following Bootstrap
    standards).

    Usage::

        {% load libs_tags %}
        {% for field in form %}
            {% get_form_field_type field as field_type %}
            {% if "CheckboxInput" in field_type %}
                <div class="checkbox"><label>// render input here</label></div>
            {% else %}
                {{ field }}
            {% endif %}
        {% endfor %}
    """
    # str() is the idiomatic spelling of calling .__str__() directly
    return str(field.field.widget)
|
6e21606fddc0238a8b42ebe9e51fc8c78e5e267b
| 99,973
|
from typing import List
import itertools
def flatten(list_of_lists: List[List]) -> List:
    """Concatenate the sub-lists of ``list_of_lists`` into one list.

    >>> flatten([[1, 2], [3, 4, 5], ['a']])
    [1, 2, 3, 4, 5, 'a']
    """
    return list(itertools.chain.from_iterable(list_of_lists))
|
a14bd1208f28fbd7bd9633df842e7071ac8f13d3
| 514,289
|
def pillar_distances_sqr(grid, xy, ref_k0 = 0, kp = 0, horizon_points = None):
    """Return an array of squared xy-plane distances from each primary pillar to point xy.

    arguments:
       grid: the grid object providing extent_kji and horizon_points()
       xy (float pair): the xy coordinate to compute the pillar distances to
       ref_k0 (int, default 0): the horizon layer number to use
       kp (int, default 0): passed through to grid.horizon_points()
       horizon_points (numpy array, optional): if present, should be array as returned by
          horizon_points() method; pass for efficiency in case of multiple calls
    """
    # note: currently works with unmasked data and using primary pillars only
    n_j = grid.extent_kji[1] + 1
    n_i = grid.extent_kji[2] + 1
    if horizon_points is None:
        horizon_points = grid.horizon_points(ref_k0 = ref_k0, kp = kp)
    offsets = horizon_points[:, :, 0:2] - xy
    sq = offsets * offsets
    # sum the squared x and y components per pillar
    return (sq[:, :, 0] + sq[:, :, 1]).reshape((n_j, n_i))
|
5ea4178114648a663e69480dbb213e230571fcc9
| 319,963
|
import socket
import binascii
def ip_to_integer(ip_address):
    """
    Convert an IP address string to its integer representation.

    Returns a tuple (ip_integer, version), with version being the IP
    version (either 4 or 6).  Both IPv4 addresses (e.g. "192.168.1.1")
    and IPv6 addresses (e.g. "2a02:a448:ddb0::") are accepted.

    Raises:
        ValueError: if the string is neither a valid IPv4 nor IPv6 address.
    """
    # try parsing the IP address first as IPv4, then as IPv6
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            packed = socket.inet_pton(family, ip_address)
        # narrow the old bare `except:`: inet_pton signals bad input with
        # OSError (socket.error); TypeError/ValueError cover non-string input
        except (OSError, TypeError, ValueError):
            continue
        ip_integer = int(binascii.hexlify(packed), 16)
        return (ip_integer, 4 if family == socket.AF_INET else 6)
    raise ValueError("invalid IP address")
|
ebe704befab52ed82767dc4a6a09ead89a6947d9
| 128,023
|
from pathlib import Path
import json
def prep_realdir(path: Path):
    """Create a small on-disk "dataset" tree under ``path`` for tests; return its root."""
    root = path / "dataset"
    root.mkdir()
    # sub-directories (parents created first, so order matters)
    for rel in ("foo", "foo/bar", "qux"):
        (root / rel).mkdir()
    # empty placeholder files
    for rel in ("_meta.json", "readme.txt", "binary.dat", "foo/data.bin"):
        (root / rel).touch()
    # sidecar metadata next to the binary file
    (root / "foo" / "data.bin_meta.json").write_text(json.dumps({"hello": "world"}))
    # deliberately invalid JSON, for negative-path tests
    (root / "foo" / "notReally.json").write_text("this is not valid JSON")
    return root
|
17f26831d5e14a42ceb3aa6f9035d89343b66629
| 268,101
|
def encode_literal_num(n: int, size: int) -> bytes:
    """Encode ``n`` as a zero-padded decimal ASCII byte string of length ``size``.

    example: n=123, size=6 -> b"000123"

    Raises:
        ValueError: if ``n`` is negative, or has ``size`` or more digits
            (the first byte must be a pad "0", the original overflow guard).
    """
    digits = str(n)
    # validate with a real exception: the old `assert` was silently stripped
    # under `python -O`, and negative n produced garbage like b"00-5"
    if n < 0 or len(digits) >= size:
        raise ValueError(
            "value %d does not fit in %d literal decimal digits" % (n, size)
        )
    return digits.zfill(size).encode("utf-8")
|
09067cfad4b7304893241bf88beb50ceceb57055
| 631,232
|
def isSafe(row,
           column,
           oblique,
           oblique_inv,
           ctrl_line,
           ctrl_oblique,
           ctrl_obliqueinv):
    """Return True when square (row, column) is not attacked by a placed queen.

    Args:
        row (int): row number
        column (int): column number
        oblique (array): maps a square to its diagonal id
        oblique_inv (array): maps a square to its anti-diagonal id
        ctrl_line (array): occupancy flags per row
        ctrl_oblique (array): occupancy flags per diagonal
        ctrl_obliqueinv (array): occupancy flags per anti-diagonal

    Returns:
        bool: whether it is safe to put a queen at that position
    """
    row_taken = ctrl_line[row]
    diag_taken = ctrl_oblique[oblique[row][column]]
    anti_diag_taken = ctrl_obliqueinv[oblique_inv[row][column]]
    # safe only when none of the three lines through the square is occupied
    return not (row_taken or diag_taken or anti_diag_taken)
|
65a6c35c5040b04e8577582073de2b4f1fc1105a
| 600,924
|
def _str_between(text, start, stop):
"""Returns the string found between substrings `start` and `stop`."""
return text[text.find(start) + 1 : text.rfind(stop)]
|
2d9942a0d7cadf57f05556e033678cceea0dfa6c
| 220,303
|
from typing import List
from typing import Any
def find_object_by_key(list_: List[dict], search_key: str, search_value: Any) -> dict:
    """Find the first dict in ``list_`` whose ``search_key`` equals ``search_value``.

    Returns an empty dict when nothing matches.

    >>> fruits = [{"id": 1, "fruit": "banana"}, {"id": 2, "fruit": "apple"}, {"id": 3, "fruit": "mango"}]
    >>> find_object_by_key(fruits, "id", 1)
    {'id': 1, 'fruit': 'banana'}
    >>> find_object_by_key(fruits, "fruit", "pear")
    {}
    """
    matches = (obj for obj in list_ if obj.get(search_key) == search_value)
    return next(matches, {})
|
79ad40be4680b5309cbc422fb0c94dd55e2fe4fa
| 362,007
|
def _map_boolean_to_human_readable(boolean, resource, token):
"""
Map a boolean into a human readable representation (Yes/No).
:param boolean: boolean with the value that we want to transform
:param resource: resource containing all the values and keys
:param token: user token
"""
if boolean:
return 'Yes'
else:
return 'No'
|
380a9ff38cc5999a9e062b2487a7e54158c02a69
| 35,946
|
def merged_namedtuple(cls, source, **kwargs):
    """Build a ``cls`` namedtuple from ``source``'s fields, overridden by kwargs.

    Args:
      cls: namedtuple class Type that will be created.
      source: namedtuple Instance whose members are used for the new instance
        of cls.
      **kwargs: Extra arguments used to fill cls (take precedence over
        source's values).

    Returns:
      A namedtuple of type cls
    """
    values = {name: getattr(source, name) for name in source._fields}
    values.update(kwargs)
    return cls(**values)
|
c217d97a490a28358a534c57d03d8d992b1ca88e
| 244,948
|
def generator_to_list(function):
    """
    Wrap a generator function so that it returns a list when called.

    For example:

        >>> @generator_to_list
        ... def mygen(m):
        ...     i = 0
        ...     while i < m:
        ...         yield i
        ...         i += 1
        >>> mygen(5)
        [0, 1, 2, 3, 4]
    """
    # local import keeps this snippet self-contained
    from functools import wraps

    # wraps() copies __name__ and __doc__ like the old manual assignments,
    # and additionally __module__, __qualname__, __dict__ and __wrapped__
    @wraps(function)
    def wrapper(*args, **kwargs):
        return list(function(*args, **kwargs))
    return wrapper
|
cb4d6ad2af71347145f8483a506426bdbab9f9c0
| 671,623
|
import math
def BP(candidate, references):
    """
    Calculate the BLEU brevity penalty.

    r is the reference length closest to len(candidate), ties broken by
    the shorter reference.  Returns 1 when the candidate is at least as
    long as r, otherwise exp(1 - r/c).  An empty candidate returns 0.0
    (the limit of exp(1 - r/c) as c -> 0+), instead of dividing by zero.
    """
    c = len(candidate)
    ref_lens = (len(reference) for reference in references)
    r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))
    if c >= r:
        return 1
    if c == 0:
        # guard: the original raised ZeroDivisionError here
        return 0.0
    return math.exp(1 - r / c)
|
f715238fb746c50399164d2f4892b6e1c43d8c9c
| 328,261
|
def construct_proxy_url(scheme:str, ip:str, port:str)->str:
    """
    Build a proxy URL.

    :param scheme: proxy protocol, HTTP or HTTPS
    :param ip: IP address
    :param port: port number
    :return: the complete proxy address, e.g. "http://1.2.3.4:8080"
    """
    return '%s://%s:%s' % (scheme, ip, port)
|
51c1a1d56e7ca37ff82fe9fbb404a67e22974898
| 507,483
|
def maybe_copy(obj, inplace=False, **kwargs):
    """Return ``obj`` unchanged when ``inplace`` is true; otherwise ``obj.copy(**kwargs)``."""
    if inplace:
        return obj
    return obj.copy(**kwargs)
|
d3e93b1e39283818a14832530e514320725945cb
| 285,873
|
import decimal
def _decimal_to_json(value):
"""Coerce 'value' to a JSON-compatible representation."""
if isinstance(value, decimal.Decimal):
value = str(value)
return value
|
5ad6337c39710260da55a2ba7f524573efec08d0
| 419,768
|
def parse_point(point, filepath):
    """Parse the x, y, and timestamp attributes from an XML Point element.

    Args:
        point: an XML element with an ``attrib`` mapping.
        filepath: path of the source XML file, used in error messages.

    Returns:
        (x, y, timestamp) as (int, int, float).

    Raises:
        ValueError: if any required attribute is missing.
    """
    # dict.has_key() was removed in Python 3 — membership uses `in`;
    # the three identical checks are folded into one loop
    for attr in ("x", "y", "time"):
        if attr not in point.attrib:
            raise ValueError("Point element does not have \"{0}\" attribute.\n".format(attr) +
                             "Offending XML file: {0}".format(filepath))
    x = int(point.attrib["x"])
    y = int(point.attrib["y"])
    timestamp = float(point.attrib["time"])
    return x, y, timestamp
|
853efd03ec9cf179381f9e7335b2c73c05e0edd8
| 346,785
|
import ujson
def load_config(cfg_file="config.json") -> dict:
    """
    Load the configuration from the given cfg_file.

    :param cfg_file: Config file containing the configuration JSON.
    :return: dict containing config parameters
    """
    with open(cfg_file, 'r') as json_file:
        raw = json_file.read()
    return ujson.loads(raw)
|
dddab3befe280259afdc9d35d0a6e13576e9a128
| 230,977
|
def class_id(cls):
    """
    Return the full id of *class*: the id of the module it is defined in,
    extended by the class name, e.g. ``"a.b.MyClass"``.

    Builtins (and classes from an empty module) are returned by bare name:
    ``class_id(int) == "int"``.
    """
    name = cls.__name__
    module = cls.__module__
    # skip empty and builtin modules
    if module and module != str.__module__:
        return "{}.{}".format(module, name)
    return name
|
3616a596196a6da31997fb13f9714861d5f67ac2
| 402,654
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.