content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import json
def sanitize_for_json(string):
    """Return *string* with JSON-special characters escaped.

    json.dumps wraps the encoded value in double quotes; slicing off the
    first and last character strips them, leaving only the escaped body.
    """
    quoted = json.dumps(string)
    return quoted[1:-1]
def collate_tensors(stream, pad):
    """Right-pad a list of 1-D tensors to a common length and stack them.

    >>> tensors = [torch.tensor(x) for x in [[1,2,3], [1]]]
    >>> pad = 0
    >>> collate_tensors(tensors, pad)
    tensor([[1, 2, 3],
            [1, 0, 0]])
    """
    assert len(stream) > 0
    max_len = max(t.size(0) for t in stream)
    # Pre-fill with the pad value; each tensor then overwrites its prefix.
    collated = stream[0].new_full((len(stream), max_len), pad)
    for row, tensor in zip(collated, stream):
        row[: tensor.size(0)] = tensor
    return collated
def extract_gang_related_incidents(df):
    """Filter *df* down to gang-related incidents (gang == 1) occurring
    in location codes <= 77, for use in the Hawkes process.
    """
    df_gangs = df[(df.gang == 1) & (df.location <= 77)]
    print("Found %d gang-related incidents" % len(df_gangs))
    return df_gangs
def is_float_or_int(value):
    """
    Returns True if the value is exactly a float or an int, False otherwise.

    Exact type checks are used (not isinstance), so subclasses such as
    bool are rejected, matching the original branch-by-branch behavior.
    """
    return type(value) in (float, int)
import logging
def _decodeCStd( version, strictAnsi ):
"""
Decodes the C standard given the values of the macros
__STDC_VERSION__ and __STRICT_ANSI__
"""
retVal = None
if version == '__STDC_VERSION__' and strictAnsi == '1':
retVal = 'c90'
elif version == '199409L' and strictAnsi == '1':
retVal = 'iso9899:199409'
elif version == '199901L' and strictAnsi == '1':
retVal = 'c99'
elif version == '201112L' and strictAnsi == '1':
retVal = 'c11'
elif version == '__STDC_VERSION__' and strictAnsi == '__STRICT_ANSI__':
retVal = 'gnu90'
elif version == '199901L' and strictAnsi == '__STRICT_ANSI__':
retVal = 'gnu99'
elif version == '201112L' and strictAnsi == '__STRICT_ANSI__':
retVal = 'gnu11'
elif version == '201710L' and strictAnsi == '__STRICT_ANSI__':
retVal = 'gnu17'
else:
logging.warning( 'Cannot decode default language standard for C. (__STDC_VERSION__ = %s __STRICT_ANSI__ = %s)',
version, strictAnsi )
return retVal | 2620f3ae0e1a42c2c616c2b087163071dcdc29eb | 102,946 |
def extract_turn_10_more(df):
    """
    Given a concise conversation dataframe, keep only conversations with
    10 or more dialog turns.

    Arg:
        df: A conversation dataframe from a subreddit.
    Return:
        A dataframe restricted to conversations having >= 10 turns.
    """
    counts = df.groupby('conversation id').size()
    long_ids = counts.index[counts >= 10]
    return df[df['conversation id'].isin(list(long_ids))]
def session_regenerate_id(space, delete_old_session=False):
    """Update the current session id with a newly generated one.

    Returns the interpreter's true value on success, false when no
    session is currently active.
    """
    session = space.ec.interpreter.session
    if not session.is_active():
        return space.w_False
    session.create_id()
    return space.w_True
def feet2m(lengthFeet):
    """Convert a length in feet to meters (1 ft = 0.3048 m exactly)."""
    METERS_PER_FOOT = 0.3048
    return METERS_PER_FOOT * lengthFeet
def get_conversion(img_height, leftx_base, rightx_base):
    """
    Get the pixel-to-meter conversion factors for a warped lane image.

    :param img_height: height of the warped image in pixels
    :param leftx_base: x pixel of the left lane-line base
    :param rightx_base: x pixel of the right lane-line base
    :return: (ym_per_pix, xm_per_pix)
    """
    # y: lane lines are about 30 m long and the perspective transform
    # keeps roughly half of that across the warped image height.
    ym_per_pix = 15 / img_height
    # x: a lane is about 3.7 m wide, spanning the pixel distance between
    # the detected left and right lane bases.
    xm_per_pix = 3.7 / (rightx_base - leftx_base)
    return ym_per_pix, xm_per_pix
def rollback_delete_rule(table_name):
    """Build SQL that drops the ``delete_protect`` rule on an ecommerce
    table, re-allowing row deletion.

    Fix: the old docstring claimed the SQL *creates* a rule; the
    statement has always been a DROP RULE (the rollback of the protect
    rule).

    :param table_name: suffix of the ``ecommerce_<table_name>`` table
    :return: the DROP RULE statement as a string
    """
    return f"DROP RULE delete_protect ON ecommerce_{table_name}"
import itertools
def build_src_summary(sources, convert_ints=False):
    """
    Condenses sources by merging consecutive integers into a string range.
    For example, [1, 2, 3, 5] => ["1-3", 5]. Non-integers are simply copied to the result,
    so for example [1, 2, 3, "18-36", 42] => ["1-3", "18-36", 42].
    :param sources: an iterable of integer values, e.g. line numbers
    :param convert_ints: if True, convert any single integers in the result to strings
    :return: a list of source ranges, where ranges are strings and any single sources are
        either ints or strings as determined by convert_ints
    """
    src_ranges = []
    def pairwise(iterable):
        # make two independent iterators
        a, b = itertools.tee(iterable)
        # advance the second by one
        next(b, None)
        # return pairwise groupings from two iterators
        return zip(a, b)
    range_start = None
    next_val = None
    # 0 or 1 elements: nothing to merge, just apply the int conversion.
    if len(sources) < 2:
        return [str(v) if convert_ints else v for v in sources]
    # The loop sees each element as `current` except the last, which only
    # ever appears as `next_val`; the for-else below emits it.
    for current, next_val in pairwise(sources):
        if range_start is None and isinstance(current, int):
            # Open a new candidate range at the first int of a run.
            range_start = current
        if not isinstance(current, int):
            # Non-ints pass through untouched.
            src_ranges.append(current)
        elif next_val != current + 1:
            # Run ends here: emit either "start-current" or the lone value.
            if range_start != current:
                src_ranges.append(f"{range_start}-{current}")
            else:
                src_ranges.append(str(current) if convert_ints else current)
            range_start = None
        # else: run continues; nothing emitted yet.
    else:
        # for-else runs once after the loop to flush the final element:
        # either it closes an open consecutive run, or it stands alone.
        if range_start is not None:
            src_ranges.append(f"{range_start}-{next_val}")
        else:
            src_ranges.append(str(next_val) if convert_ints else next_val)
    return src_ranges
def make_alphabet(sentences, pad_char=None, extra_chars=None,
                  allowed_chars=None, exclude_chars=None):
    """Make alphabet from the given corpus of tokenized *sentences*.
    :param sentences: tokenized sentences.
    :type sentences: list([list([str])])
    :param pad_char: add a token for padding.
    :type pad_char: str
    :param extra_chars: add tokens for other purposes.
    :type extra_chars: list([str])
    :param allowed_chars: if not None, all characters not from *allowed_chars*
        will be removed.
    :type allowed_chars: str|list([str])
    :param exclude_chars: if not None, all characters from *exclude_chars* will
        be removed.
    :type exclude_chars: str|list([str])
    :return: the alphabet created; the index of the padding token (it's always
        the last index, if pad_char is not None); the indices of the extra
        characters.
    :rtype: tuple(dict({char: int}), int, list([int])|None)
    """
    # The generator rebinds `x` three times: sentence -> token -> char.
    # NOTE(review): the `if` clause evaluates while `x` is still the whole
    # sentence (a list), not a character, so allowed_chars/exclude_chars are
    # compared against sentence objects rather than individual characters —
    # presumably the filter was intended at character level; confirm before
    # relying on these parameters.
    abc = {
        x: i for i, x in enumerate(sorted(set(
            x for x in sentences
            if (not allowed_chars or x in allowed_chars)
            and (not exclude_chars or x not in exclude_chars)
            for x in x for x in x
        )))
    }
    def add_char(char):
        # Append `char` at the next free index; None/empty gets no index.
        if char:
            assert char not in abc, \
                "ERROR: char '{}' is already in the alphabet".format(char)
            idx = abc[char] = len(abc)
        else:
            idx = None
        return idx
    if extra_chars is not None:
        extra_idxs = [add_char(c) for c in extra_chars]
    else:
        extra_idxs = None
    # Padding is added last so its index is always the final one.
    pad_idx = add_char(pad_char)
    return abc, pad_idx, extra_idxs
import requests
def pull_neo_feed(url: str) -> dict:
    """
    Pull Near Earth Objects (NEOs) from NASA's NEO Feed.

    Args:
        url (str): NEO API URL
    Returns:
        dict: All NEOs from the specified API URL
    """
    response = requests.get(url=url)
    payload = response.json()
    return payload["near_earth_objects"]
def split(name):
    """
    Split a "firstname lastname" string at its last space and return a
    list [first, last].

    Fix: the original used str.rfind, which returns -1 when the string
    contains no space, silently producing garbage slices
    (e.g. "Cher" -> ["Che", "Cher"]); such input now yields ["", name].

    :param name: full name of the form "firstname lastname"
    :return: [first_name, last_name]
    """
    first, _, last = name.rpartition(' ')
    return [first, last]
def isResponse(code):
    """
    Checks whether a code indicates a response number.
    :param code: Code to test
    :return: True if the number lies in the response range 64..191
        inclusive, False otherwise.
    """
    LOW, HIGH = 64, 191
    return LOW <= code and code <= HIGH
import sqlite3
def get_data_for_episode(conn: sqlite3.Connection, table_name: str, ep_num: int):
    """
    Fetch all logged rows for one episode and transpose them column-wise.

    :param conn: Sqlite3 connection object
    :param table_name: Name of table to be queried. Must come from a
        trusted source: SQLite cannot bind identifiers as parameters, so
        it is still interpolated into the statement.
    :param ep_num: Episode to get data from
    :return: tuple of column tuples:
        (action, state, reward, step_num, joint_pos, joint_vel, cum_reward)
    """
    cur = conn.cursor()
    # Fix: ep_num was interpolated into the SQL via f-string (an
    # injection hole and a quoting hazard); it is now bound as a proper
    # query parameter.
    sqlite_statement = \
        f'''
        SELECT action, state, reward, step_num, joint_pos, joint_vel, cum_reward
        FROM {table_name} where episode_num == ?
        '''
    cur.execute(sqlite_statement, (ep_num,))
    data = cur.fetchall()
    # Transpose rows into per-column tuples.
    return tuple(zip(*data))
def dunderkey(*args):
    """Produce a nested key from multiple args separated by double
    underscore.

    >>> dunderkey('a', 'b', 'c')
    'a__b__c'

    :param *args : *String
    :rtype : String
    """
    SEPARATOR = '__'
    return SEPARATOR.join(args)
def parse_case_snake_to_camel(snake, upper_first=True):
    """
    Convert a string from snake_case to CamelCase.
    :param str snake: The snake_case string to convert.
    :param bool upper_first: Whether or not to capitalize the first
        word of the string.
    :return: The CamelCase version of string.
    :rtype: str
    """
    head, *tail = snake.split('_')
    if upper_first:
        head = head.title()
    return head + ''.join(part.title() for part in tail)
def int_to_bytes(i):
    """Serialize int *i* to its 8-byte big-endian representation."""
    WIDTH = 8
    return i.to_bytes(WIDTH, byteorder="big")
import inspect
def func_accepts_var_args(func):
    """
    Return True if function 'func' accepts positional arguments *args.
    """
    parameters = inspect.signature(func).parameters.values()
    return any(param.kind == inspect.Parameter.VAR_POSITIONAL
               for param in parameters)
def _defer(f, *args, **kwargs):
"""
Create a closure that calls the given function with the given arguments.
The point of this function is to avoid the surprising behavior that can
occur if you define a closure in a scope where variables are changing (e.g.
in a for-loop). The confusing thing is that closures have access to the
scope they were defined in, but only as it exists when they are ultimately
called. So if the scope changes between when the closure is defined and
when it's called, the closure will use the final value of any variables.
This function serves to create a static local scope containing the
variables needed by the closure, which avoids the problem.
More information:
https://stackoverflow.com/questions/10452770/python-lambdas-binding-to-local-values
"""
class defer:
def __repr__(self):
return '<deferred>'
def __call__(self):
return f(*args, **kwargs)
return defer() | 4875e03372c1665dbc415b8dac09b4682cea1012 | 102,996 |
def split_word(word):
    """
    Evenly split one string into two halves.

    Fix: ``len(word) / 2`` yields a float in Python 3, so the slice raised
    TypeError for every input; integer floor division is used instead.

    Args:
        word (str): String to split.
    Returns:
        Tuple -- (word_one, word_two); for odd lengths the first half is
        one character shorter.
    """
    size = len(word) // 2
    return word[:size], word[size:]
def group_aruments(seq, group=254):
    """
    Lazily yield slices of *seq* holding at most *group* items each
    (254 by default, due to historical argument-count restrictions in
    Python).
    http://docs.djangoproject.com/en/dev/topics/http/urls/#patterns
    """
    starts = range(0, len(seq), group)
    return (seq[start:start + group] for start in starts)
def Serialize(obj):
    """Convert a string, integer, bytes, or bool to its file representation.

    Args:
        obj: The string, integer, bytes, or bool to serialize.
    Returns:
        The bytes representation of the object suitable for dumping into a file.
    """
    # bytes pass through untouched; bool must be tested before the generic
    # str() fallback (str(True) would give b'True', not b'true').
    if isinstance(obj, bytes):
        return obj
    if isinstance(obj, bool):
        if obj:
            return b'true'
        return b'false'
    return str(obj).encode('utf-8')
import re
def split_filename(filename):
    """Return a list of file name pieces: any CamelCase, snake_case or
    common-delimiter separated words (the last element is nominally the
    file extension).

    Fixes: the original character classes used invalid escapes
    (``[\\a-z]`` matched from BEL, and ``[\\A-Z]`` is rejected by modern
    ``re``); plain ``[a-z]``/``[A-Z]`` is what CamelCase splitting needs.
    The split class ``[\\W|\\.|_]`` redundantly listed '|' and '.'
    (already covered by ``\\W``).
    """
    # Insert a space at every lower->upper case boundary, then split on
    # runs of non-word characters or underscores.
    spaced = re.sub(r"([a-z])([A-Z])", r"\1 \2", filename)
    return re.split(r"[\W_]+", spaced)
def sumer(lst):
    """
    Compute the sum of all elements in the list.
    Example: sumer([1, 2, 3, 4, 5]) == 15
    """
    total = sum(lst)
    return total
import math
def dis_points(lat1, lon1, lat2, lon2):
    """
    Great-circle (haversine) distance between two lat/lon points.

    Parameters
    ----------
    lat1, lon1 : number
        Latitude/longitude of the first point, in degrees.
    lat2, lon2 : number
        Latitude/longitude of the second point, in degrees.

    Returns
    -------
    number
        Distance between the two points in kilometers, on a spherical
        Earth of radius 6373 km.
    """
    EARTH_RADIUS_KM = 6373.0
    phi1, lam1, phi2, lam2 = (math.radians(v) for v in (lat1, lon1, lat2, lon2))
    half_dphi = (phi2 - phi1) / 2
    half_dlam = (lam2 - lam1) / 2
    # Haversine formula.
    a = math.sin(half_dphi) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(half_dlam) ** 2
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return EARTH_RADIUS_KM * c
import configparser
import re
import warnings
def read(fIni):
    """Reads an *.INI file object and returns a two-level dict of
    section: key: value collections.

    Values are coerced: all digits -> int, double-quoted -> unquoted
    string, true/false -> bool. Anything else is skipped with a warning.
    """
    parser = configparser.ConfigParser()
    parser.read_file(fIni)
    config = {}
    for section in parser.sections():
        parsed = config[section] = {}
        for key, raw in parser[section].items():
            if re.match(r"^\d+$", raw):
                parsed[key] = int(raw)
            elif re.match(r"^\".+\"$", raw):
                parsed[key] = raw[1:-1]
            elif re.match(r"^(true|false)$", raw.lower()):
                parsed[key] = raw.lower() == "true"
            else:
                warnings.warn("Unparsable value for key '%s', skipping" % key)
    return config
def format_number(number, SI=True, space=' '):
    """Return a human-readable metric-like string representation
    of a number.
    :param number: the number to be converted to a human-readable form
    :param SI: If is False, this function will use the convention
        that 1 kilobyte = 1024 bytes, otherwise, the convention
        that 1 kilobyte = 1000 bytes will be used
    :param space: string that will be placed between the number
        and the SI prefix
    :return: a human-readable metric-like string representation of
        *number*
    """
    # Prefix symbols, indexed by how many divisions by `step` were applied.
    symbols = [ ' ', # (none)
                'k', # kilo
                'M', # mega
                'G', # giga
                'T', # tera
                'P', # peta
                'E', # exa
                'Z', # zetta
                'Y'] # yotta
    if SI: step = 1000.0
    else: step = 1024.0
    thresh = 999
    depth = 0
    max_depth = len(symbols) - 1
    # we want numbers between 0 and thresh, but don't exceed the length
    # of our list. In that event, the formatting will be screwed up,
    # but it'll still show the right number.
    while number > thresh and depth < max_depth:
        depth = depth + 1
        number = number / step
    # Pick a format: integers print without a fraction; small values get
    # one decimal so the output keeps a constant width.
    if type(number) == type(1) or (int(number) == number):
        number = int(number)
        format = '%3i%s%s'
    elif number < 9.95:
        # must use 9.95 for proper sizing. For example, 9.99 will be
        # rounded to 10.0 with the .1f format string (which is too long)
        format = '%3.1f%s%s'
    else:
        format = '%3.0f%s%s'
    # `number or 0` maps a falsy (zero) value to 0 before float conversion.
    return(format % (float(number or 0), space, symbols[depth]))
def initCtrl(size):
    """Init arrays useful to check the diagonals and the lines.
    Args:
        size (int): size of the board
    Returns:
        [array]: ctrl_line
        [array]: ctrl_oblique
        [array]: ctrl_obliqueinv
    """
    # A size*size board has 2*size-1 diagonals in each direction.
    diag_count = 2 * size - 1
    ctrl_line = [False for _ in range(size)]
    ctrl_oblique = [False for _ in range(diag_count)]
    ctrl_obliqueinv = [False for _ in range(diag_count)]
    return ctrl_line, ctrl_oblique, ctrl_obliqueinv
import json
import click
def validate_filter(ctx, param, value):
    """Try to parse the given filter as a JSON string.

    Empty/None values pass through as None; malformed JSON becomes a
    click.BadParameter error.
    """
    if not value:
        return None
    try:
        return json.loads(value)
    except ValueError:
        raise click.BadParameter('filters need to be in JSON format')
def user_name(user):
    """Return the user's name: the first field of the *user* record."""
    NAME_FIELD = 0
    return user[NAME_FIELD]
def get_fqn(the_type):
    """Get module.type_name for a given type."""
    return "{}.{}".format(the_type.__module__, the_type.__qualname__)
def is_function_symbol(objdump_line):
    """Return True when the objdump line refers to the .text section,
    i.e. the symbol lives in code; False otherwise."""
    return ".text" in objdump_line
def remove_from_string(string, letters):
    """Given an original string and a string of letters, return a new
    string equal to the original with every occurrence of those letters
    removed."""
    return ''.join(ch for ch in string if ch not in letters)
def idfs(corpus):
    """ Compute IDF
    Args:
        corpus (RDD): input corpus of (id, tokens) records
    Returns:
        RDD: a RDD of (token, IDF value), where IDF = N / doc-frequency
    """
    N = corpus.count()
    # Deduplicate tokens per document so each document counts once.
    docFreqs = (corpus
                .flatMap(lambda record: set(record[1]))
                .map(lambda token: (token, 1))
                .reduceByKey(lambda a, b: a + b))
    return docFreqs.map(lambda pair: (pair[0], N / float(pair[1])))
def split(path):
    """split(path) -> streamname, filename
    Separate the stream name and file name in a /-separated stream path
    and return a tuple (stream_name, file_name). If no stream name is
    available, assume '.'.
    """
    if '/' not in path:
        return '.', path
    stream_name, file_name = path.rsplit('/', 1)
    return stream_name, file_name
def roi_to_image5d(roi):
    """Convert from ROI image to image5d format, which simply prepends a
    time dimension as the first axis.
    Args:
        roi: ROI as a 3D (or 4D if channel dimension) array.
    Returns:
        ROI with an additional leading time dimension of length 1.
    """
    return roi[None, ...]
def get_dMdh_text(params, func_type=None):
    """
    Get the LaTeX annotation text associated with the fit of dM/dh(h).

    :param params: mapping with fit coefficients 'a', 'b', 'c', 'd'
    :param func_type: unused; kept for interface compatibility
    :return: the four annotation lines joined by newlines
    """
    lines = [
        r'$\frac{d\mathcal{M}}{dh}(h|r)$ is assumed to take the form:',
        (r'$\frac{d\mathcal{M}}{dh}(h_s) = '
         r'exp\bigg{(}'
         r'\frac{-h_s^2}{(a+bh_s+ch_s^2)^{\frac{d}{2}}}'
         r'\bigg{)}$'),
        r'$h_s = \frac{h-h_{max}(r)}{\eta(r)}$',
        (r'where a = {:.4f}, b = {:.4f}, c = {:.4f}, '
         r'and d = {:.4f}').format(params['a'], params['b'],
                                   params['c'], params['d']),
    ]
    return '\n'.join(lines)
from typing import List
from typing import Pattern
from typing import Optional
def latest(lines: List[str], regex: Pattern) -> Optional[str]:
    """
    Return the last released version.

    Scans the changelog from the top and returns the first version
    number matched (newest entry first in typical changelogs).

    Arguments:
        lines: Lines of the changelog file.
        regex: A compiled regex with a 'version' named group.
    Returns:
        The last version, or None if no line matches.
    """
    for line in lines:
        found = regex.search(line)
        if not found:
            continue
        return found.groupdict()["version"]
    return None
from typing import List
from typing import Any
from typing import Tuple
import random
def split_data(data: List[Any],
               sizes: Tuple[float, float, float] = (0.8, 0.1, 0.1),
               seed: int = 0) -> Tuple[List[Any], List[Any], List[Any]]:
    """
    Randomly splits data into train, val, and test sets according to the provided sizes.

    Note: shuffles *data* in place before splitting.

    :param data: The data to split into train, val, and test.
    :param sizes: The sizes of the train, val, and test sets (as a proportion of total size).
    :param seed: Random seed.
    :return: Train, val, and test sets.
    """
    import math
    # Checks
    assert len(sizes) == 3
    assert all(0 <= size <= 1 for size in sizes)
    # Fix: exact float equality (`sum(sizes) == 1`) rejected valid size
    # triples such as (0.7, 0.15, 0.15) whose float sum is not exactly
    # 1.0; compare with a tolerance instead.
    assert math.isclose(sum(sizes), 1)
    # Shuffle
    random.seed(seed)
    random.shuffle(data)
    # Determine split sizes
    train_size = int(sizes[0] * len(data))
    train_val_size = int((sizes[0] + sizes[1]) * len(data))
    # Split
    train = data[:train_size]
    val = data[train_size:train_val_size]
    test = data[train_val_size:]
    return train, val, test
def _shadow_blob(x, y, z, cmap_indices, cmap, scale, mlab):
    """Shows blobs as shadows projected parallel to the 3D visualization.

    Params:
        x: Array of x-coordinates of blobs.
        y: Array of y-coordinates of blobs.
        z: Array of z-coordinates of blobs.
        cmap_indices: Indices of blobs for the colormap, usually given as a
            simple ascending sequence the same size as the number of blobs.
        cmap: The colormap, usually the same as for the segments.
        scale: Array of scaled size of each blob.
        mlab: Mayavi object.

    Returns:
        The Mayavi glyph object holding the plotted shadow points.
    """
    # Flat 2D circles serve as "shadows", drawn slightly smaller (0.8x)
    # than the blobs they mirror.
    pts_shadows = mlab.points3d(x, y, z, cmap_indices,
                                mode="2dcircle", scale_mode="none",
                                scale_factor=scale*0.8, resolution=20)
    # Reuse the segment colormap so shadows match their blobs' colors.
    pts_shadows.module_manager.scalar_lut_manager.lut.table = cmap
    return pts_shadows
def sort(numbers, k):
    """Counting sort: sort *numbers*, whose values lie in 0..k inclusive.

    Runs in O(n + k) time and O(k) extra space.
    """
    # One frequency slot per possible value, including 0.
    freq = [0] * (k + 1)
    for value in numbers:
        freq[value] += 1
    result = []
    for value, count in enumerate(freq):
        result += [value] * count
    return result
import re
def iden_defect(defect_label):
    """
    iden_defect is a convenience function which decomposes the title of a
    vasppy calculation for use in other functions, assuming naming
    conventions are followed.

    args: defect label (string)
    returns: defect identifiers (list of strings)
    """
    # NOTE(review): the class [^]_[^_] negates the set {']', '_', '[', '^'}
    # (the ']' right after '^' is literal), so this collects maximal runs of
    # characters other than those four -- effectively splitting the label on
    # underscores and brackets. Presumably that matches the project's
    # defect-naming convention; confirm before changing.
    return re.findall(r"[^]_[^_]+",defect_label)
import base64
def _b64_decode(s):
"""Decode bytes from URL-safe base64 inserting required padding."""
padding = 4 - len(s)%4
return base64.urlsafe_b64decode(s + b'='*padding) | 3723fc2644fd5742b0984e763a6eff46b1c6c10d | 103,076 |
def int_or_chr_key(s):
    """Return a sortable value: int(s) if *s* is numeric, otherwise the
    code point of the (single) character.

    Fix: narrowed the blanket ``except Exception`` to the two error types
    ``int()`` actually raises for non-numeric input, so unrelated bugs
    are no longer silently swallowed.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        return ord(s)
def extract_label_values(df, values_col=2):
    """
    Extract the value columns from a dataframe of labels.

    Fix: the column offset was hard-coded to 2, so the *values_col*
    parameter was silently ignored; it is now honored (default behavior
    unchanged).

    Parameters
    ----------
    df : pandas.DataFrame
        labels
    values_col : int
        column index of df where values start
    """
    return df.iloc[:, values_col:].values
async def event() -> dict:
    """Create and return a fixed mock event payload."""
    mock_event = {
        "id": "290e70d5-0933-4af0-bb53-1d705ba7eb95",
        "name": "A test event",
    }
    return mock_event
def is_complex(field: dict) -> bool:
    """Determine from its schema whether a field is complex, i.e. an
    array whose items are themselves arrays."""
    items_type = field.get("items", {}).get("type")
    return field["type"] == "array" and items_type == "array"
def _prepare_body_remove_subports(subports):
"""Prepare body for PUT /v2.0/trunks/TRUNK_ID/remove_subports."""
return {'sub_ports': [{'port_id': sp['port_id']} for sp in subports]} | 7427a77553efef92d82218f87060a1a4d5c3375c | 103,098 |
from typing import Union
from typing import Any
from typing import Tuple
from typing import Callable
import attrs
def iter_validator(iter_type, item_types: Union[Any, Tuple[Any]]) -> Callable:
    """Helper that builds iterable validators, reducing boilerplate.

    Parameters
    ----------
    iter_type : any iterable
        The type of iterable object that should be validated.
    item_types : Union[Any, Tuple[Any]]
        The type or types of acceptable item types.

    Returns
    -------
    Callable
        The attrs deep_iterable validator combining both instance checks.
    """
    member = attrs.validators.instance_of(item_types)
    container = attrs.validators.instance_of(iter_type)
    return attrs.validators.deep_iterable(
        member_validator=member,
        iterable_validator=container,
    )
def grad(x):
    """
    Gradient (derivative) of the sigmoid function, expressed in terms of
    the sigmoid's *output*: sigmoid'(z) = x * (1 - x) where x = sigmoid(z).

    :param x: sigmoid output values
    :return: derivative of sigmoid, given x
    """
    return x * (1 - x)
def ten_column_sheet(ws):
    """Append a first row with the values 0-9 and return the worksheet."""
    first_row = list(range(10))
    ws.append(first_row)
    return ws
import json
def read_json(filename, strict=False):
    """Load and return the contents of a JSON file.

    Fixes: the *strict* argument was accepted but ignored (json.load was
    always called with the hard-coded ``strict=False``); it is now
    forwarded. The dead ``data = []`` initializer was removed.

    :param filename: path of the JSON file to read
    :param strict: forwarded to json.load; when True, unescaped control
        characters inside strings are rejected
    :return: the decoded JSON value
    """
    with open(filename, 'r') as fh:
        return json.load(fh, strict=strict)
import json
def load_api_key(key_file, json_key):
    """
    Loads the API key for The Blue Alliance.

    Arguments:
        key_file: The file name for the file with the authentication key.
        json_key: The dictionary key value for the authentication key.
    Returns:
        The authentication key string stored under *json_key*.
    """
    with open(key_file) as key_json_file:
        parsed = json.load(key_json_file)
    return parsed[json_key]
def get_supported_flags(app):
    """Get the set of feature flags supported by *app*'s config
    (an empty set when the key is absent)."""
    default_flags = set()
    return app.config.get("SUPPORTED_FLAGS", default_flags)
from typing import List
import re
def split_word(item: str) -> List[str]:
    """Split *item* into a list of words at commas or runs of whitespace.
    Retains any duplicates and empty strings.
    """
    delimiters = re.compile(r"(?:,|\s+)")
    return delimiters.split(item)
def leer_archivo(ubicacion):
    """
    Read and return the contents of the file at *ubicacion*.

    Fix: the handle is now managed by a ``with`` block, so it is closed
    even when read() raises (the original leaked it on error).

    Returns:
        str: the file contents
    Raises:
        FileNotFoundError: if the file does not exist
        PermissionError: if the file is not readable
    """
    with open(ubicacion, 'r') as archivo:
        return archivo.read()
import logging
def call_has_props(call, props):
    """Check if a call has matching properties for every entry of *props*.

    Logs a critical message for the first mismatching property and
    returns False; returns True when everything matches.
    """
    for name, expected in props.items():
        actual = getattr(call, name)
        if actual == expected:
            continue
        logging.critical(
            '%s::%s property mismatch, %s: expected=%r; actual=%r',
            call.service,
            call.method,
            name,
            expected,
            actual)
        return False
    return True
def cookieToDict(cookies: str) -> dict:
    """
    Converts a cookie string to a dict, expects the form:
    name1=value1; name2=value2; ...

    Fixes: values containing '=' are no longer truncated (only the first
    '=' separates name from value), and a fragment without any '=' maps
    to an empty value instead of raising IndexError.
    """
    finalCookies = {}
    if cookies is not None:
        for cookieItem in cookies.split(";"):
            name, _, value = cookieItem.partition("=")
            finalCookies[name.strip()] = value.strip()
    return finalCookies
def to_nom_val_and_std_dev(interval):
    """
    For a given interval [mu - sigma, mu + sigma] returns (mu, sigma).
    (Here mu is the nominal value and sigma the standard deviation.)

    Parameters
    ----------
    interval : ~list [lwr_bnd, upr_bnd]

    Returns : ~tuple
    """
    lower, upper = interval
    half_width = (upper - lower) / 2
    center = lower + half_width
    return (center, half_width)
def get_links(html_tree):
    """Extract (text, href) pairs for every anchor in the HTML tree."""
    return [(anchor.text, anchor.get('href'))
            for anchor in html_tree.xpath('//a')]
def rosenbrock(x, a=1, b=100):
    """Famous Rosenbrock "banana" function:
    f(x) = (a - x0)^2 + b*(x1 - x0^2)^2, minimized at (a, a^2)."""
    x0, x1 = x[0], x[1]
    return (a - x0) ** 2 + b * (x1 - x0 ** 2) ** 2
from typing import Callable
def optimal_position(positions: list, cost_function: Callable):
    """Calculate the optimal position which minimises the cost.
    Warning: This function assumes that there is no local minimum
    Args:
        positions (list): list of input points
        cost_function (Callable): cost function taking the distance as input
    Returns:
        (int) Cost of the minimum
    Examples (cost_function_part2 is defined elsewhere in the project):
        >>> optimal_position([0,1,2],lambda distance: distance)
        2
        >>> optimal_position([0,1,2],cost_function_part2)
        2
        >>> optimal_position([16,1,2,0,4,2,7,1,2,14],lambda distance: distance)
        37
        >>> optimal_position([16,1,2,0,4,2,7,1,2,14],cost_function_part2)
        168
    """
    # Total cost of aligning every point to candidate position 0.
    minimum = sum([cost_function(abs(pos)) for pos in positions])
    # Walk candidate positions upward; total cost is assumed unimodal in i
    # (see Warning above), so the first non-improving candidate means the
    # optimum has been passed and we can stop early.
    for i in range(1, max(positions)):
        new = sum([cost_function(abs(pos - i)) for pos in positions])
        if new < minimum:
            minimum = new
        else:
            break
    return minimum
def default_in_transformer(row, lhs, rhs):
    """
    Performs the membership check of lhs in rhs. If lhs exposes an
    ``is_in`` method it is used; otherwise the plain ``in`` operator.

    :param row: The row being checked (not used)
    :param lhs: The left hand side of the operator
    :param rhs: The right hand side of the operator
    :return: True if lhs is in rhs, False otherwise
    """
    if not hasattr(lhs, "is_in"):
        return lhs in rhs
    return lhs.is_in(rhs)
from typing import Dict
from typing import List
from typing import Set
def same_order(layer_dict: Dict[str, Dict[str, str]], all_orders: List[str]) -> int:
    """
    Count the distinct order "signatures" contained in a layer dictionary.

    Each order's signature is the concatenation of its state strings
    across all layers; equal signatures mean the orders are
    indistinguishable.

    :param layer_dict: Layer dictionary
    :param all_orders: All orders in the dictionary
    :return: Amount of distinct orders in layer_dict
    """
    signatures: Set[str] = set()
    for order in all_orders:
        signature = ''.join(layers[order] for layers in layer_dict.values())
        signatures.add(signature)
    return len(signatures)
def validate_hl_parent_id(cls, field_value):
    """
    Validates that the top level HL segment does not carry a parent id.
    This validation is used within hierarchical transactions such as the
    270/271 (eligibility) and 276/277 (claims status).

    Raises ValueError when a (truthy) parent id is present; otherwise the
    value is returned unchanged.
    """
    if not field_value:
        return field_value
    raise ValueError(f"invalid hierarchical_parent_id_number {field_value}")
def extract_samples(in_dist, **kwargs):
    """Convert using a set of values sampled from the PDF.

    Fix: ``kwargs.pop('xvals')`` had no default, so omitting the
    undocumented 'xvals' keyword raised KeyError; it now defaults to
    None and is documented.

    Parameters
    ----------
    in_dist : `qp.Ensemble`
        Input distributions (anything exposing ``rvs(size=...)``)

    Keywords
    --------
    size : `int`
        Number of samples to generate (default 1000)
    xvals : array-like or None
        x grid passed through unchanged to the output (default None)

    Returns
    -------
    data : `dict`
        The extracted data: keys 'samples', 'xvals' and 'yvals' (always None)
    """
    samples = in_dist.rvs(size=kwargs.pop('size', 1000))
    xvals = kwargs.pop('xvals', None)
    return dict(samples=samples, xvals=xvals, yvals=None)
from typing import Optional
from pathlib import Path
def _resolve_path(path: Optional[str]) -> Optional[Path]:
"""Resolves a user provided path. Returns None for empty string.
Does not confirm the file exists.
"""
if not path:
return None
return Path(path).expanduser().absolute() | 2df25a558b2849597db4180e20bf04e1054d3b2f | 103,169 |
import math
def normalize_angle(radians: float) -> float:
    """Map any angle in radians onto the range [0, 2*PI] by repeatedly
    adding or subtracting full turns.

    :param radians: The angle in radians
    :return: The equivalent angle between 0 and 2 PI
    """
    full_turn = 2 * math.pi
    # Add full turns while the angle is negative.
    while radians < 0:
        radians += full_turn
    # Subtract full turns while the angle exceeds one revolution.
    while radians > full_turn:
        radians -= full_turn
    return radians
def get_rps(data_frame):
    """Calculate requests per second over the whole request log.

    Args:
        data_frame (DataFrame): request log with a numeric 'time' column,
            first row earliest and last row latest
    Returns:
        Requests per second (zero-length spans are treated as 1 second)
    Raises:
        ValueError: if the last timestamp precedes the first
    """
    start = data_frame.iloc[0].time
    end = data_frame.iloc[-1].time
    duration = end - start
    if duration < 0:
        raise ValueError(
            "Incorrect time values from_date > to_data (%f > %f)" %
            (start, end)
        )
    if duration == 0:
        duration = 1
    return data_frame.shape[0] / duration
import yaml
import json
def safe_file_read(fileName, fileType):
    """Read a file, raising a descriptive error if it is missing.

    Consolidates three byte-identical try/except branches of the
    original into a single open + dispatch.

    Arguments:
        fileName {string} -- Name of the file that will be read
        fileType {string} -- One of "yml"/"yaml", "json", or "txt"

    Raises:
        FileNotFoundError: the file could not be found; the message lists
            the common container-volume misconfigurations to check.
        TypeError: fileType is not one of the supported types.

    Returns:
        [object] -- Parsed contents (yaml/json) or raw text (txt)
    """
    error_message = \
        """
        Make sure the following is done correctly:
        1. A volume is setup with a files for config
        2. The volume is linked to the right location (:/src)
        3. The file name is the correct name
        """
    # Reject unsupported types before touching the filesystem, matching the
    # original behaviour of raising TypeError regardless of file existence.
    if fileType not in ("yml", "yaml", "json", "txt"):
        raise TypeError("fileType param for safe_file_read function should be one of the following file types: yml, json, or txt")
    try:
        with open(fileName, "r") as file:
            if fileType in ("yml", "yaml"):
                # yaml referenced lazily so json/txt callers never need it.
                return yaml.safe_load(file)
            if fileType == "json":
                return json.load(file)
            return file.read()
    except FileNotFoundError:
        # Re-raise with a hint about the usual deployment mistakes.
        raise FileNotFoundError(error_message)
# | 9d171c4ecfba45e4a85edb653a7f4e2854f5995c | 103,175
def pdfrompl(pl, trim=True):
    """Build a two-level path dictionary (source node -> destination node).

    :param pl: list of paths, each a sequence of nodes
    :param trim: when True (default), drop paths that begin and end at
        the same node (full loops)
    :return: nested dict mapping source -> destination -> path
    """
    result = {}
    for route in pl:
        head, tail = route[0], route[-1]
        if trim and head == tail:
            # Skip full loops when trimming is requested.
            continue
        if head not in result:
            result[head] = {}
        result[head][tail] = route
    return result
# | 3407fa9305884e542c1c091677d8da7dcb2b81f0 | 103,177
from typing import Union
import pathlib
def calculate_save_location(file: Union[pathlib.Path, str], source_directory: Union[pathlib.Path, str],
                            output_directory: Union[pathlib.Path, str], output_format: str = 'html') -> pathlib.Path:
    """
    Calculates where a file should be saved.
    :param file: The source file (str or pathlib.Path).
    :param source_directory: Where all the source files are stored.
    :param output_directory: Where all the HTML files should be placed.
    :param output_format: The extension for the save location.
    :return: Destination path: <prefix>/<output_directory>/<suffix>/<stem>.<output_format>
    """
    # Normalise all three path arguments to pathlib.Path.
    if isinstance(file, str):
        file = pathlib.Path(file)
    if isinstance(source_directory, str):
        source_directory = pathlib.Path(source_directory)
    if isinstance(output_directory, str):
        output_directory = pathlib.Path(output_directory)
    directory: pathlib.Path = file.parent
    # Locate the span of the source tree inside the file's parent parts.
    # NOTE(review): .index() raises ValueError when a part is absent --
    # assumes `file` lives somewhere under `source_directory`; confirm.
    start, stop = directory.parts.index(source_directory.parts[0]), directory.parts.index(source_directory.parts[-1])
    # NOTE(review): ''.join fuses multiple path parts with no separator
    # (e.g. ('home', 'user') -> 'homeuser'); looks suspicious whenever the
    # prefix or suffix is more than one component -- verify intended.
    pre: pathlib.Path = pathlib.Path(''.join(directory.parts[:start]))
    post: pathlib.Path = pathlib.Path(''.join(directory.parts[stop + 1:]))
    return pathlib.Path(pre).joinpath(output_directory).joinpath(post).joinpath(
        file.stem + '.' + output_format) | d306ed8a72e944cebf10e821a48df0c9aba56212 | 103,183
def _has_twin(ax):
"""
Solution for detecting twin axes built on `ax`. Courtesy of
Jake Vanderplas http://stackoverflow.com/a/36209590/1340208
"""
for other_ax in ax.figure.axes:
if other_ax is ax:
continue
if other_ax.bbox.bounds == ax.bbox.bounds:
return True
return False | e1d9e44015305b0e542f62c03a4f19da6cf91b46 | 103,184 |
def _get_match_fraction(aligned_segment):
"""Get the fraction of a read matching the reference"""
matching_bases = aligned_segment.cigartuples[0][1]
return float(matching_bases)/aligned_segment.query_length | 235deb526d5f33662f5f05c36d3a3b755079296e | 103,187 |
def parse_name(text: str) -> dict:
    """Parse a personal name written as 'last, first' into a dict.

    Input without a comma yields only the 'last' key; extra
    comma-separated pieces beyond the second are discarded.
    """
    if "," not in text:
        return {'last': text.strip()}
    pieces = (part.strip() for part in text.split(","))
    return dict(zip(['last', 'first'], pieces))
# | 89d226c2fac29a9847687846e97417b595dffd40 | 103,188
import re
def FlagStart(data):
    """Locate the first flag marker in `data` and describe it.

    A flag looks like ``[[X:key]]`` where X is one of A, O or I and key
    is alphanumeric.

    Args:
        data: String to scan.

    Returns:
        [flag, key, start, end]: flag letter, key text, and the match's
        start/end offsets within `data`; or None when no flag is found.
    """
    # Raw string fixes the invalid-escape warnings of the original
    # pattern; capture groups replace manual index arithmetic.
    match = re.search(r"\[{2}(?:(A:[a-zA-Z0-9]+)|(O:[a-zA-Z0-9]+)|(I:[a-zA-Z0-9]+))\]{2}", data)
    if not match:
        return None
    # Exactly one alternative matched; it holds "X:key".
    token = next(group for group in match.groups() if group is not None)
    flag, key = token[0], token[2:]
    return [flag, key, match.start(), match.end()]
# | e42a9ad2b0ee089684e57dcc48d542c93b46f9ef | 103,191
def zip_(*arrays):
    """Group the elements of the given lists by index.

    Args:
        arrays (list): Lists to process.

    Returns:
        list: List of index-aligned groups, each group itself a list.

    Example:

        >>> zip_([1, 2, 3], [4, 5, 6], [7, 8, 9])
        [[1, 4, 7], [2, 5, 8], [3, 6, 9]]

    .. versionadded:: 1.0.0
    """
    # zip yields tuples; convert each group to a list.
    return list(map(list, zip(*arrays)))
# | 76ee0a4f6ea278ceeec377ec0878af96d8bafef5 | 103,194
def funding_rate(self, symbol: str, **kwargs):
    """Fetch funding rate history.

    GET /fapi/v1/fundingRate
    https://binance-docs.github.io/apidocs/futures/en/#get-funding-rate-history

    Args:
        symbol (str): the trading symbol

    Keyword Args:
        limit (int, optional): limit the results. Default 100; max 1000.
        startTime (int, optional): Start Time.
        endTime (int, optional): End Time.

    Notes:
        Without startTime/endTime the most recent `limit` entries are
        returned; if the window holds more than `limit` entries, data
        from startTime up to startTime + limit is returned, ascending.
    """
    payload = {"symbol": symbol}
    payload.update(kwargs)
    return self.query("/fapi/v1/fundingRate", payload)
# | f76b3d8d93d34164e0397c86bb18a1eb674612eb | 103,195
import re
def text_normalization(text):
    """
    Remove extra spaces, punctuation, HTML Tags and other special characters
    Adapted From: Fake News Challenge - https://github.com/hanselowski/athene_system
    :param text: string to normalize
    :return: Normalized text
    """
    # Strip paragraph tags.
    text = re.sub(u"<p>", u"", text)
    text = re.sub(u"</p>", u"", text)
    # Replace curly quotes with ASCII equivalents.
    text = re.sub(u'’', u"'", text)
    text = re.sub(u'\u2019', u"'", text)  # same as the line above; kept as-is
    text = re.sub(u'”', u'"', text)
    text = re.sub(u'“', u'"', text)
    # NOTE(review): the pattern below reads as a plain space replaced by a
    # space (a no-op); it may originally have been a non-breaking space
    # (\xa0) mangled in transit -- confirm against the upstream source.
    text = re.sub(u' ', u" ", text)
    # Normalise dashes to ASCII hyphen.
    text = re.sub(u'–', u"-", text)
    text = re.sub(u'\u000a', u"", text)  # drop bare line feeds
    text = re.sub(u'\u2014', u"-", text)  # em dash
    # Collapse and trim whitespace, then squash runs of dots.
    text = re.sub(u'\n+$', u" ", text)
    text = re.sub(u'\s+', u" ", text)
    text = re.sub(u'^\s+', u"", text)
    text = re.sub(u'\s+$', u"", text)
    text = re.sub(u"\s{2,}", u" ", text)
    text = re.sub(u"\.{2,}", u".", text)
    return text | c606b2084ca4e252675e2e55c5263504e40d19a2 | 103,203
def gray_to_int(s: str) -> int:
    """Decode a (possibly zero-left-padded) Gray-code bit string to an int.

    Fix: an all-zero input such as "0" or "0000" previously stripped down
    to an empty string and crashed int(); it now correctly decodes to 0.
    """
    s = s.lstrip('0') or '0'
    n = int(s, 2)
    # Standard Gray decode: XOR progressively shifted copies of n into n.
    mask = n >> 1
    while mask:
        n ^= mask
        mask >>= 1
    return n
# | a9a937f5f473d7a9c3dcf3b0f3dd1194badf28e6 | 103,204
def parse_org_members(members_json):
    """ Keep only the login field from each Github org member record. """
    logins = []
    for member in members_json:
        logins.append({'login': member['login']})
    return logins
# | 038eadc48a01bf5579acf0e8d6fcd3bba66a3623 | 103,206
def make_doc_func(fnm):
    """
    Build a trivial no-op function whose docstring embeds the named
    call-graph image.
    """
    def doc_fun(*args):
        pass
    # Same text the original produced via %-formatting ("%%"" -> "%").
    doc_fun.__doc__ = (
        "\n"
        "    **Call graph**\n"
        "    .. image:: _static/jonga/" + fnm + "\n"
        "    :width: 20%\n"
        "    :target: _static/jonga/" + fnm + "\n"
    )
    return doc_fun
# | b42770a0c3764df936e0c6f5ea47680ead737883 | 103,209
def nb_consecutives(x, y, dx, dy, position, color):
    """Length of the run of stones of `color` in board `position`,
    starting at (x, y) and extending along the direction (dx, dy).

    Returns 0 immediately when the cell just behind the start, at
    (x - dx, y - dy), already holds the same color, so that only the
    first stone of a series starts a count.

    Parameters
    ----------
    x, y : int
        Start coordinates.
    dx, dy : int
        Direction slope.
    position : square board indexable as position[x][y]
    color : stone value to match
    """
    size = len(position)
    prev_x, prev_y = x - dx, y - dy
    # Only the first stone of a series may start a count.
    if 0 <= prev_x < size and 0 <= prev_y < size and position[prev_x][prev_y] == color:
        return 0
    run = 0
    while 0 <= x < size and 0 <= y < size and position[x][y] == color:
        run += 1
        x, y = x + dx, y + dy
    return run
# | 8524ba56c104cbf541659d7bd86a564d306dd7cd | 103,211
def get_total_delta_seconds(delta):
    """
    Backport of datetime.timedelta.total_seconds() for Python 2.5, 2.6
    and 3.1.
    """
    whole_seconds = delta.seconds + delta.days * 24 * 3600
    total_microseconds = delta.microseconds + whole_seconds * 10**6
    return total_microseconds / 10**6
# | f241756c976c53a9c2eb172e0bb1cf0b6958fcbb | 103,214
def ab2mkk(a, b, tolerance=1e-12):
    """
    Transforms A and B TD matrices into MK and K matrices.

    Args:
        a (numpy.ndarray): TD A-matrix;
        b (numpy.ndarray): TD B-matrix;
        tolerance (float): maximal imaginary component allowed before the
            inputs are rejected as complex-valued;

    Returns:
        MK and K submatrices.
    """
    worst_imag = max(abs(a.imag).max(), abs(b.imag).max())
    if worst_imag > tolerance:
        raise ValueError("A- and/or B-matrixes are complex-valued: no transform is possible")
    a_re = a.real
    b_re = b.real
    tdhf_k = a_re - b_re
    tdhf_m = a_re + b_re
    return tdhf_m.dot(tdhf_k), tdhf_k
# | b3ac4040acebc9c95f11cf4686d5f07cba763ea6 | 103,218
def version_split(s, delimiters={"=", ">", "<", "~"}):
    """Split a requirement string into (name, version-constraints):
    mypacakge<=2.4,==2.4 -> (mypacakge, ['<=2.4', '==2.4'])

    In [40]: version_split("asdsda>=2.4,==2")
    Out[40]: ('asdsda', ['>=2.4', '==2'])

    In [41]: version_split("asdsda>=2.4")
    Out[41]: ('asdsda', ['>=2.4'])

    In [42]: version_split("asdsda")
    Out[42]: ('asdsda', [])
    """
    # Find the first character that begins a version constraint.
    cut = next((i for i, ch in enumerate(s) if ch in delimiters), None)
    if cut is None:
        return (s, [])
    return (s[:cut], s[cut:].split(","))
# | 16b46a14723e6ea22f689f1fc729da055f984a75 | 103,223
from pathlib import Path
def ensure_path(path):
    """Coerce `path` to a pathlib.Path, passing None (and existing Path
    instances) through unchanged."""
    if path is None or isinstance(path, Path):
        return path
    return Path(path)
# | 057c7d2391ef6c36749b43d6b965b96e2550f663 | 103,224
def fib(n):
    """
    Return the n-th term of the sequence defined by fib(1) == 1,
    fib(2) == 2, fib(n) == fib(n - 1) + fib(n - 2) (Fibonacci numbers
    shifted by one index).

    Fixes: the recursive original ran in exponential time and recursed
    without bound for n < 1; this version is iterative (linear time)
    and rejects invalid input explicitly.

    :param n: the term for the sequence (n >= 1)
    :return: fib(n)
    :raises ValueError: if n < 1
    """
    if n < 1:
        raise ValueError("n must be >= 1, got %r" % (n,))
    prev, curr = 1, 2  # fib(1), fib(2)
    if n == 1:
        return prev
    for _ in range(n - 2):
        prev, curr = curr, prev + curr
    return curr
# | 1889e7cebd33a6507d87d1221d2214346bede3ae | 103,225
def _add(a, b):
"""Adds a and b, where a and b are ints, "infinity", or "-infinity"."""
if a in ("infinity", "-infinity"):
a, b = b, a
if b == "infinity":
assert a != "-infinity"
return "infinity"
if b == "-infinity":
assert a != "infinity"
return "-infinity"
return int(a) + int(b) | f3a5e88968b4a2ccc8bad44d88499687d16098b6 | 103,227 |
import six
def format_expose(expose):
    """
    Converts a port number or multiple port numbers, as used in the Dockerfile ``EXPOSE`` command, to a tuple.

    Fix: under Python 3 the list/tuple branch returned a lazy ``map``
    object rather than the documented sequence; every branch now returns
    a real tuple of strings.

    :param expose: Port numbers, can be as integer, string, or a list/tuple of those.
    :type expose: int, unicode, list, or tuple
    :return: A tuple, to be separated by spaces before inserting in a Dockerfile.
    :rtype: tuple
    """
    if isinstance(expose, (list, tuple)):
        return tuple(six.text_type(port) for port in expose)
    if isinstance(expose, six.string_types):
        return expose,
    return six.text_type(expose),
# | 0286ed62c3d3a859a53fd3f5af627e0b0ecb0891 | 103,229
def _map_authorhip_to_paragraphs(labels_paragraph_author):
"""Map authorship labels per document to a binary label determining whether two paragraphs have
the same author. Return a list of labels per document and tuples per document, containing the
indices of the compared paragraphs. Used in task 3."""
paragraph_pairs = []
labels = []
for author_list in labels_paragraph_author:
curr_para_pairs = []
curr_labels = []
n_para = len(author_list)
for i in range(n_para - 1):
for j in range(i + 1, n_para):
curr_para_pairs.append((i, j))
if author_list[i] == author_list[j]:
curr_labels.append(1)
else:
curr_labels.append(0)
paragraph_pairs.append(curr_para_pairs)
labels.append(curr_labels)
return labels, paragraph_pairs | 3ba798dcf6392d2d421a03f87fa980063d142e24 | 103,230 |
from typing import List
import math
def sqrt_frac(n: int) -> List:
    """
    Returns the continued fraction representation of sqrt(n)
    in the form [i; a_1, a_2, ..., a_n]
    Thanks to:
    https://math.stackexchange.com/questions/1198692/continued-fraction-expansion-for-%E2%88%9A7
    :param n: non-negative integer
    :return: continued fraction
    :raises ValueError: if n is negative
    """
    if not n >= 0:
        raise ValueError
    whole = math.floor(math.sqrt(n))
    array = [whole]
    # Perfect square: the expansion is just the integer part.
    # NOTE(review): relies on floating-point sqrt being exact here; fine
    # for small n, may misfire for very large n -- confirm the input range.
    if whole - math.sqrt(n) == 0:
        return array
    # States are triples (x, a, b) for the remainder x/(sqrt(a) + b).
    # x/(sqrt(a) + b)
    first = (1 , n, -whole)
    rad = first
    while True:
        # Invert the remainder, rationalising the denominator:
        # (sqrt(a) + b)/x
        flipped = [rad[1], -rad[2], (rad[1] - rad[2]**2)//rad[0]]
        # Next coefficient is the integer part of the inverted remainder.
        whole = math.floor((math.sqrt(flipped[0])+flipped[1])/flipped[2])
        flipped[1] -= whole*flipped[2]
        array.append(whole)
        rad = (flipped[2], flipped[0], flipped[1])
        # The expansion of a non-square sqrt is periodic:
        # stop once the state repeats.
        if rad == first:
            break
    return array | 260a8d67324959c0283dcb5ef524c8236c3efa53 | 103,240
def trapezoid_integrate(f, a, b, N, points):
    """
    Computes the integral value from a to b for function f using the
    composite trapezoid rule.

    Fix: interior points were passed to ``f`` wrapped in a one-element
    list (``f([points[i]])``) instead of as scalars, which broke any
    integrand expecting a plain number.

    Parameters
    ----------
    f : callable
        The integrating function
    a,b : float
        The min and max value of the integrating interval
    N : int
        Number of partitions.
    points : ndarray
        The N + 1 partition points from a to b

    Returns
    -------
    return_value : float
        The value of the integral from a to b of the function f
    """
    # Endpoints count once, interior points twice.
    total_sum = f(points[0]) + f(points[N])
    total_sum += 2.0 * sum(f(points[i]) for i in range(1, N))
    return (b - a) / (2 * N) * total_sum
# | 5917230a5569a9f3a909084ae69c50e9b3748be8 | 103,242
def _axis_label_from_meta(
pdata: dict,
**kwargs
) -> str:
"""Generates an automated axis label from data
using system name, variable name and variable unit.
Args:
pdata: plot data structure
Returns:
str - axis label
"""
return f"{pdata['system']} {pdata['name']} [{pdata['var_unit']}]" | ac91cbccb2fa879513c5e99f425d796bf153793d | 103,244 |
def get_start_position(grid_data, clue_num):
    """Return {'x': col, 'y': row} of `clue_num` in ipuz grid data,
    or None when the clue number does not appear."""
    coords = (
        {'x': x, 'y': y}
        for y, row in enumerate(grid_data)
        for x, cell in enumerate(row)
        if cell == clue_num
    )
    return next(coords, None)
# | 311006f8f202ed675b3a1acbd0b10541ac02957d | 103,245
import glob
def get_n_files(fname='wgl_1.txt', in_dir='.'):
    """Count cross-correlation ascii files named `fname` that sit exactly
    one directory level below `in_dir`."""
    pattern = '{}/*/{}'.format(in_dir, fname)
    return len(glob.glob(pattern))
# | 0642d4aced8b13608083f7c7f4c8cda220a85867 | 103,248
import math
def check_nan(dict_):
    """
    Validate that no value in a dict is NaN.

    >>> check_nan({'a': 0, 'b': 1, 'c': 0, 'd': 10})
    True

    Raises ArithmeticError naming the offending key(s) otherwise.
    """
    bad_keys = [key for key, loss in dict_.items() if math.isnan(loss)]
    if bad_keys:
        joined = ', '.join(bad_keys)
        raise ArithmeticError(f'We find the loss(es) [{joined}] nan.')
    return True
# | bb3ab871272985f9379b92ae814ec223ec8bd657 | 103,249
def get_format_invocation(f, clang_format_binary, fix, warnings_as_errors, quiet):
    """Assemble the clang-format command line for one file.

    `fix` toggles in-place editing versus a dry run; `quiet` is accepted
    for interface compatibility but does not affect the command."""
    invocation = [clang_format_binary, '-i' if fix else '--dry-run']
    if warnings_as_errors:
        invocation.append('--Werror')
    invocation.append(f)
    return invocation
# | 9c8c451db8b914a7fef034432f51f056f87bb076 | 103,250
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.