content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def translate(s: str, src: str, dest: str) -> str:
    """
    Replace each character of `s` that appears in `src` with the
    character at the corresponding position in `dest`.

    Uses a single translation table so the mapping is applied in one
    pass; unlike the previous chained `str.replace` calls, a character
    produced by one mapping is never re-translated by a later one
    (e.g. translate("ab", "ab", "bc") is "bc", not "cc").

    :param s: string to transform
    :param src: characters to replace (same length as `dest`)
    :param dest: replacement characters
    :raises RuntimeError: if `src` and `dest` differ in length
    """
    if len(src) != len(dest):
        raise RuntimeError("impossible error")
    return s.translate(str.maketrans(src, dest))
|
52695fd7a56f4f2ba0a91f8f64683e6bda3d0b1a
| 79,436
|
def dotProduct(listA, listB):
    """
    Return the dot product of two equal-length lists of numbers.

    listA: a list of numbers
    listB: a list of numbers of the same length as listA
    """
    return sum(a * b for a, b in zip(listA, listB))
|
623fc234f90a4e54aa1a847616faf6e140cc4e61
| 79,437
|
def dias_perdidos_por_fumar(cigarros_fumados_por_dia, anos_fumando):
    """Given the number of cigarettes smoked per day and the number of
    years smoking, return the total days of life lost, assuming each
    cigarette shortens life by 10 minutes. Rounded to 2 decimals."""
    total_cigarettes = anos_fumando * 365 * cigarros_fumados_por_dia
    lost_minutes = total_cigarettes * 10
    # 1440 minutes in a day
    return round(lost_minutes / 1440, 2)
|
17f44f68bf62e33fde039f28f36217254cf74057
| 79,440
|
def sqlite3_column_affinity(column_type):
    """Return the sqlite3 column affinity for a declared type string.

    The checks are ordered per SQLite's affinity rules, so e.g. "int"
    wins over "char" when both substrings occur.
    """
    declared = column_type.lower()
    if "int" in declared:
        return "INTEGER"
    if any(token in declared for token in ("char", "clob", "text")):
        return "TEXT"
    if "blob" in declared or not declared:
        return "NONE"
    if any(token in declared for token in ("real", "floa", "doub")):
        return "REAL"
    return "NUMERIC"
|
5a0320464be31d6bcdfd9906a59f4bec3d6d69b6
| 79,441
|
def ip_family(address):
    """Return the IP family for the address.

    :param: address: ip address string or ip address object
    :return: the ip family (4 or 6)
    :rtype: int
    :raises ValueError: for a string with neither '.' nor ':'
    """
    # Address objects (ipaddress module style) carry their own version.
    if hasattr(address, 'version'):
        return address.version
    for marker, family in (('.', 4), (':', 6)):
        if marker in address:
            return family
    raise ValueError("Invalid IP: {}".format(address))
|
c6ebd78401834dca4f1d4fc8ceb9c227efc4d5bd
| 79,442
|
import re
def get_substr(st, de1, de2, strip=True):
    """
    Return the first occurrence of a substring between delimiters `de1`
    and `de2` in a string. If either delimiter isn't given, the
    substring runs from the start or to the end of the string. If
    `strip` is true, strip the result. If nothing is found, return None.

    :param st: String from which we search for substr (must be non-empty)
    :param de1: First delimiter (plain string, matched literally)
    :param de2: Second delimiter (plain string, matched literally)
    :param strip: Toggle result stripping (boolean)
    :return: String or None
    :raises ValueError: if `st` is empty/None or `strip` is not a bool
    """
    if not st or not isinstance(strip, bool):
        raise ValueError
    # Escape the delimiters so regex metacharacters in them are matched
    # literally, and use a non-greedy group when a closing delimiter is
    # given so we stop at the *first* occurrence (the old greedy `(.*)`
    # ran to the LAST closing delimiter, contradicting the docstring).
    head = re.escape(de1) if de1 else ''
    if de2:
        body, tail = '(.*?)', re.escape(de2)
    else:
        body, tail = '(.*)', ''
    res = re.search(head + body + tail, st)
    if not res:
        return None
    ret = str(res.group(1))
    if strip:
        ret = ret.strip()
    return ret
|
0e576a91bacbaef64a82c1d3b1bab7dd49f92144
| 79,446
|
import math
def _decode_box_encoding(box_encoding, anchor, image_size):
    """Decodes bounding box encoding.
    Args:
      box_encoding: a tuple of 4 floats — (y, x, height, width) offsets
        relative to the anchor, pre-multiplied by the fixed scale
        factors below (assumed SSD-style encoding — TODO confirm).
      anchor: a tuple of 4 floats, (ymin, xmin, ymax, xmax);
        presumably normalized [0, 1] coordinates, since the result is
        clamped to that range — verify against callers.
      image_size: a tuple of 2 ints, (width, height)
    Returns:
      A tuple of 4 integers in pixels. NOTE(review): the code returns
      (x0, y0, x1 - x0, y1 - y0), i.e. (left, upper, width, height) —
      not (left, upper, right, lower) as previously documented.
    """
    assert len(box_encoding) == 4
    assert len(anchor) == 4
    # Fixed de-scaling factors applied to the encoded offsets.
    y_scale = 10.0
    x_scale = 10.0
    height_scale = 5.0
    width_scale = 5.0
    rel_y_translation = box_encoding[0] / y_scale
    rel_x_translation = box_encoding[1] / x_scale
    rel_height_dilation = box_encoding[2] / height_scale
    rel_width_dilation = box_encoding[3] / width_scale
    anchor_ymin, anchor_xmin, anchor_ymax, anchor_xmax = anchor
    anchor_ycenter = (anchor_ymax + anchor_ymin) / 2
    anchor_xcenter = (anchor_xmax + anchor_xmin) / 2
    anchor_height = anchor_ymax - anchor_ymin
    anchor_width = anchor_xmax - anchor_xmin
    # Shift the anchor center by offsets proportional to the anchor size.
    ycenter = anchor_ycenter + anchor_height * rel_y_translation
    xcenter = anchor_xcenter + anchor_width * rel_x_translation
    # Scale the anchor size exponentially.
    height = math.exp(rel_height_dilation) * anchor_height
    width = math.exp(rel_width_dilation) * anchor_width
    image_width, image_height = image_size
    # Convert normalized corners to pixel coordinates, clamped to the image.
    x0 = int(max(0.0, xcenter - width / 2) * image_width)
    y0 = int(max(0.0, ycenter - height / 2) * image_height)
    x1 = int(min(1.0, xcenter + width / 2) * image_width)
    y1 = int(min(1.0, ycenter + height / 2) * image_height)
    return (x0, y0, x1 - x0, y1 - y0)
|
8b4bb1d6cdc00065649abe2167249cdfde482269
| 79,448
|
def clean_countries(df):
    """Normalize country names and collapse known duplicates.

    Strips leading/trailing whitespace from the "country" column, then
    maps "United States" to "USA" anywhere in the frame.
    """
    df["country"] = df["country"].str.strip()
    return df.replace({"United States": "USA"})
|
63115c6afd655a0259366a3388b7076fadd3cf6c
| 79,453
|
import requests
import time
def safeRequest(url, recieve_timeout=10):
    """Fetch `url` from the internet with timeout and size restrictions.

    Streams the body in 1 KiB chunks, aborting when the download exceeds
    25 MB (by Content-Length header or by actual bytes received) or
    takes longer than `recieve_timeout` seconds.

    :param url: URL to fetch
    :param recieve_timeout: timeout in seconds (connect and total download)
    :return: tuple of (requests.Response, body bytes)
    :raises ValueError: on fetch errors, oversize responses, or timeout
    """
    max_size = 25000000  # won't download more than 25MB
    try:
        r = requests.get(url, stream=True, timeout=recieve_timeout,
                         headers={'User-Agent': 'Throat/1 (Phuks)'})
    except Exception as err:
        # The old bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch Exception and preserve the original cause.
        raise ValueError('error fetching') from err
    r.raise_for_status()
    if int(r.headers.get('Content-Length', 1)) > max_size:
        raise ValueError('response too large')
    size = 0
    start = time.time()
    body = b''
    for chunk in r.iter_content(1024):
        if time.time() - start > recieve_timeout:
            raise ValueError('timeout reached')
        size += len(chunk)
        body += chunk
        if size > max_size:
            raise ValueError('response too large')
    return r, body
|
c60149979136bb7b1ecb4807175d1a4a67fad5de
| 79,455
|
import hashlib
import base64
def hashed(source_filename, prepared_options, thumbnail_extension, **kwargs):
    """
    Generate a short hashed thumbnail filename.

    The stem is the first 9 bytes of the SHA-1 of
    ``source_filename:opt:opt...`` rendered as url-safe base64 (12
    characters), e.g. ``6qW1buHgLaZ9.jpg``.
    """
    key = ':'.join([source_filename] + prepared_options).encode('utf-8')
    digest = hashlib.sha1(key).digest()[:9]
    stem = base64.urlsafe_b64encode(digest).decode('utf-8')
    return stem + '.' + thumbnail_extension
|
fbb5b5a018973460a73f5f6b48a6ee9ef353425d
| 79,460
|
def entity_exists_in_hass(hass, entity_id):
    """Return True when `entity_id` resolves to a known state in `hass`."""
    state = hass.states.get(entity_id)
    return state is not None
|
58989559a89ccc218fdd7ae188351d57bf0a6661
| 79,461
|
def get_header(hdr_tuples, name):
    """
    Return the tuples from `hdr_tuples` whose first element matches
    `name` case-insensitively (`name` must already be lowercase).
    """
    matching = []
    for header in hdr_tuples:
        if header[0].lower() == name:
            matching.append(header)
    return matching
|
ec5269d9128a884e7b6fe6671db2084d8cc1d304
| 79,463
|
def create_metrics(bpm, beats, both, dur, num_beats):
    """
    Build the metrics dictionary for an ECG signal.

    Args:
        bpm: average beats per minute
        beats: array of times when beats occur
        both: vector containing minimum and maximum voltage
        dur: duration of the ECG signal
        num_beats: number of beats that occurred during the signal

    Returns:
        dict: metrics keyed by name (the historical "voltage extremes"
        key with a space is kept as-is for compatibility)
    """
    return {
        "mean_hr_bpm": bpm,
        "voltage extremes": both,
        "duration": dur,
        "num_beats": num_beats,
        "beats": beats,
    }
|
7624baa0814e8216c729574e863c9a31685dc323
| 79,465
|
def get_dataset_filename(ds_dict):
    """Figure out the downloaded filename for a dataset entry.

    If a `file_name` key is present, use it; otherwise use the last
    component of the `url`.

    Examples
    --------
    >>> ds_dict = {'url': 'http://example.com/path/to/file.txt'}
    >>> get_dataset_filename(ds_dict)
    'file.txt'
    >>> ds_dict['file_name'] = 'new_filename.blob'
    >>> get_dataset_filename(ds_dict)
    'new_filename.blob'
    """
    explicit = ds_dict.get('file_name')
    if explicit is not None:
        return explicit
    return ds_dict.get('url', []).split("/")[-1]
|
4fc91641d80323266f924423cae6a1d06f64584e
| 79,472
|
import re
def is_emoji(text: str) -> bool:
    """Return True when `text` contains a flag pair or an emoticon emoji."""
    emoji_pattern = re.compile(
        u'([\U0001F1E6-\U0001F1FF]{2})|'  # flags
        u'([\U0001F600-\U0001F64F])'  # emoticons
        "+", flags=re.UNICODE)
    return re.search(emoji_pattern, text) is not None
|
57dfaf272541c959a3cfe1ac84778ccecd258e99
| 79,473
|
import yaml
def yamlfmt(dict_obj):
    """Convert a dictionary object into a YAML-formatted string (block style)."""
    formatted = yaml.dump(dict_obj, default_flow_style=False)
    return formatted
|
c18d6fd58bf580306f8e9f61f207254407258e90
| 79,476
|
def to_python(num):
    """
    Convert a numeric string passed in from AppleScript to a Python number.

    Tries int first, falling back to float.
    http://stackoverflow.com/questions/379906/python-parse-string-to-float-or-int | Javier
    """
    try:
        return int(num)
    except ValueError:
        pass
    return float(num)
|
19be28b0167defa321eb13e8ccb3b96f6f46bf98
| 79,485
|
def get_iou(box1, box2):
    """Compute the intersection over union (IoU) of two boxes.

    Boxes are (x1, y1, x2, y2) corner coordinates.

    Args:
        box1 (array): first box
        box2 (array): second box

    Returns:
        float: IoU value (0 when the boxes do not overlap)
    """
    ax1, ay1, ax2, ay2 = tuple(box1)
    bx1, by1, bx2, by2 = tuple(box2)
    inter_w = max(0, min(ax2, bx2) - max(ax1, bx1))
    inter_h = max(0, min(ay2, by2) - max(ay1, by1))
    inter_area = inter_w * inter_h
    if inter_area == 0:
        return 0
    area_a = (ax2 - ax1) * (ay2 - ay1)
    area_b = (bx2 - bx1) * (by2 - by1)
    return inter_area / (area_a + area_b - inter_area)
|
1274cd18bdd40e892d7a30b3d29bbf4deab4e804
| 79,487
|
def get_block_signature(img, left, top, right, bottom):
    """
    Get an image block's signature.

    The signature is a tuple of the running (row-major, block-relative)
    indices of the black pixels in the block — a pixel is "black" when
    its data value is falsy.

    ``left``, ``top``, ``right`` and ``bottom`` specify the borders of
    the block. ``left`` and ``top`` are inclusive, ``right`` and
    ``bottom`` are exclusive.
    """
    columns = img.size[0]
    data = img.getdata()
    width = right - left
    return tuple(
        (x - left) + width * (y - top)
        for y in range(top, bottom)
        for x in range(left, right)
        if not data[x + y * columns]
    )
|
6fff4c97d13c8c4da45ffbe721c62a8cb0c17775
| 79,489
|
def _num_unique_words(words):
""" Count unique number of words per song
"""
return len(set(words))
|
e8c3ce3b3c6fa0c4d369a4cca84a3062e1e8ae9d
| 79,494
|
def build_maze(raw_maze):
    """
    Build the 2D-list representation of a maze from its string form.

    Args:
        raw_maze: the maze in its string form as read from the user or its file

    Returns:
        list[list[str]]: one inner list of characters per maze row
    """
    return [list(row) for row in raw_maze.splitlines()]
|
ce759ef154d4a4c9f568370ca062e42a95dadc10
| 79,495
|
def win_ts_to_unix_epoch(high, low):
    """Convert a split Windows timestamp to a POSIX timestamp.

    See https://goo.gl/VVX0nk

    Args:
        high (int): high 32 bits of windows timestamp.
        low (int): low 32 bits of windows timestamp.

    Returns:
        float: seconds since the Unix epoch.
    """
    # 11644473600 = seconds between 1601-01-01 (Windows epoch) and 1970-01-01.
    epoch_delta = 11644473600
    return high * ((2 ** 32) / 1e9) + low / 1e9 - epoch_delta
|
6f309ec4255dd8063814ad329984f0d80afd6b36
| 79,499
|
def get_input( filename ):
    """
    Read the input file (one boarding pass per line) and return the
    lines as a list of strings without trailing newlines.
    """
    with open(filename, 'r') as input_file:
        contents = input_file.read()
    return contents.splitlines()
|
cd4c439c7fbef0e9c81a06142e7b66d592efedb4
| 79,500
|
def author_is_admin(author):
    """Check whether a Discord member has the administrator permission.

    Args:
        author (discord.Member): Discord member object

    Returns:
        Boolean: If they are an administrator
    """
    permissions = author.guild_permissions
    return permissions.administrator
|
10989ec8968c74497f2d2ab0ac5d4fabd90130b6
| 79,507
|
def join_and_sort(*argv):
    """Concatenate any number of traces into one list sorted by each
    tuple's first element (the key)."""
    merged = []
    for trace in argv:
        merged.extend(trace)
    return sorted(merged, key=lambda entry: entry[0])
|
88a1230e4d609c00e0645d16e7eae73424fbbb4a
| 79,511
|
def _is_valid_social_username(value):
"""
Given a particular string, returns whether the string can be considered a safe username.
This is a very liberal validation step, simply assuring forward slashes do not exist
in the username.
"""
return '/' not in value
|
258d4fff37cc996a0829be88a709460a8495eccc
| 79,514
|
def CreateFixture(class_name, params):
    """Initializes a fixture instance.

    Imports a fixture module based on class_name and instantiates the
    class with params.

    Args:
      class_name: fixture's import path under cros.factory.test.fixture +
        module name. For example, "dummy_bft_fixture.DummyBFTFixture"
        resolves to
        cros.factory.test.fixture.dummy_bft_fixture.DummyBFTFixture.
      params: a dict of keyword params for the constructor.

    Returns:
      An instance of the specified fixture implementation.
    """
    module_path, class_part = class_name.rsplit('.', 1)
    full_module = 'cros.factory.test.fixture.%s' % module_path
    imported = __import__(full_module, fromlist=[class_part])
    return getattr(imported, class_part)(**params)
|
49417f55d674adf89d8c86cd8a8bf2d5062d842d
| 79,517
|
def fetch_columns_with_error_check(df, columns):
    """Slice columns from df and error check that they all exist.

    NOTE(review): assumes `df.columns` is a pandas MultiIndex (the
    `remove_unused_levels` / `levels[0]` calls) and that `columns`
    names top-level labels — confirm with callers.
    """
    # Extract
    res = df.loc[:, columns].copy()
    res.columns = res.columns.remove_unused_levels()
    # Error check
    # A missing label will be silently ignored above, so compare the
    # surviving top-level labels against the requested set.
    assert sorted(res.columns.levels[0]) == sorted(columns)
    return res
|
0239fa0b84ee7c2650d4bd789a7088e2243171da
| 79,527
|
def sortDictionaryListByKey(dictList, key, reverse=False):
    """
    _sortDictionaryListByKey_

    Sort a list of dictionaries by the numerical value stored under `key`.

    NOTE: If the key does not exist, this will not raise an exception;
    missing keys sort as 0.0. This is because it is used for sorting
    performance histograms and not all histograms have the same value.
    """
    def sort_value(entry):
        return entry.get(key, 0.0)
    return sorted(dictList, key=sort_value, reverse=reverse)
|
cf6901a3c6b5a3545538fa887c560375ee6c54ee
| 79,530
|
def _user_exists(username):
""" Check if the given username exists as a system user
"""
with open('/etc/passwd', 'r') as f:
ep = f.read()
return ep.find(username) > 0
|
df1cfe5bc0a9351b3cd4210950541ad7f35a0c12
| 79,534
|
def generate_maximum_value_stat_property(name):
    """
    Generate a property instance that can be used to represent an entity's
    maximum value stat.

    The property reads and writes ``_maximum_<name>`` on the entity;
    setting a maximum also seeds the current-value attribute ``<name>``
    when it is unset, or clamps it down when the new maximum is lower.

    Arguments
    ---------
    name : str
        The name of the entity value to represent the maximum stat of.

    Returns
    -------
    property
        The generated property.
    """
    backing_attr = "_maximum_" + name

    def _get_maximum(entity):
        return getattr(entity, backing_attr)

    def _set_maximum(entity, value):
        setattr(entity, backing_attr, value)
        # Seed the current value, or clamp it to the new maximum.
        if not hasattr(entity, name) or value < getattr(entity, name):
            setattr(entity, name, value)

    return property(_get_maximum, _set_maximum)
|
524d670646a3e70b38397db1c3a20824757f4f15
| 79,536
|
def _parse_iraf_style_section(header_string):
"""
Parse IRAF/NOAO-style data sections to Python indices.
:param header_string:
The IRAF/NOAO-style data section string (e.g., [1:2048,1:4608]).
:type header_string:
str
"""
indices = []
dimensions = header_string.strip("[] ").split(",")
for dimension in dimensions:
start_pixel, end_pixel = map(int, dimension.split(":"))
# These pixels are inclusively marked.
start_index, end_index = start_pixel - 1, end_pixel
indices.append([start_index, end_index])
# IRAF/NOAO give the image shape the wrong way around
return indices[::-1]
|
180e881dd8bc5c85f253371e65127b371fad8003
| 79,537
|
def html_rep(obj):
    """Format an object in an html-friendly way.

    Uses str(obj) — falling back to repr(obj) when str() is empty —
    strips surrounding whitespace, and converts any < and > characters
    to the corresponding HTML escape sequences.
    """
    s = str(obj)
    if s == '':
        s = repr(obj)
    # The previous replace('<', '<') / replace('>', '>') calls were
    # no-ops (the escapes were evidently lost); escape properly.
    return s.strip().replace('<', '&lt;').replace('>', '&gt;')
|
e03669fd94a16f53d2e072eb627a99a4494afe0f
| 79,539
|
def _clean_train_id_b4_join(train_id):
"""Clean train_id before joining to a path."""
if train_id.startswith("./") or train_id.startswith(".\\"):
return train_id[2:]
return train_id
|
a8ed22b5f1f8952187fdc9261a900b888a4f78fd
| 79,541
|
def count_outliers(outlier_tuple):
    """Count how many tests identified the datapoint as an outlier (-1)."""
    return sum(1 for verdict in outlier_tuple if verdict == -1)
|
3b1d6415153797b0adacffd78953f88cf74ab901
| 79,543
|
import json
def parse_json(text_line):
    """Return the parsed value for a well-formed JSON line, else None."""
    try:
        parsed = json.loads(text_line)
    except ValueError:
        return None
    return parsed
|
79aed227fd503419afb73da5c98b3c3cf16681ed
| 79,545
|
def find_dict(l, key, value):
    """Return the index of the first dict in `l` whose `key` equals
    `value`; None when no dict matches."""
    index = 0
    for entry in l:
        if entry[key] == value:
            return index
        index += 1
    return None
|
1f7b379c01f544cd72781959cda671ff72af252a
| 79,546
|
from pathlib import Path
def filelist(dirpath='.', string='*'):
    """Returns a list of all files containing `string` in their name.

    Note:
        List is sorted by the filename.

    Args:
        dirpath (str or pathlib.Path, optional): directory to search.
        string (str, optional): substring (or glob pattern) to look for
            in file names; a bare substring is wrapped as ``*substring*``.

    Return:
        list

    See Also:
        :py:func:`parsed_filelist`
    """
    pattern = string if '*' in string else '*' + string + '*'
    matches = list(Path(dirpath).glob(pattern))
    return sorted(matches, key=lambda filepath: filepath.name)
|
4340ff93073425975aa2a5f14437767c3d6077d5
| 79,548
|
def reverse_dict(dict_):
    """Invert a dict of iterables: each element maps to the list of
    keys whose value contained it.

    Example:
        >>> d = {'iso': ['a', 'b'], 'iso2': ['b']}
        >>> reverse_dict(d)
        {'a': ['iso'], 'b': ['iso', 'iso2']}
    """
    inverted = {}
    for key, elements in dict_.items():
        for element in list(elements):
            inverted.setdefault(element, []).append(key)
    return inverted
|
730750609d35dccae444806b40e197ea9ac349f1
| 79,550
|
def count_number_of_records(db_filename):
    """Count the lines in `db_filename` that begin a record, i.e. start
    with "record("."""
    with open(db_filename, 'rt') as f:
        return sum(1 for line in f if line.startswith('record('))
|
f5bf484e78d6230e11ebd39b0582b0eecfd6c9b3
| 79,553
|
def get_transaction_dicts(tdb_obj_list, include_raw_data=False):
    """Derive a list of transaction dicts from an object list.

    :param tdb_obj_list: List of transaction objects
    :type tdb_obj_list: `list`
    :param include_raw_data: true/false to include raw transactions
    :type include_raw_data: `bool`
    :return: List of transaction dicts
    :rtype: `list`
    """
    dicts = []
    for tdb_obj in tdb_obj_list:
        dicts.append(tdb_obj.to_dict(include_raw_data))
    return dicts
|
5bf4952b77343828f46595d9c01aaf9ce7ed6ddd
| 79,556
|
def rounder(num, digits=None):
    """Round a floating point number to given number of digits after the
    decimal point.

    Parameters
    ----------
    num : float
        Number to round.
    digits : int, optional
        Digits after the decimal point.

    Returns
    -------
    int or float
        Rounded number.

    See Also
    --------
    initize

    Notes
    -----
    Variant of `intize`, allowing rounding to custom decimal precision.
    NOTE(review): when digits is 0 it is falsy, so the tolerance falls
    back to the plain 1e-7 used for digits=None — confirm intended.
    """
    # Snap num to the nearest half-step at the target precision by
    # rounding num*2 and halving.
    near = round(num * 2, digits) / 2
    # Accept the snapped value only when it is within a precision-scaled
    # epsilon of the original; otherwise round num directly.
    if abs(num - near) <= (1e-7 / 10 ** digits if digits else 1e-7):
        return round(near, digits)
    else:
        return round(num, digits)
|
110aeb6a6e0e54ba4fb9c1aefa8fbd43b2987130
| 79,560
|
from typing import Union
from typing import List
from typing import Dict
def values(data: Union[int, float, str, bool, List, Dict[str, float]], **config):
    """
    Constructs a values Field Spec.

    :param data: to use to supply values
    :param config: in **kwargs format
    :return: the values spec
    """
    spec = {"type": "values", "data": data}
    if config:
        spec['config'] = config
    return spec
|
2becc8995f7042af770f10b90513ddafba94ab46
| 79,564
|
def get_status(result):
    """
    Return the status of the solution from the result object.

    :param result: a pyomo result object
    :return: the termination condition as a string
    """
    condition = result.solver.termination_condition
    return str(condition)
|
20318274b78b9804e71d49b77be58d5ad947914d
| 79,565
|
def get_query_features_df(df_samples, queries):
    """Evaluate queries on df_samples and return a boolean feature dataframe.

    If X = df_bool.values, then X[i, j] is True iff condition j is true
    for sample i. A failing query is printed before re-raising.
    """
    features = df_samples[[]].copy()
    for condition in queries:
        try:
            features[condition] = df_samples.eval(condition)
        except Exception as exc:
            print(condition, exc)
            raise
    return features
|
173508348fc8732b4960551cc4047d12c9889b95
| 79,566
|
def remove_smallwords(tokens, smallwords_threshold: int) -> list:
    """
    Remove words whose length is at or below a threshold.

    ["hello", "my", "name", "is", "John", "Doe"] --> ["hello","name","John","Doe"]

    Parameters
    ----------
    tokens : list
        list of strings
    smallwords_threshold: int
        maximum length of the words to drop

    Returns
    -------
    list
    """
    return [word for word in tokens if len(word) > smallwords_threshold]
|
0f824214d621a6c945c6cf30f6c25b5eba3d6a12
| 79,569
|
def minor(release):
    """Add a suffix to release, making 'X.Y' become 'X.Y.Z' (with Z = 1)."""
    return "{}.1".format(release)
|
d8fb13edb617f5b782a5298e39a4969e2652fe42
| 79,570
|
def serial_number(unique_id):
    """Translate a unique ID into a 'XXXX-rest' serial number string."""
    text = str(unique_id)
    return '-'.join((text[:4], text[4:]))
|
64de94b7f78fd36527739db2227ac5894cdc49fb
| 79,572
|
def is_public(element):
    """Check if the element's (Function or Event) visibility is public."""
    visibility = element.visibility
    return visibility == "public"
|
c524cbf62cee459fd859de7ec769a66cfcc7b36e
| 79,577
|
def join_tweets_users(df_tweet, df_user):
    """Left-join a dataframe of tweets with a dataframe of users on user id.

    Args:
        df_tweet (DataFrame): A dataframe containing tweet text with user ids
        df_user (DataFrame): A dataframe containing user name and id

    Returns:
        DataFrame: tweet text, user id, and user name
    """
    return df_tweet.merge(df_user, on='user_id', how='left')
|
74c520f90b05e2989496c50ddfade94bbd3ce483
| 79,579
|
def to_2dp(fn):
    """Wrap `fn` so its pence result (e.g. 1000) renders as a 2dp string (e.g. '10.00')."""
    def wrapper(*args, **kwargs):
        pence = float(fn(*args, **kwargs))
        return '%.02f' % (pence / 100.0)
    return wrapper
|
3ee90c0f1be10c3d0aa4d70eace785a6b7ed1054
| 79,582
|
def sort_dict_by_value(collection: dict, reverse: bool=True) -> list:
    """Sort a dictionary by its values and return the keys.

    Args:
        collection (dict(str, int)): key/value pairs to rank by value
        reverse (bool, optional): reverses the sort, True is descending.
            Defaults to True.

    Returns:
        list(str): keys ordered by their corresponding values
    """
    ranked = sorted(collection.items(), key=lambda item: item[1], reverse=reverse)
    return [key for key, _ in ranked]
|
3d56d5b889817061e258f1d9328443e9634fa690
| 79,583
|
def _is_combinator_subset_of(specific, general, is_first=True):
"""Return whether `specific` matches a non-strict subset of what `general`
matches.
"""
if is_first and general == ' ':
# First selector always has a space to mean "descendent of root", which
# still holds if any other selector appears above it
return True
if specific == general:
return True
if specific == '>' and general == ' ':
return True
if specific == '+' and general == '~':
return True
return False
|
c35b4dfd9692980e036537b9a4007dfa634e0781
| 79,587
|
def get_resolution(wofls):
    """Get the resolution for a WOfLs product (KeyError for unknown names)."""
    return {
        'ga_ls_wo_3': (-30, 30),
        'wofs_albers': (-25, 25),
        'ga_s2_wo_3': (-10, 10),
    }[wofls]
|
45d45585f0ae1f60e3089a6e2f4fe771327b3abe
| 79,588
|
def is_local_host(location):
    """
    :param location: Location string in the format ip[/slot[/port]].
    :returns: True if ip represents localhost or offline, else False.
    """
    lowered = location.lower()
    markers = ('localhost', '127.0.0.1', 'offline', 'null')
    return any(marker in lowered for marker in markers)
|
f6f88ef64facb21b3a3b24fed9887e5576a5c25b
| 79,589
|
from typing import List
def average(l: List[float]) -> float:
    """Average of a list of numbers; 0 for an empty list."""
    if not l:
        return 0
    return sum(l) / len(l)
|
715d455608a5c55aa8699bfc6823aa8c96a1fdf3
| 79,591
|
import tempfile
def temp(ext="jpg"):
    """Create a temporary image file with the given extension and return
    its path.

    Unlike the old `tempfile.mktemp`-based version (deprecated and
    race-prone: the path could be claimed by another process before
    use), this securely creates the file and returns its name; callers
    simply overwrite it.
    """
    if ext[0] == ".":
        ext = ext[1:]
    with tempfile.NamedTemporaryFile(suffix="." + ext, delete=False) as tmp:
        return tmp.name
|
d6dce0619f18cc819af5bfa048e0ae3514a711f4
| 79,592
|
def maybe_singleton(py_object):
    """Returns `True` if `py_object` might be a singleton value .
    Many immutable values in python act like singletons: small ints, some strings,
    Bools, None, the empty tuple.
    We can't rely on looking these up by their `id()` to find their name or
    duplicates.
    This function checks if the object is one of those maybe singleton values.
    Args:
      py_object: the object to check.
    Returns:
      A bool, True if the object might be a singleton.
    """
    # isinstance accepts nested tuples of types.
    immutable_types = (int, str, bytes, float, complex, bool, type(None))
    if isinstance(py_object, immutable_types):
        return True
    # CPython interns the empty tuple, so the old `py_object is ()` test
    # worked but identity comparison with a literal raises SyntaxWarning
    # on modern interpreters; test type and emptiness instead.
    return type(py_object) is tuple and not py_object
|
289612258c9e4a26066e1d0edd48157c55d067b7
| 79,596
|
def Elastic(m1, m2, v1, v2):
    """Return a tuple of the velocities resulting from a perfectly
    elastic collision between masses m1 traveling at v1 and m2
    traveling at v2; simultaneously conserves momentum and energy."""
    # Expressions kept in the original order for bit-identical floats.
    v2_after = ((m2 * v2) + (m1 * ((2.0 * v1) - v2))) / (m1 + m2)
    v1_after = v2_after + v2 - v1
    return v1_after, v2_after
|
96016a25ed40e0842be6796aa820fe46b862c8c6
| 79,598
|
def check_polygon_in_band(polygon, lat_min, lat_max):
    """
    Check whether any vertex of a polygon lies strictly inside a
    latitude band.

    Parameters
    ----------
    polygon : pygpplates polygon
    lat_min : float
        the minimum latitude of the latitude band
    lat_max : float
        the maximum latitude of the latitude band

    Returns
    -------
    in_band : boolean
        True if any vertex latitude satisfies lat_min < lat < lat_max
    """
    # First column of the lat/lon array holds the vertex latitudes.
    vertex_lats = polygon.to_lat_lon_array()[:, 0]
    return any(lat_min < lat < lat_max for lat in vertex_lats)
|
bedb60d574079f59bf709d4127c74f8722f0bd00
| 79,599
|
import hashlib
def hash(file) -> str:
    """Return the SHA-256 hex digest of the first 2MB of the file's
    contents, or "" for an empty file."""
    chunk_limit = 2000000
    with open(file, 'rb') as handle:
        head = handle.read(chunk_limit)
    if not head:
        return ""
    return hashlib.sha256(head).hexdigest()
|
8a183cbb196b8a1d538cc0017d26751540a46805
| 79,600
|
def mean_metal_z(z):
    """
    Mean (mass-weighted) metallicity as a function of redshift.
    From Madau & Fragos 2017, courtesy of Mike Zevin.

    Parameters
    ----------
    z : `float or numpy.array`
        redshift

    Returns
    -------
    Z : `float or numpy.array`
        mean metallicity
    """
    solar_metallicity = 0.017
    log_z_over_solar = 0.153 - 0.074 * z ** 1.34
    return 10 ** log_z_over_solar * solar_metallicity
|
9149f5e5de4febe7f980442ce8989a7e1c91a365
| 79,601
|
def is_prime(num):
    """
    Returns true if the number is prime.

    To check if a number is prime, we need to check all its divisors
    up to only sqrt(number).
    Reference: https://stackoverflow.com/a/5811176/7213370

    The previous version only special-cased 1, so 0 and negative
    numbers (whose trial-division loop never runs) were wrongly
    reported as prime.
    """
    # 0, 1 and all negatives are not prime.
    if num < 2:
        return False
    # check for a divisor up to sqrt(number)
    i = 2
    while i * i <= num:
        if num % i == 0:
            return False
        i += 1
    return True
|
590dec3fbebd5647647a8835fd032db14185b4b1
| 79,608
|
def find_all_images(rootdir):
    """Lazily yield the .tif/.png files directly under `rootdir`, in name order."""
    return (entry for entry in sorted(rootdir.iterdir())
            if entry.is_file() and entry.suffix in ('.tif', '.png'))
|
fc1ceca803e0e27307ba28664f02b8f54dde67b0
| 79,613
|
def format_function_call(func_name, *args, **kwargs):
    """
    Format a function call of a PipelineStage or Dataset object to ensure
    proper recording of the function and its arguments; args and kwargs
    should be exactly those passed to the function.

    Parameters
    ----------
    func_name : str
        Name of the stage

    Returns
    -------
    str
        Formatted function call, e.g. ``name(arg, ..., key=value)``
    """
    pieces = [str(a) for a in args]
    pieces += ['%s=%s' % (k, v) for k, v in kwargs.items()]
    return func_name + '(' + ', '.join(pieces) + ')'
|
ed249a14b162274460fe2709dbd7846bb7f8e40f
| 79,614
|
import struct
def _Unpack(content, offset, format_string):
"""Unpack the content at the offset.
Args:
content: String to be unpacked.
offset: Offset from the beginning of the content.
format_string: Format string of struct module.
Returns:
See struct.unpack.
"""
size = struct.calcsize(format_string)
result = struct.unpack(format_string, content[offset : offset + size])
return result
|
54acdb7d9e529c4e662c77fb172bd5360f4833a9
| 79,615
|
def to_camel_case(text):
    """
    Convert a string with underscores to camel case.

    .. note:: Source StackOverflow <https://stackoverflow.com/a/19053800>`_

    :param str text: The string to convert
    :rtype: str
    """
    head, *tail = text.split("_")
    return head.lower() + "".join(part.title() for part in tail)
|
262084033f52018e97789bab76eeb2aec36afdab
| 79,616
|
def to_bow(plain_text_doc, dict, preproc, model=None):
    """
    Convert a plain-text doc into Gensim BoW format (list of
    (token_id, token_count)), optionally passing the result through a
    transformation model.
    """
    bow = dict.doc2bow(preproc.proc(plain_text_doc))
    return model[bow] if model else bow
|
b1e69b5b7ae51315d29dc05b6543a7508de817fe
| 79,623
|
import random
def random_weighted_choice(element_list, weight_attribute='weight'):
    """Return a random element from the list, weighted by each element's
    `weight_attribute`; zero-weight elements are never chosen."""
    total_weight = sum(getattr(e, weight_attribute) for e in element_list)
    threshold = random.SystemRandom().uniform(0, total_weight)
    cumulative = 0
    for candidate in element_list:
        weight = getattr(candidate, weight_attribute)
        if weight == 0:
            continue
        cumulative += weight
        if cumulative >= threshold:
            return candidate
    assert False, 'Failed to make a random weighted choice.'
|
75694dfb36f9ae664cfc8e9f32b95461aedcb4de
| 79,627
|
def create_target_attribute_lists(white_names, black_names, attributes_A, attributes_B):
    """
    Creating list of attributes and target words.
    attributes: A - fear, B - joy
    target: X - white names, Y - black names

    The name arguments are accepted for interface compatibility but not
    used; sentences are built from fixed templates. Returns (A, B, X, Y).
    """
    # Single source of truth for the target sentences: the old version
    # duplicated the identical list once per race.
    target_templates = [
        "This person is {race}",
        "This is a {race} person",
        "A {race} person is here",
        "The {race} person is walking",
        "The {race} person is brushing their teeth",
        "The {race} person is running late",
        "The teacher was speaking to the {race} student",
        "I have a {race} neighbor",
    ]
    X = [t.format(race='white') for t in target_templates]
    Y = [t.format(race='black') for t in target_templates]
    attribute_templates = [
        "They make me feel {emotion}",
        "They feel {emotion}",
        "The situation makes them feel {emotion}",
    ]
    # Emotion-major order: three sentences per emotion, as before.
    A = [t.format(emotion=e) for e in attributes_A for t in attribute_templates]
    B = [t.format(emotion=e) for e in attributes_B for t in attribute_templates]
    assert(len(X) == len(Y))
    assert(len(A) == len(B))
    return A, B, X, Y
|
a3f1d7e6476cbe95ed1ce97a480fccbec50e5329
| 79,628
|
def remove_negatives(x):
    """ Remove all negative and NaN values from x

    Parameters
    ----------
    x: np.array
        An array

    Returns
    -------
    non_negative_x: np.array
        Only the entries of `x` for which ``x >= 0`` holds (NaN compares
        False, so NaNs are dropped too). The shape depends on how many
        negative/NaN values were in x.
    """
    keep_mask = x >= 0
    return x[keep_mask]
|
736a6717420e04989ee4850a5ca7a15c806f5809
| 79,629
|
from typing import Union
def seconds_to_formatted_string(seconds: Union[float, int]) -> str:
    """
    Get a nicely formatted time elapsed string.

    Parameters
    ----------
    seconds
        number of seconds in either float or int; fractional seconds are
        rounded to the nearest whole second

    Returns
    -------
    str
        formatted string, e.g. '1 hours, 2 minutes, 5 seconds'
    """
    # Negative durations are clamped to zero.
    if seconds < 0:
        return '0 seconds'
    # Round once up front so h/m/s are all ints.  The original divmod on a
    # float produced float quotients and printed '1.0 hours, 2.0 minutes'.
    total = round(seconds)
    m, s = divmod(total, 60)
    h, m = divmod(m, 60)
    if h:
        return f'{h} hours, {m} minutes, {s} seconds'
    elif m:
        return f'{m} minutes, {s} seconds'
    else:
        return f'{s} seconds'
|
04cccee2f21be6a30086885df80b24b14c775b1f
| 79,637
|
def sort_data(data_df, sort):
    """Sort data in ascending or descending order by the first column.

    Args:
        data_df (df): Dataframe of data to plot
        sort (str): 'ascending' or 'descending' order to sort in

    Returns:
        df: Dataframe sorted in the specified order; returned unchanged
        when `sort` is neither recognised value.
    """
    ascending = {"ascending": True, "descending": False}.get(sort)
    if ascending is not None:
        data_df = data_df.sort_values(data_df.columns[0], ascending=ascending)
    return data_df
|
902e227bfa57366642e69211f660403d655eaf12
| 79,645
|
def get_file_size(line_count):
    """ Description : Returns a human readable file line count. 38734 lines -> 39K
        Parameters  : Raw line count [line_count]
        Returns     : Human readable file line count (ex : 12M); "N/A" when
                      the count is zero, negative, or above the M range.
    """
    suffixes = [{"lower":1, "upper":999, "suffix":""},
                {"lower":999, "upper":999999, "suffix":"K"},
                {"lower":999999, "upper":999999999, "suffix":"M"}]
    for suffix in suffixes:
        # Inclusive bounds: the original strict comparisons made boundary
        # counts (1, 999, 999999) fall through to "N/A".  At a shared edge
        # the first (smaller) bracket wins.
        if suffix["lower"] <= line_count <= suffix["upper"]:
            return "{0} {1}".format(round(line_count/suffix["lower"]), suffix["suffix"])
    return "N/A"
|
f102dc98ffb44bb03980264fd6cdddb248c2195f
| 79,648
|
def high_prec_to_float(string):
    """Transforms high precision string to float.
    Input:  High precision string
            (e.g HH:MM:SS, HHhMMmSSs, sDD:MM:SS, sDD°MM°SS)
    Output: Value as float
    """
    # A well-formed high-precision string is 8-10 characters long.
    if not 8 <= len(string) <= 10:
        raise ValueError
    # Dec strings carry a leading sign; ra strings do not.  Strip the sign
    # (if present) and remember it as +/-1.
    if string.startswith(('+', '-')):
        sign = -1 if string[0] == '-' else 1
        string = string[1:]
    else:
        sign = 1
    # Sexagesimal conversion: fields sit at fixed offsets 0-2, 3-5, 6-8.
    degrees = int(string[0:2])
    minutes = int(string[3:5])
    seconds = int(string[6:8])
    return sign * (degrees + minutes / 60. + seconds / 3600.)
|
e9b0e7f249c268441563d7609a12e955406055ee
| 79,649
|
import json
def get_fps_from_file(filepath: str) -> float:
    """
    Get fps from JSON file generated by FFprobe.

    Computes nb_frames / duration for the video stream; defaults to 30
    when no video stream is present.  If several video streams exist,
    the last one wins (matches the original scan order).
    """
    with open(filepath) as jsonf:
        probe = json.load(jsonf)
    fps = 30
    for stream in probe["streams"]:
        if stream["codec_type"] != "video":
            continue
        fps = float(stream["nb_frames"]) / float(stream["duration"])
    return fps
|
9655ef68bb0ba7aed76e33cbb297a11ed941ee24
| 79,660
|
def convert2json_type(obj: set) -> list:
    """Convert sets to lists for JSON serialization.

    Raises TypeError for any non-set input, mirroring json.JSONEncoder's
    `default` hook contract.
    """
    if not isinstance(obj, set):
        raise TypeError(f"Object of type {type(obj)} is not JSON serializable.")
    return list(obj)
|
0f4c9e536e7bd29126c67dfc75a8e9d560a58abc
| 79,664
|
import csv
def read_csv_as_json(fname: str):
    """
    Read CSV file as JSON format, i.e., dict type in Python

    The first row is treated as the header; each subsequent row becomes a
    dict mapping header fields to that row's values.  An empty file yields
    an empty list (the original raised IndexError).
    """
    # newline='' is required by the csv module so quoted fields containing
    # embedded newlines are parsed correctly on every platform.
    with open(fname, 'r', newline='') as f:
        rows = list(csv.reader(f))
    if not rows:
        return []
    fields, *records = rows
    n = len(fields)
    return [{fields[i]: row[i] for i in range(n)} for row in records]
|
006fecd6a79dd1560f73c8f813506900aedc15b2
| 79,667
|
def validInputSign(sign: str):
    """
    Take a sign as argument
    If sign is valid, returns a tuple: (True, 'sign in english', 'sign in french')
    If sign is not valid, returns a tuple: (False, 'Error message', '')
    """
    signsEn = ["aries", "taurus", "gemini", "cancer", "leo", "virgo", "libra", "scorpio", "sagittarius", "capricorn", "aquarius", "pisces"]
    signsFr = ["belier", "taureau", "gemeaux", "cancer", "lion", "vierge", "balance", "scorpion", "sagittaire", "capricorne", "verseau", "poissons"]
    # Lowercase FIRST so capitalised accented forms ("Bélier", "Gémeaux")
    # are also normalised; the original stripped accents before lowering
    # and therefore rejected them.
    sign = sign.lower()
    if sign == "bélier":
        sign = "belier"
    if sign == "gémeaux":
        sign = "gemeaux"
    if sign in signsEn:
        return (True, sign, signsFr[signsEn.index(sign)])
    elif sign in signsFr:
        return (True, signsEn[signsFr.index(sign)], sign)
    return (False, "ERROR: It seems you have entered a wrong astrological sign.", "")
|
7cfeeae87e5444308a69ca8a2591bf29f65d35af
| 79,668
|
def decode_simple(value):
    """
    Decode a byte string to text.

    Simple heuristics: try UTF-8 first, fall back to cp1252.

    :Parameters:
      - `value`: The value to decode

    :Types:
      - `value`: ``bytes``

    :return: The decoded value
    :rtype: ``str``
    """
    try:
        return value.decode('utf-8')
    except UnicodeError:
        pass
    # UTF-8 failed: assume legacy Windows-1252 (may itself raise).
    return value.decode('cp1252')
|
7b3aa1898c34a863ffa6e0d5c0232ae5882e35b1
| 79,670
|
def GetPatchJobName(patch_job_uri):
    """Return the name of a patch job from its URI.

    Assumes the layout 'projects/<project>/patchJobs/<name>', i.e. the
    job name is the fourth slash-separated segment.
    """
    segments = patch_job_uri.split('/')
    return segments[3]
|
1ec440713488865a52afb4be6febcf00cfdad50f
| 79,671
|
def update_component_ref(component, ref):
    """
    Update the reference property for each line in a component.

    NOTE: this method mutates the lines in `component`.

    Args:
        component (`list` of Line): A list of lines
        ref (int or str): The new value for the reference property

    Returns:
        `list` of Line: the same (mutated) component, for chaining.
    """
    for component_line in component:
        component_line.label.ref = ref
    return component
|
d75e69616d7d8583b53531d7acc2e3e291474d2d
| 79,679
|
def is_vowel(letter):
    """Return True if the letter is a lowercase vowel, False otherwise

    >>> is_vowel("a")
    True
    >>> is_vowel("b")
    False
    >>> is_vowel("")
    False
    """
    # Membership in a set of single characters rather than the substring
    # test `letter in "aeiou"`, which wrongly returned True for "" (empty
    # string is a substring of everything) and for runs like "ae".
    return letter in {"a", "e", "i", "o", "u"}
|
5f37df74d96f9db0d079bf0c53926105983d8be8
| 79,683
|
from typing import List
def _trapezoid_area(point_left: List[float], point_right: List[float]) -> float:
"""Computes the area under the line between the given control points.
Args:
point_left: ([x, y]) The upper left corner of the trapesoid.
point_right: ([x, y]) The upper right corner of the trapesoid.
Returns:
The area of the trapesoid.
"""
left_x, left_y = point_left
right_x, right_y = point_right
base = abs(right_x - left_x)
height = (left_y + right_y) / 2.
return base * height
|
3f57c8a44f8cf5aecee4b9cf6834ed0a56929ac9
| 79,685
|
import math
def distance_set(coord, curset):
    """
    Return the distance from a coordinate to any item in the set

    Computes the minimum euclidean distance from `coord` to the members
    of `curset`; an empty collection yields math.inf.
    """
    return min(
        (coord.euclidean_distance(item) for item in curset),
        default=math.inf,
    )
|
707f5417eb31a016fa634a40658fad74f26412b0
| 79,687
|
import re
def remove_emissions_prefix(x, gas='XXX'):
    """Return x with emissions prefix removed, e.g.,
    Emissions|XXX|foo|bar -> foo|bar
    """
    # Raw string: the original non-raw '\|' is an invalid escape sequence
    # (SyntaxWarning on modern Python).  re.escape guards against regex
    # metacharacters in the gas name.
    return re.sub(r'^Emissions\|{}\|'.format(re.escape(gas)), '', x)
|
000b00aec9ef658aa3f3c46a8ef4b234d50ef420
| 79,691
|
import torch
def make_positions(tensor, pad_index: int):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at pad_index+1. Padding symbols are ignored
    (they keep the value pad_index).
    """
    # 1 where the token is real, 0 where it is padding.
    not_pad = tensor.ne(pad_index).long()
    # Running count of real tokens along dim 1 gives 1-based positions;
    # multiplying by the mask zeroes pad slots, then shift by pad_index.
    positions = torch.cumsum(not_pad, dim=1) * not_pad
    return positions + pad_index
|
074e6dbc8074fef982ac3385374fb7e7ba2dcb71
| 79,692
|
def _get_hpr(M_t, flow_t, M_t1):
"""Get Holding Period Return (HPR)
Formula variable definitions taken from here:
https://en.wikipedia.org/wiki/Time-weighted_return#Time-weighted_return_compensating_for_external_flows
Put in separate function basically just for documentation purposes.
The calculation is simple, but the interpretation needs clarification.
Args:
M_t (scalar or Series): Money at time t, immediately after flow_t
(includes cash flow at time t)
flow_t (scalar or Series): Cash flow immediately prior to calculating
M_T, but at time t
M_t1 (scalar or Series): Money at time t-1
Returns:
[type]: [description]
"""
return (M_t - flow_t)/M_t1
|
a6a366a839e76893bc673c303c0de03756b6412c
| 79,695
|
import torch
def kl_gumbel(logits, num_vertices):
    """Computes the analytical kl(q(z|x)||p(z)) = u + exp(-u) - 1.

    q(z|x) is gumbel distributed with location (u) given by logits.
    p(z) is gumbel distributed with location zero.
    """
    # Elementwise KL between two Gumbels that differ only in location.
    elementwise_kl = logits + torch.exp(-logits) - 1.0
    # Sum over the last two axes, then normalise per vertex.
    return elementwise_kl.sum(dim=(1, 2)) / num_vertices
|
30133283f7c190e83e0eae432fd57ddc8e477e21
| 79,702
|
def command(request):
    """ Return the command to run.

    Thin accessor: the command is whatever parameter the incoming
    `request` object carries.
    """
    cmd = request.param
    return cmd
|
6becb3656e07769fb5472a5cca6b9ba04da8011e
| 79,704
|
def get_commands_file(filename):
    """
    This takes a sql script file and breaks it down to commands to be executed separately
    returns list(string) of individual commands
    """
    with open(filename, "r") as sql_file:
        # Split file into a list of statements on ';'
        commands = sql_file.read().split(';')
    # Only drop the trailing entry when it is empty/whitespace (the usual
    # leftover after a final ';').  The original popped unconditionally and
    # lost the last command of a script without a trailing semicolon.
    if commands and not commands[-1].strip():
        commands.pop()
    return commands
|
efd475bc5b79b5a1410c2f6b29e5ebf0c42aeda0
| 79,705
|
def conv2d_output_size(kernel_size, stride, sidesize):
    """Calculate output size of Conv2D layer given kernel_size, stride, and
    size of one side of input image.  Assumes a square input image (and,
    per the formula, no padding and dilation 1)."""
    kernel_span = kernel_size - 1  # extra positions the kernel covers beyond its anchor
    return (sidesize - kernel_span - 1) // stride + 1
|
c6a4e95d4f69006c4e6db191c275cbf3679a6d6f
| 79,716
|
def height_human(float_inches):
    """Takes float inches and converts to human height in ft/inches

    Returns e.g. "5 foot, 10 inches".  Inches are rounded to the nearest
    whole inch; a rounded-up 12 carries into an extra foot.
    """
    # Floor division for the feet.  The original int(round(x/12, 2)) is
    # NOT a floor: e.g. 71.6 in produced "5 foot, 12 inches".
    feet = int(float_inches // 12)
    inches_left = round(float_inches - feet * 12)
    if inches_left == 12:  # rounding pushed us to a full foot
        feet += 1
        inches_left = 0
    result = f"{feet} foot, {inches_left} inches"
    return result
|
fd58156125e5247037af41b62812a8f08b855cb6
| 79,719
|
def convert_age(age: str):
    """
    Convert age string.

    All age ranges in format AXX-AXX with X = numeric.
    Exception: A80+ (open-ended, capped at 120).
    """
    parts = age.split("-")
    if len(parts) == 1:
        # Open-ended bucket such as "A80+".
        return {"start": 80.0, "end": 120.0}
    start_str, end_str = parts[0], parts[1]
    # Digits sit at positions 1-2, after the leading 'A'.
    return {
        "start": float(start_str[1:3]),
        "end": float(end_str[1:3]),
    }
|
7b22029ce52be3f29fb45482ba87f99287c44429
| 79,724
|
def toCMISValue(value):
    """
    Utility function to convert Python values to CMIS string values
    """
    # Identity checks (not ==) so the integers 0/1 pass through unchanged
    # instead of being treated as the booleans False/True.
    for singleton, text in ((False, 'false'), (True, 'true'), (None, 'none')):
        if value is singleton:
            return text
    return value
|
68ff2c87743fe5789ff46ab91892b4fe9efacd10
| 79,726
|
def _create_context_response(context_el, status_code):
"""
Function to build the context response model
:param context_el: JSON including the context element attributes
:param status_code: status code received from context manager
:return (dict) Context response mode. The contextResponse in JSON will be like this:
{
"contextResponses" : [
{
"contextElement" : {
"type" : "Room",
"isPattern" : "false",
"id" : "Room1",
"attributes" : [
{
"name" : "temperature",
"type" : "float",
"value" : "23"
}
]
},
"statusCode" : {
"code" : "200",
"reasonPhrase" : "OK"
}
}
]
}
"""
return [{"contextElement": context_el, "statusCode": status_code}]
|
3aa44f5ea0b203df5be00431756d5a793437fb88
| 79,728
|
import uuid
def gen_random_name(prefix):
    """Take random UUID and append specified prefix."""
    random_part = str(uuid.uuid4())[:13]  # first 13 chars: 'xxxxxxxx-xxxx'
    return f"{prefix}-{random_part}"
|
a3f34191b8841c5df1ba5ec7b25cf0f4fc893351
| 79,730
|
import torch
def featurize_nodes_and_compute_combo_scores(
        node_featurizer, reactant_mol, valid_candidate_combos):
    """Featurize atoms in reactants and compute scores for combos of bond changes.

    Parameters
    ----------
    node_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
        Featurization for nodes like atoms in a molecule, which can be used
        to update ndata for a DGLGraph.  Must return a dict with key 'hv'.
    reactant_mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance for reactants in a reaction
    valid_candidate_combos : list
        valid_candidate_combos[i] gives a list of
        (atom1, atom2, change_type, score) tuples — the i-th valid combo of
        candidate bond changes.

    Returns
    -------
    node_feats : float32 tensor of shape (N, M)
        Node features for reactants, N for the number of nodes and M for the
        feature size
    combo_bias : float32 tensor of shape (B, 1)
        Per-combo sum of candidate scores, B = len(valid_candidate_combos)
    """
    node_feats = node_featurizer(reactant_mol)['hv']
    # One summed score per combo, shaped as a (B, 1) column vector.
    combo_scores = [
        sum(score for (_, _, _, score) in combo)
        for combo in valid_candidate_combos
    ]
    combo_bias = torch.tensor(combo_scores).float().reshape(-1, 1)
    return node_feats, combo_bias
|
fabaa791c2e08dec75a7d63ab9cee1cb261b3a56
| 79,731
|
def returnRainfall(dd):
    """Returns rainfall data in units kg/m2/s

    Converts a precipitation volume (mm) accumulated over a duration into a
    mass flux using a freshwater density of 1000 kg/m3.  Returns an empty
    list when neither precipitation field is present (best-effort
    behaviour, kept from the original).
    """
    rho_fresh = 1000  # freshwater density, kg/m3
    if 'LIQUID_PRECIPITATION_VOLUME' in dd:
        hours = dd['LIQUID_PRECIPITATION_DURATION']
        volume_mm = dd['LIQUID_PRECIPITATION_VOLUME']
        return volume_mm/(1000*hours*3600)*rho_fresh
    if 'PRECIP_VOLUME' in dd:
        minutes = dd['PRECIP_PERIOD']
        volume_mm = dd['PRECIP_VOLUME']
        return volume_mm/(1000*minutes*60)*rho_fresh
    return []
|
330be74ab99730375e16ecb6814f72500e03c05d
| 79,732
|
def embeddings(idx):
    """
    The function helps in renaming embedding layer weights.

    Args:
        idx: stage number in original model

    Returns:
        list of (HF checkpoint key, original checkpoint key) tuples for the
        stage's patch-embedding projection and normalization tensors.
    """
    hf_prefix = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig_prefix = f"stage{idx}.patch_embed"
    # (HF suffix, original suffix) pairs, in the original emission order.
    suffix_pairs = [
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ]
    return [
        (f"{hf_prefix}.{hf_suffix}", f"{orig_prefix}.{orig_suffix}")
        for hf_suffix, orig_suffix in suffix_pairs
    ]
|
6ed5c06eb1378db2ed53f73e95b1b1c0e5e1917a
| 79,733
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.