content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from typing import NamedTuple
def dump_config(fields: NamedTuple):
    """
    Convert config structures back to raw configs.
    :param fields: the config instance to be dumped (a NamedTuple instance)
    :return: raw configs, a python dict; None-valued fields are omitted and
        nested NamedTuple fields are dumped recursively
    """
    # `_field_types` was deprecated in Python 3.8 and removed in 3.9;
    # `_fields` is the stable NamedTuple marker on all versions.
    assert hasattr(fields, '_fields')
    config = {}
    for key, value in fields._asdict().items():
        if value is None:
            # omit unset options from the raw config
            continue
        if hasattr(value, '_fields'):
            # nested config structure: recurse
            config[key] = dump_config(value)
        else:
            config[key] = value
    return config
|
cb652208c15f685ec424f2584e0927bdda800d3b
| 52,907
|
import json
def parse_command(params):
    """Parse a command with a first string param and a second
    json-encoded param.

    :param params: string of the form "name" or "name <json>"
    :return: (name, parsed_json) — parsed_json is [] when no JSON part is given
    """
    name, _, args = params.partition(' ')
    args = args.strip()
    # Explicit conditional instead of `args and json.loads(args) or []`,
    # which wrongly returned [] whenever the JSON payload was falsy
    # (e.g. 0, false, "" or []).
    return name, json.loads(args) if args else []
|
ddf6a4d9c59b474032554eb522051bc62eb7e899
| 52,909
|
def validateWavelengths(wavelengths: list, bbl: list):
    """
    Validate a wavelength list against its bad-band list.

    :param wavelengths: list of measured wavelength bands
    :param bbl: list of flags marking good (1) / bad (0) quality bands
    :return: the (possibly trimmed) `wavelengths` and `bbl` lists
    :raises ValueError: when the two lists differ in length
    """
    if len(bbl) != len(wavelengths):
        raise ValueError(
            "Length of wavelengths ({0}) and bbl ({1}) is not equal.".format(
                len(wavelengths), len(bbl)))
    # A 139-entry list carries a zero-wavelength first band; drop it.
    if len(wavelengths) != 139:
        return wavelengths, bbl
    return wavelengths[1:], bbl[1:]
|
795b33b59c026c2dbdea85b602254fbf82adb091
| 52,911
|
def normalize_image(image):
    """Scale pixel values into [0, 1] by dividing by 255.

    :param image: a numpy array (or any value supporting /) holding pixel data
    :return: the normalized image
    """
    scale = 255
    return image / scale
|
b6927305c69c9da8e92503c8e29d7212804adfe3
| 52,917
|
def revdatelimit(ui, revset):
"""Update revset so that only changesets no older than 'prefetchdays' days
are included. The default value is set to 14 days. If 'prefetchdays' is set
to zero or negative value then date restriction is not applied.
"""
days = ui.configint(b'remotefilelog', b'prefetchdays')
if days > 0:
revset = b'(%s) & date(-%s)' % (revset, days)
return revset
|
07a19e8500ad7724100d0a6b2e0ceb76564579ba
| 52,920
|
def to_perc(value):
    """Map a 0-255 channel value onto the 0-100 percentage scale."""
    full_scale = 255
    return value * 100 / full_scale
|
b001f57d4f7791a2123124a70e8405c6f84b5a6b
| 52,933
|
def cal_confidence(antecedents_support, combination_support):
    """
    Compute the confidence of a rule: support(combination) / support(antecedents).

    :param antecedents_support: float, support of the antecedents
    :param combination_support: float, support of the combination
    :return: the confidence value, or 0 when the antecedents support is zero
    """
    try:
        confidence = combination_support / antecedents_support
    except ZeroDivisionError:
        # undefined confidence is reported as 0
        confidence = 0
    return confidence
|
f86e06d5969e2cd2f2076c7ca95534906cf66203
| 52,934
|
def listify(obj):
    """Create a list containing `obj` if it is not already a (non-string) iterable.

    Strings are treated as scalars and wrapped (not exploded into characters);
    any other iterable is materialized with list(); non-iterables are wrapped.
    """
    # Explicit checks replace the original assert-based control flow, which
    # silently changed behavior under `python -O` (asserts are stripped).
    if isinstance(obj, str):
        return [obj]
    try:
        iter(obj)
    except TypeError:
        return [obj]
    return list(obj)
|
e0ac1952bbc4b077652724e5bcc81cf883dc2e5c
| 52,936
|
def isAuth(session):
    """
    Return True when the session marks the user as authenticated.

    Uses equality (not identity) on purpose so truthy-equal values like 1
    also count, matching the original contract.
    """
    return 'auth' in session and session['auth'] == True
|
cb833adf39cd26056a02780a699e4220a8d55642
| 52,939
|
def exercise1(n):
    """
    Recursive formulation of f(n) = 3 * n (the multiples of 3).
    Non-positive `n` yields 0.
    """
    if n <= 0:
        return 0
    return 3 + exercise1(n - 1)
|
614d65978d40e34398520da0063384f92bd7c3fd
| 52,941
|
def copyfragment(fragment0, newobj):
    """Copy the data in a fragment to another object.

    Attributes, tables and keytexts of the source fragment 'fragment0' are
    copied onto 'newobj', which is then returned. 'newobj' should be a
    fragment object or some subclass of fragment (such as a 'program'
    object), so copyfragment can be used to 'mutate' a fragment into
    (for example) a program object.
    """
    # Attribute data
    for attr in fragment0.attributes():
        newobj[attr] = fragment0.get_attribute(attr)
    # Tables
    for table in fragment0.tables():
        newobj.addtable(table)
    # Keytexts
    for idx in range(fragment0.nkeytexts()):
        kt = fragment0.keytext(idx)
        newobj.addkeytext(kt.name(), kt.junk_text(), kt.message())
    # Keywords are optional on both sides; skip silently when either the
    # source or the target doesn't support keyword storage.
    try:
        for keyword in fragment0.keywords():
            newobj.addkeyword(keyword)
    except AttributeError:
        pass
    return newobj
|
3d1cf53584052af2aefd283137909e6e3b5b8c35
| 52,944
|
def has_even_number(lst):
    """
    Return True if any element of `lst` is even.
    >>> has_even_number([1, 3, 5])
    False
    >>> has_even_number([1, 2, 3])
    True
    """
    return any(num % 2 == 0 for num in lst)
|
16445b6c1061f9fc289224ab2470b7172285e838
| 52,949
|
import re
def meant_to_say(client, channel, nick, message, matches):
    """
    A plugin so users can correct what they have said. For example::
    <sduncan> this is a foo message
    <sduncan> s/foo/bar
    <helga> sduncan meant to say: this is a bar message
    """
    try:
        last = client.last_message[channel][nick]
    except KeyError:
        # nothing recorded for this user on this channel
        return None
    old, new, reflags = matches[0]
    # 'g' flag: replace every occurrence; default is first occurrence only
    count = 0 if re.search('g', reflags, re.I) else 1
    # 'i' flag: case-insensitive match
    flags = re.I if re.search('i', reflags, re.I) else 0
    modified = re.compile(old, flags).sub(new, last, count)
    # Don't respond if we don't replace anything ... it's annoying
    if modified == last:
        return None
    return u'{0} meant to say: {1}'.format(nick, modified)
|
f59ff2555588dbb8b45393b9a6384a6c3351dff7
| 52,950
|
def get_entities_with_offsets(seq, offsets):
    """
    Gets entities from sequence
    Args:
        seq (list): sequence of IOB labels (e.g. 'B-PER', 'I-PER', 'O').
        offsets (list of integer pair): character offset (start, end) per token
    Returns:
        list: list of (chunk_type, chunk_start, chunk_end, pos_start, pos_end)
    Example:
        >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
        >>> offsets = [(0,10), (11, 15), (16, 29), (30, 41)]
        >>> print(get_entities_with_offsets(seq, offsets))
        [('PER', 0, 2, 0, 15), ('LOC', 3, 4, 30, 41)]
    """
    i = 0
    chunks = []
    seq = seq + ['O'] # add sentinel
    # entity type is the part after the IOB prefix ('B-PER' -> 'PER', 'O' -> 'O')
    types = [tag.split('-')[-1] for tag in seq]
    # never scan past the tokens that actually have offsets
    max_length = min(len(seq)-1, len(offsets))
    while i < max_length:
        if seq[i].startswith('B'):
            # if we are at the end of the offsets, we can stop immediatly
            j = max_length
            # NOTE(review): when the chunk starts exactly two positions before
            # max_length, the scan below is skipped and the chunk runs to the
            # end regardless of the next tag — confirm this is intentional.
            if i+2 != max_length:
                # advance j past consecutive I- tags of the same entity type;
                # j stops on the first tag that breaks the chunk
                for j in range(i+1, max_length):
                    if seq[j].startswith('I') and types[j] == types[i]:
                        continue
                    break
            start_pos = offsets[i][0]
            # end position is inclusive (offset end minus one)
            end_pos = offsets[j-1][1]-1
            chunks.append((types[i], i, j, start_pos, end_pos))
            i = j
        else:
            i += 1
    return chunks
|
9c46d424b7490b207bb69858a034de7f0ac8da49
| 52,952
|
def sum_ignore_none(iterable):
    """
    Sum the truthy entries of an iterable.

    :param iterable: values to sum; None/False (and any other falsy values,
        including 0, which contributes nothing anyway) are skipped
    :return: the sum of the remaining numeric values
    """
    return sum(value for value in iterable if value)
|
0409fdca11014eddb3ce8fff1172d8a193c15e25
| 52,953
|
def org_exists(g, org):
    """See if |org| organization exists.

    :param g: a client exposing get_organization(name)
    :param org: organization name to look up
    :return: True when the lookup succeeds, None when it raises
    """
    try:
        g.get_organization(org)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return None
    return True
|
056280479eed99e1f61401da13d8dc15ddf72eaf
| 52,965
|
def replace_grid_symbols(grid, old_to_new_map):
    """Replaces symbols in the grid.
    Symbols with no mapping defined are kept unchanged.
    Args:
        grid: Represented as a list of strings.
        old_to_new_map: Mapping between symbols.
    Returns:
        Updated grid (new list; input is not modified).
    """
    return [''.join(old_to_new_map.get(ch, ch) for ch in row) for row in grid]
|
98858f78e396338c3564e46178080ae948cbd779
| 52,973
|
import pickle
def read_pickle(filespec):
    """Load and return the pickled object stored at `filespec`.

    Prints a diagnostic message and re-raises on any failure (missing file,
    corrupt pickle, ...).
    """
    try:
        with open(filespec, 'rb') as handle:
            return pickle.load(handle)
    except Exception as exc:
        print('File not loaded properly.\n\n{}'.format(str(exc)))
        raise
|
d3d4b220dc167b85e5e3628df39635d9aac5a99b
| 52,976
|
from typing import Optional
import re
def isolate_el_version_in_release(release: str) -> Optional[int]:
    """
    Extract the RHEL major version from a release field.

    Returns the integer version when the release contains a ``.el<N>``
    component (terminated by a dot or the end of the string), else None.
    """
    found = re.match(r'.*\.el(\d+)(?:\.+|$)', release)
    return int(found.group(1)) if found else None
|
9c727b9e352f2f4151f8b04985d49b0a262de8eb
| 52,977
|
def _read_file(filename: str) -> str:
    """
    Return the full text content of a file.

    :param filename: path of the file to read
    :return: the file contents as a single string
    """
    with open(filename) as handle:
        content = handle.read()
    return content
|
260860401368ecdb471ab8eac2230dbb7ad818ae
| 52,985
|
def get_sub_indices(seq, sub_seq):
    """
    Find every index where `sub_seq` starts inside `seq`.

    :param seq: a sequence (list, str, tuple, ...), e.g. ["a", "b", "c", "b", "c"]
    :param sub_seq: the sub-sequence to look for, e.g. ["b", "c"]
    :return: list of starting indices of each (possibly overlapping)
        occurrence, e.g. [1, 3]
    """
    window = len(sub_seq)
    return [i for i in range(len(seq)) if seq[i:i + window] == sub_seq]
|
8742cd2416faa7131066de859f0b651fc8026e8a
| 52,987
|
def default(value, default):
    """
    Return `default` when `value` is :data:`None`, otherwise `value` itself.
    """
    return default if value is None else value
|
517ffb3c6f67ad9290d8c44be5bd54a90bc4e37c
| 52,995
|
def transpose(a):
    """Transpose a list of lists.

    :param a: a list of equal-length rows
    :return: a list of tuples, where the n-th tuple holds the n-th element
        of every row
    """
    # zip(*a) splats the rows as arguments, so the n-th output item is a
    # tuple of the n-th elements of each row. On Python 3, zip returns a
    # one-shot iterator; materialize it so the result is a reusable,
    # indexable list as the docstring promises.
    return list(zip(*a))
|
780deed4dffac5cec4793671fc96d823026e4dd9
| 53,003
|
from typing import List
def generate_angles(resolution: int, register_width: int = 10) -> List[int]:
    """Creates a list of rotation angles.

    The 2**resolution evenly-spaced angle steps between 0 and 2pi are
    encoded onto the full integer range of a register_width-bit register.

    Parameters
    ----------
    resolution : int
        The required resolution between 0 and 2pi between angle values.
    register_width : int, optional
        The number of bits to encode the angles onto, by default 10

    Returns
    -------
    List[int]
        Integers encoding angles between 0 and 2pi on `register_width` bits.

    Raises
    ------
    ValueError
        If the resolution required exceeds the register width available.
    """
    if resolution > register_width:
        raise ValueError(
            f"Resolution ({resolution}) can not be greater than " +
            f"register width ({register_width})!"
        )
    scaling = (2**register_width) / (2**resolution)
    return [int(step * scaling) for step in range(2**resolution)]
|
aef44b19b6c358a1a192cfb5e466d51460d85298
| 53,009
|
def encode_resource_type_code(resource_type: int) -> str:
    """Encode a number between 0 and 0xFFFF as a 4-character hexadecimal string."""
    return f"{resource_type:04x}"
|
76947f380ff5fa9e555e89533fbb97e1ee840472
| 53,010
|
def uniquify(list):
    """
    Remove duplicate items from a list, preserving the original order.

    Parameters: list [type=list]
        A list of items (must be hashable)
    Returns: A new list of the unique items
    """
    # dict keys are insertion-ordered (Python 3.7+) and de-duplicate in one pass
    return [item for item in dict.fromkeys(list)]
|
9f7b2f3af411e42cb2cade4fbadb232af19c35dc
| 53,011
|
import io
import torch
def torch_load_cpu(data):
    """Deserialize a pytorch object from raw bytes with CPU placement.

    Also pins torch to a single thread after loading, matching the original
    worker setup.
    """
    buffer = io.BytesIO(data)
    loaded = torch.load(buffer, map_location=lambda storage, loc: storage)
    buffer.close()
    torch.set_num_threads(1)
    return loaded
|
6eab7266fb863341d3c05259f3164d927fc51e54
| 53,013
|
def distance(point1, point2):
    """
    Distance between two points
    :param tuple point1: first point coordinates (1, 2)
    :param tuple point2: second point coordinates (3.0, 4)
    :return: line segment length
    :rtype: float
    >>> distance((0, 0), (3, 4.0))
    5.0
    """
    # (doctest above previously lacked the comma between the arguments and
    # would have raised a TypeError when run)
    x1, y1 = point1
    x2, y2 = point2
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
|
fc7c33a7b0e54412e4d9aca7f4bc803e78da5d07
| 53,016
|
def load_file(file_path):
    """
    Returns a file given a path

    :param file_path: path of the file to open
    :return: an open read-mode text file object; NOTE: the caller is
        responsible for closing it (or using it as a context manager)
    """
    return open(file_path, 'r')
|
4bb19811bca9c1cf472648d20439280bcfda09a2
| 53,017
|
def strhex2bin(strhex):
    """
    Convert a string of hex digits into the equivalent string of binary
    digits (four bits per input character).

    Lowercase hex digits are accepted; a non-hex character raises KeyError,
    matching the original table-lookup behavior.
    """
    # Generate the nibble table instead of hard-coding sixteen entries.
    nibbles = {digit: format(value, '04b')
               for value, digit in enumerate('0123456789ABCDEF')}
    return ''.join(nibbles[ch.upper()] for ch in strhex)
|
e5271572e4777523b28f49766665f00d9f6902e5
| 53,020
|
def __create_dict_entry(key: str, value: str) -> str:
    """
    Render a single dict entry as source text: ``'key': value,``

    Args:
        key (str): key of the entry (emitted single-quoted)
        value (str): value of the entry (emitted verbatim)
    Returns (str): the formatted entry, including the trailing comma
    """
    return f"'{key}': {value},"
|
0facaddc3d5b15ab4df420914e60ed6d2c17bb4a
| 53,023
|
def cookie_to_dict(cookie):
    """Return cookie attributes as dict.

    Collects every standard cookie field that is set (non-None), plus the
    non-standard `_rest` attributes under the 'rest' key.
    """
    names = (
        'version', 'name', 'value', 'port', 'domain', 'path', 'secure',
        'expires', 'discard', 'comment', 'comment_url', 'rfc2109')
    result = {name: getattr(cookie, name)
              for name in names
              if getattr(cookie, name, None) is not None}
    result['rest'] = getattr(cookie, '_rest', {})
    return result
|
723a61cfa89a28fc72450f6ddc866eaf0e276b8e
| 53,028
|
def split(semicolon_changed_batch):
    """Split a batch

    :param semicolon_changed_batch: batch string whose original delimiters
        have presumably already been replaced with spaces — note the split
        here is on ' ' (space), not ';'; TODO confirm against the caller
    :return: list of the space-separated fields
    """
    return semicolon_changed_batch.split(' ')
|
4ec03c6b33a9dda6268b7b617deb07df1bba8aa5
| 53,029
|
def alpha_cut(a, alpha):
    """
    Equivalent to `a.alpha_cut(alpha)`.

    Free-function convenience wrapper: delegates directly to the object's
    own ``alpha_cut`` method and returns its result unchanged.
    """
    return a.alpha_cut(alpha)
|
22256aec5fad9d752a9ca0d7c07f2467313b7695
| 53,031
|
def exchange(programs, p1, p2):
    """Swap the items at positions `p1` and `p2` in place and return the list."""
    first = programs[p1]
    programs[p1] = programs[p2]
    programs[p2] = first
    return programs
|
7a9aee6dafdcca7f9861938115970a890a598669
| 53,033
|
def _animal_id_suffixed(prev_id: float, num: int, addition_base=0.5) -> float:
    """Derive a new animal_id distinct from `prev_id` by adding a decimal suffix.

    num == 0 returns `prev_id` unchanged; otherwise addition_base ** num is
    added, so successive `num` values yield strictly smaller offsets.
    """
    return prev_id if num == 0 else prev_id + addition_base ** num
|
03102b7f983308ec596a657a100172bcda2f5e1e
| 53,035
|
def in_circle(x, y, x0, y0, r):
    """
    Return True when point (x, y) lies inside or on the circle centered at
    (x0, y0) with radius r.
    """
    # Compare squared distances; boundary points count as inside.
    return (x - x0) ** 2 + (y - y0) ** 2 - r ** 2 <= 0
|
c2494488ad4dd8fbf56f268b91275ae52bee0b5d
| 53,037
|
def no_duplicate_values(data, key):
    """
    Check whether the collection stored under `key` in `data` is free of
    duplicates (for a dict value, keys are checked — always unique).
    >>> no_duplicate_values({"intents": ["a", "a"]}, "intents")
    False
    >>> no_duplicate_values({"intents": ["a"]}, "intents")
    True
    >>> no_duplicate_values({"entities": {"a": []}}, "entities")
    True
    >>> no_duplicate_values({"entities": {"a": []}, "b": []}, "entities")
    True
    """
    values = data.get(key, [])
    # sorted comparison (rather than a length check) keeps the original
    # TypeError behavior for unorderable elements
    return sorted(values) == sorted(set(values))
|
62f38ec8541cc4b638a8b4698e5dbc4e91cd53b0
| 53,038
|
def _fips_cleaner(code):
    """Standardize county FIPS codes as 5-digit strings.

    Parameters
    ----------
    code : pandas.Series object
        A series containing FIPS codes as string, int, or float type.

    Returns
    ----------
    pandas.Series
        Codes with any '.'/'/'-suffixed part dropped, zero-padded to 5 chars.
    """
    as_text = code.astype(str)
    # keep everything before the first '/' or '.' (strips float suffixes)
    trimmed = as_text.str.extract('(^[^/.]*).*', expand=False)
    return trimmed.str.zfill(5)
|
b347168f433ae43afa29a251cce886168ae4a646
| 53,040
|
def _apply_gt(in_x, in_y, geo_transform):
    """Apply a GDAL-style geotransform to pixel coordinates.

    Args:
        in_x (int): the x pixel value
        in_y (int): the y pixel value
        geo_transform (list): a 6-element geo-transform describing a raster

    Returns:
        tuple: (geographic-x, geographic-y)
    """
    # +0.5 targets the pixel center rather than its top-left corner
    col = in_x + 0.5
    row = in_y + 0.5
    out_x = geo_transform[0] + col * geo_transform[1] + row * geo_transform[2]
    out_y = geo_transform[3] + col * geo_transform[4] + row * geo_transform[5]
    return (out_x, out_y)
|
a8ac6fcde1070bfced67acb38db8eb734b20a9c0
| 53,041
|
def no_red(obj):
    """Sum all numbers in a parsed-JSON object, skipping any dict (and
    everything inside it) that has "red" among its values."""
    # exact type checks are deliberate: bools (type bool) must not count as ints
    if type(obj) == int:
        return obj
    if type(obj) == list:
        return sum(no_red(element) for element in obj)
    if type(obj) == dict:
        # a "red" *value* poisons the whole dict; keys are never inspected
        return 0 if 'red' in obj.values() else no_red(list(obj.values()))
    # strings and anything else contribute nothing
    return 0
|
0df4323c179c23904f9bb880a92684337371fa09
| 53,042
|
from typing import Sequence
import re
def parse_log(log_file: str, fields: Sequence[str]):
    """Parse numeric values for the wanted fields out of a log file.

    Args:
        log_file (str): the log file path.
        fields (Sequence[str]): the wanted field names.

    Returns:
        dict: field name -> list of parsed float values, in file order.
    """
    res = {field: [] for field in fields}
    pattern = re.compile(r"(\w+): ([0-9.]*[0-9])")
    with open(log_file, "r") as handle:
        for line in handle:
            for key, value in pattern.findall(line):
                if key in fields:
                    res[key].append(float(value))
    return res
|
6165b4aeaafa3e3eecb2fcae39a6542cafc1653c
| 53,043
|
def cast_env_to_int_error(what):
    """
    Build the "can not cast from environment variable to integer" error
    message.

    :param what: Environment variable name.
    :return: String - Can not cast from environment variable to integer.
    """
    # string concatenation (not format) preserves the TypeError on non-str input
    prefix = "ERROR: " + what
    return prefix + " value cannot be cast from ENV variable to int"
|
4d356af272bab046e531fb9dfdb6433917869a9b
| 53,045
|
import torch
def unproj_map(width, height, f, c=None, device="cpu"):
    """
    Get camera unprojection map for given image size.
    [y,x] of output tensor will contain unit vector of camera ray of that pixel.
    :param width: image width in pixels
    :param height: image height in pixels
    :param f: focal length, either a float (used for both axes) or tensor [fx, fy]
    :param c: principal point, optional, either None or tensor [cx, cy];
        if not specified uses center of image
    :param device: device the X/Y grids are moved to (default "cpu")
    :return: unproj map (height, width, 3) of unit-norm ray directions
    """
    if c is None:
        # default principal point: the image center
        c = [width * 0.5, height * 0.5]
    else:
        c = c.squeeze()
    if isinstance(f, float):
        f = [f, f]
    elif len(f.shape) == 0:
        # 0-dim tensor: broadcast the single focal value to [fx, fy]
        f = f[None].expand(2)
    elif len(f.shape) == 1:
        f = f.expand(2)
    # pixel offsets from the principal point, one grid per axis
    Y, X = torch.meshgrid(
        torch.arange(height, dtype=torch.float32) - float(c[1]),
        torch.arange(width, dtype=torch.float32) - float(c[0]),
    )
    X = X.to(device=device) / float(f[0])
    Y = Y.to(device=device) / float(f[1])
    Z = torch.ones_like(X)
    # rays point down -Z with +Y flipped (OpenGL-style camera convention)
    unproj = torch.stack((X, -Y, -Z), dim=-1)
    # normalize each ray to unit length, in place
    unproj /= torch.norm(unproj, dim=-1).unsqueeze(-1)
    return unproj
|
a8a3b32dbd2e10225473619da07fa6e6f8d8ba6b
| 53,048
|
def so_joins(sqlclass, joins=None):
    """Return a list with all joins from a SQLObject.
    The list includes the columns from InheritableSO's bases.

    :param sqlclass: SQLObject class whose ``sqlmeta.joins`` are collected
    :param joins: accumulator list used by the recursive calls; leave as None
        at the top level (a fresh list is created per call, avoiding the
        mutable-default pitfall)
    :return: the accumulated joins, walking up ``__base__`` while the class
        is marked inheritable
    """
    if joins is None:
        joins = []
    joins.extend(sqlclass.sqlmeta.joins)
    if sqlclass._inheritable:
        # recurse into the base class; the shared accumulator collects its joins
        so_joins(sqlclass.__base__, joins)
    return joins
|
e75d286afda8b1e45039e2fafcbb92c0347ec4d4
| 53,049
|
def average(lst):
    """
    Arithmetic mean of a (non-empty) list of numbers.

    Raises ZeroDivisionError when the list is empty.
    """
    total = sum(lst)
    return total / len(lst)
|
dc18524ac4abf4e7fd98a7fc6e11bace04686747
| 53,052
|
def is_local_filepath(filepath):
    """Determine whether `filepath` is a local (absolute, slash-rooted) path."""
    has_leading_slash = filepath.startswith("/")
    return has_leading_slash
|
4f3029b1581ee7f93b44c47e74b89fa1c0274e2f
| 53,055
|
def calc_seeds_cost(crop, ya, harvest_weight):
    """
    Estimate the yearly seed cost for a crop.

    :param crop: the crop selected to be grown on the farm
    :param ya: the expected yield for that crop
    :param harvest_weight: the harvest weight selected for that crop
    :return: the overall seed cost per year
    :raises RuntimeError: for crops without a known per-seed price

    Seeds required are 40% more than the plants harvested, to account for
    error, unsuccessful propagation or thinning.
    """
    per_seed_prices = {'lettuce': 0.10}
    if crop not in per_seed_prices:
        raise RuntimeError("Unknown crop: {}".format(crop))
    plants_harvested = ya / harvest_weight
    seeds_required = plants_harvested * 1.4
    return seeds_required * per_seed_prices[crop]
|
6be080df56e62fa3dbd49f9c8e2ea3d0fcaf0bf2
| 53,056
|
from typing import Tuple
from typing import Any
def replace_value_by_index(xs: Tuple[Any, ...], pos: int, value: Any) -> Tuple[Any, ...]:
    """
    Return a new tuple equal to `xs` with the item at `pos` replaced.

    :param xs: A tuple.
    :param pos: Zero-based index of the item to replace; out-of-range (or
        negative) positions leave the tuple unchanged, as before.
    :param value: New value.
    """
    items = list(xs)
    if 0 <= pos < len(items):
        items[pos] = value
    return tuple(items)
|
7f47b606772e8f666bedc40ef206c93de9e7523b
| 53,069
|
import click
def get_hostname(ctx=None):
    """Return the configured API Gateway hostname.

    Falls back to the current click context when `ctx` is not supplied,
    and raises a click.UsageError when `core.hostname` is not configured.
    """
    ctx = ctx or click.get_current_context()
    hostname = ctx.obj['config'].get('core.hostname', None)
    if hostname is not None:
        return hostname
    msg = "No hostname configured. Run `cray config set core " + \
          "hostname={Cray API URL}`"
    raise click.UsageError(msg)
|
bfdb690cde9365dfec57a722d276731e7ef94b74
| 53,077
|
import re
def not_both_titles(names_string):
    """Returns a list of names not preceded by [Pp]rof./[Dd]oc. and
    followed by ', Ph.D.' at the same time — names carrying only one of
    the two titles (or neither) are kept.
    >>> not_both_titles('doc. Josef Tyl, Rudolf Srp, Ph.D., Pavel Vlk, doc. RNDr. Petr Berka, Ph.D., Jan Hora')
    ['doc. Josef Tyl', 'Rudolf Srp, Ph.D.', 'Pavel Vlk', 'Jan Hora']
    """
    # a name that is either preceded by [Pp]rof./[Dd]oc. and followed by Ph.D.
    # (first, non-capturing alternative: findall yields '' for these matches)
    # or other name with potential titles (captured in group 1)
    pat = re.compile(r'''
    (?:(?:(?:[Dd]oc\.)|(?:[Pp]rof\.))\s[^,]+,\sPh\.D\.) # it is either
    | # or
    ((?:[A-Za-z]+\.\s)*[A-Z][a-z]+\s[A-Z][a-z]+(?:,\sPh\.D\.)?) # it is
    ''', re.X)
    # drop the empty strings produced by the non-capturing alternative
    return [g1 for g1 in pat.findall(names_string) if g1]
|
4b3b358807d022d284aa8fd45ca32928b560bc56
| 53,079
|
def ask_for_int(message, max=1000000000):
    """
    Support function to ask users for an integer, to use in place of the
    regular input() method.

    :param message: string prompt shown to the user
    :param max: upper bound (inclusive) for accepted values
    :return: the validated user-input integer
    """
    while True:
        raw = input(message)
        try:
            value = int(raw)
        except ValueError:
            # narrowed from a bare `except`, which also swallowed Ctrl-C
            print("please enter an integer")
            continue
        if value <= max:
            return value
        # typo fixed: "and integer" -> "an integer"
        print("please enter an integer less than or equal to " + str(max))
|
e1eb20d43cf741016fc4f829b629ddcaa58f3e8b
| 53,083
|
def ghid(story):
    """Given a story, extract a github id if one is present.

    Returns a single space when external_id is explicitly None, the trailing
    path segment of the id otherwise, and None when the story has no
    "external_id" key at all (original implicit-return behavior).
    """
    if "external_id" not in story:
        return None
    eid = story["external_id"]
    if eid is None:
        return " "
    return eid[eid.rfind("/") + 1:]
|
139fddc06cbb44ef507818e720950dce512e1e83
| 53,084
|
def get_metric_order(metric_key: str) -> int:
    """
    Return the comparison 'order' of a metric.

    :param metric_key: key of the metric
    :return: -1 when smaller is better (losses), +1 when greater is better
        (accuracy/AUC style metrics)
    :raises ValueError: when the ordering cannot be inferred from the name
    """
    normalized = metric_key.strip().lower()
    if normalized.endswith('loss'):
        return -1
    # str.endswith accepts a tuple of suffixes
    if normalized.endswith(('acc', 'accuracy', 'auc')):
        return 1
    raise ValueError("Could not define ordering of a metric: {}, please provide it manually".format(metric_key))
|
441bbbd6a91922b2d79a07ab2a5210370ee092dd
| 53,088
|
import re
def replace_titles(targets, predictions):
    """Replaces titles with a __unk__ token.

    A title is any ``@ ... ( ... ) ... @`` span; every such span in each
    string of both lists is replaced by "__unk__".
    """
    pattern = re.compile(r"\@([^@]*\([^@]*\)[^@]*)\@")
    def scrub(text):
        return pattern.sub("__unk__", text)
    return ([scrub(t) for t in targets], [scrub(p) for p in predictions])
|
e7d4635d497793077f6366f8b4457d5e168e8f4b
| 53,090
|
import string
def convert_special_chars_to_ascii(func_name):
    """
    Convert non-alphanumeric characters to their ascii representation.
    This is how Haddock generates links for operators.
    '!?' => '-33--63-'

    :param func_name: the name to escape, or None
    :return: the escaped name, or None when `func_name` is None
    """
    if func_name is None:  # identity check replaces the `== None` anti-idiom
        return None
    return ''.join(
        c if c not in string.punctuation else '-' + str(ord(c)) + '-'
        for c in func_name
    )
|
81423591d18bbd25f30b9103599ddaec11e8e7e9
| 53,091
|
def phoneme_tuple(phoneme):
    """Represent a phoneme as a hashable (start, end, label) tuple.

    Arguments
        phoneme - the phoneme object to convert (must expose start(), end()
            and a string representation)
    Returns
        tuple(float, float, string) for the phoneme
    """
    start_time = phoneme.start()
    end_time = phoneme.end()
    label = str(phoneme)
    return (start_time, end_time, label)
|
f5894c1629752a5ed9399bc4dfd9b97a612335f8
| 53,094
|
def getcamera_target(minextents, maxextents):
    """
    Compute the center of the DTM in pixel space
    Parameters
    -----------
    minextents (tuple) (xmin, ymin, zmin)
    maxextents (tuple) (xmax, ymax, zmax)
    Returns
    --------
    center (tuple) (xcenter, ycenter, zcenter)

    NOTE(review): this computes (max - min) / 2 on each axis — the
    half-extent — which equals the center only when the minimum extent is
    the origin. Confirm with callers whether (max + min) / 2 was intended.
    """
    xcenter = (maxextents[0] - minextents[0]) / 2.0
    ycenter = (maxextents[1] - minextents[1]) / 2.0
    zcenter = (maxextents[2] - minextents[2]) / 2.0
    return xcenter, ycenter, zcenter
|
0e9eff15e96369ba8db55e230636d3959f99ea57
| 53,100
|
import torch
def aesthetics_reward(aesthetic_scores, selections, num_of_picks):
    """
    Average aesthetic score over the selected frames.

    Given (args):
        aesthetic_scores: a sequence of aesthetic scores [1, seq_len]
        selections: binary tensor marking selected (1) vs non-selected (0)
            frames, e.g. [0, 0, 1, 0, 1]
        num_of_picks: divisor — the number of selected frames
    Return:
        aes_reward: scalar aesthetics reward
    """
    scores = aesthetic_scores.squeeze(0)
    selected_total = torch.sum(scores * selections)
    return selected_total / num_of_picks
|
61de3108fd5caf0937636ccbe8fe34b5debf2e0d
| 53,104
|
import re
def remove_tags(text):
    """Remove SSML tags (anything between '<' and '>') from a text string.

    Args:
        text: raw text possibly containing SSML tags
    Returns:
        the text with all tags stripped
    """
    return re.sub(r'<[^>]+>', '', text)
|
bb889adecf715fbec84cd8e34270e88cd70442f9
| 53,107
|
import re
def compile_regex(kw):
    """Compile a regex matching any keyword in `kw` as a standalone token
    (not preceded by a letter or '.', not followed by a letter)."""
    alternatives = "|".join(kw)
    pattern = r"(?<![a-zA-Z.])(" + alternatives + ")(?![a-zA-Z])"
    return re.compile(pattern)
|
1dd9325bacde0653bddbcabef335e87e3a830f51
| 53,115
|
def none_len_sortkey(pair):
    """Sort key: -1 for None (sorts before everything), else the length."""
    return -1 if pair is None else len(pair)
|
5f75430440157a6e2291558205441ee0a3139fd8
| 53,116
|
def gen_all_strings(word):
    """
    Recursively generate every string composable from the letters in `word`,
    in any order (all subsets, all permutations of each subset).

    Returns a list including the empty string.
    """
    # base case: only the empty string can be formed from no letters
    if not word:
        return [""]
    first, rest = word[0], word[1:]
    rest_strings = gen_all_strings(rest)
    result = list(rest_strings)
    # insert `first` at every possible position of every shorter string
    for candidate in rest_strings:
        result.extend(candidate[:pos] + first + candidate[pos:]
                      for pos in range(len(candidate) + 1))
    return result
|
21660702b6af20fdaae8d4905a67ed8d5ee36af9
| 53,117
|
def int_scale_range(values, out_value, in_value=1.0):
    """Rescale values from [0, in_value] to truncated ints in [0, out_value].

    Args:
        values (iterable): range of values to convert
        out_value (int): max output value
    Keyword args:
        in_value (number): max value of the input range (default 1.0)
    """
    denominator = float(in_value)
    return [int(v / denominator * out_value) for v in values]
|
767ed3b860c6049ec0353529a377354a5c3b8b03
| 53,119
|
def movimentacoes_da_conta(numero_conta, movimentacoes):
    """
    Filter a list of movements down to those belonging to the given account
    number (each movement's first element is its account number).
    """
    return [mov for mov in movimentacoes if mov[0] == numero_conta]
|
ae65e2b5eb2b0fb993935e9c436679da0ad98e22
| 53,121
|
def _check_count(counter):
    """
    Pick the most common dimension from `counter` and report whether at
    least two thirds of all counted values agree on it.

    Returns (agreed: bool, dim).
    """
    (dim, count), = counter.most_common(1)
    total = sum(counter.values())
    return count >= (2 / 3) * total, dim
|
4163cfcb65a0b3011504039faf44c88589acade6
| 53,135
|
def get_html_lines(html_files):
    """
    Collect all lines from the given open html files into one flat list.

    Parameters
    ----------
    html_files: list of open file objects (e.g. from get_html_files())

    Return
    ------------
    list of strs: every line from every file, in order
    """
    return [line for handle in html_files for line in handle.readlines()]
|
9804e666d1d7321b44a2b17ed780a0fa261ceae1
| 53,138
|
def is_julia_version_greater_eq(Main, version="1.6"):
    """Check whether the running Julia version is >= `version`.

    The comparison is evaluated inside Julia via the provided `Main` module.
    """
    expression = f'VERSION >= v"{version}"'
    return Main.eval(expression)
|
81e4bc648374ea1e2b1a28f1b558c512c31a7c1a
| 53,139
|
def calc_new_dimensions(max_size: int, width, height):
    """
    Scale (width, height) so the larger side fits `max_size`, preserving
    the aspect ratio.

    :param max_size: int, the bounding size
    :param width: int
    :param height: int
    :return: tuple - ((new_width, new_height), scalar that was applied)
    """
    best_fit_scalar = min(max_size / width, max_size / height)
    new_size = (int(width * best_fit_scalar), int(height * best_fit_scalar))
    return new_size, best_fit_scalar
|
6f1849d0d6941fb18dd5938ecc8c872f92aaef98
| 53,143
|
import torch
def xywh_to_xyxy(boxes, width=1.0, height=1.0):
    """
    Convert bbox from xywh format to xyxy format.

    NOTE: `boxes` is modified in place (and also returned); pass a clone if
    the caller still needs the original values.
    Parameters
    ----------
    boxes : Tensor[N, 4])
        They are expected to be in (x_center, y_center, w, h) format,
        normalized to [0, 1]
    width : float
        DeNorm bbox from range [0, 1] to image size
    height : float
        DeNorm bbox from range [0, 1] to image size
    Returns
    -------
    boxes: Tensor[N, 4])
        in (x, y, x, y) format
    """
    # center -> top-left corner
    boxes[:, 0] -= boxes[:, 2] / 2
    boxes[:, 1] -= boxes[:, 3] / 2
    # width/height -> bottom-right corner
    boxes[:, 2] += boxes[:, 0]
    boxes[:, 3] += boxes[:, 1]
    # clamping happens before denormalization, so it assumes [0, 1] coords
    boxes = torch.clamp(boxes, min=0, max=1)
    boxes[:, 0] *= width
    boxes[:, 2] *= width
    boxes[:, 1] *= height
    boxes[:, 3] *= height
    return boxes
|
a49a0cbe192b29b59284f679148c6950650d0e05
| 53,146
|
def reg_extract(openfield, address):
    """
    Extract registration data from an openfield string.

    Accepted format: 'ip:port:pos', optionally followed by comma-separated
    key=value options, e.g. 'ip2:port:pos2,reward=bis2a'.

    :param openfield: str
    :param address: str — fallback reward; must match a 'source' option
        when one is given
    :return: tuple (ip, port, pos, reward)
    :raises ValueError: when a 'source' option does not match `address`
    """
    fields = openfield.split(",")
    base = fields.pop(0)
    options = {}
    # Only 1 extra param at a time is expected; beware if more are added!
    for extra in fields:
        key, value = extra.split("=")
        options[key] = value
    ip, port, pos = base.split(":")
    reward = options.get("reward", address)
    source = options.get("source")
    if source and source != address:
        raise ValueError("Bad source address")
    return ip, port, pos, reward
|
9094bc647e120007a092d8dfbfdcba78eb018101
| 53,149
|
def get_num_char_vars(data, missing=False, limit=None):
    """Return lists of column names for numeric and character columns.

    :param data: a pandas DataFrame
    :param missing: boolean for whether to return only names of columns
        with missing values
    :param limit: the upper bound (exclusive) of non-missing values to be
        included; defaults to the row count when `missing` is set
    :return: (num_vars, char_vars) lists of column names
    """
    num_vars = []
    char_vars = []
    if missing:  # truthiness check replaces the `missing == True` anti-idiom
        if not limit:
            limit = data.shape[0]
    else:
        # one above the row count so every column passes the filter
        limit = data.shape[0] + 1
    for col in data.columns:
        if data[col].count() < limit:
            # object dtype columns are treated as character data
            if data[col].dtypes == "object":
                char_vars.append(col)
            else:
                num_vars.append(col)
    return num_vars, char_vars
|
e988c75a281bde3030004035023c1c56b990e416
| 53,155
|
import inspect
def find_subclasses(mod, cls):
    """Find all the subclasses of `cls` defined in a module.

    Parameters
    ----------
    mod : module
    cls : class

    Returns
    -------
    dict mapping subclass name -> subclass. Note `cls` itself is included
    when present in the module (issubclass(cls, cls) is True).
    """
    # dict comprehension instead of dict([...]) (flake8-comprehensions C404)
    return {
        name: obj
        for name, obj in inspect.getmembers(mod)
        if inspect.isclass(obj) and issubclass(obj, cls)
    }
|
dfa505486218841f6900397edd46f997e090a6ef
| 53,159
|
def parse_electoral_tweets_data(data_path):
    """Extract electoral tweets and emotions from the dataset.
    Args:
        data_path (str): Path to annotated tweets file (tab-separated;
            the first line is a header and is skipped).
    Returns:
        ([(str, str)]): Tweets and their emotion labels zipped.
    """
    data = []
    with open(data_path) as data_file:
        # skip the header line
        data_file.__next__()
        previous_row = ''
        for line in data_file:
            # stitch any carried-over partial row onto the current line
            line = previous_row + ' ' + line
            row = line.rstrip('\t\n').split('\t')
            if len(row) < 29:
                # Some rows in the data file are improperly newlined;
                # carry the partial row over to be merged with the next line
                previous_row = line
                continue
            else:
                previous_row = ''
            # column 13 holds the tweet text, column 15 its emotion label
            tweet = row[13]
            emotion = row[15]
            data.append((tweet, emotion))
    return data
|
b6a67cf5ff62c0337c49b1ef5e5bbd8a19773ffb
| 53,161
|
def update_gamma(gamma, iteration, eps=1e-4):
    """Update `gamma` for forward-backward splitting: halve it every 20th
    iteration, never letting it fall below `eps`."""
    if iteration % 20 == 0:
        gamma = gamma / 2.
    if gamma < eps:
        return eps
    return gamma
|
a439b7f254644233512be8f479b08d207bed40e7
| 53,162
|
def GetStartDeployMessage(conn_context,
                          resource_ref,
                          operation='Deploying container to',
                          resource_kind_lower='service'):
  """Returns a user message for starting a deploy.
  Args:
    conn_context: connection_context.ConnectionInfo, Metadata for the run API
      client.
    resource_ref: protorpc.messages.Message, A resource reference object for the
      resource. See googlecloudsdk.core.resources.Registry.ParseResourceId for
      details.
    operation: str, what deploy action is being done.
    resource_kind_lower: str, resource kind being deployed, e.g. "service"
  """
  substitutions = {
      'operation': operation,
      'operator': conn_context.operator,
      'resource_kind': resource_kind_lower,
      'ns_label': conn_context.ns_label,
      'resource': resource_ref.Name(),
      'ns': resource_ref.Parent().Name(),
  }
  # {{bold}}/{{reset}} survive .format() as literal {bold}/{reset} markers.
  template = ('{operation} {operator} {resource_kind} '
              '[{{bold}}{resource}{{reset}}] in {ns_label} '
              '[{{bold}}{ns}{{reset}}]' + conn_context.location_label)
  return template.format(**substitutions)
|
4f448a3e9c1500015c06efa376fe5f33a714a9ce
| 53,163
|
def main() -> int:
    """Program entry point: print a greeting and report success (0)."""
    greeting = "Hello, world!"
    print(greeting)
    return 0
|
16dd0f41bd74675f1f18f84254be9ab58c42dc94
| 53,166
|
import json
def load_json(filepath):
    """Read the JSON file at ``filepath`` and return the parsed data."""
    with open(filepath, "r") as handle:
        contents = handle.read()
    return json.loads(contents)
|
2134516692487ff7f202a51f0f712ce1aec8119f
| 53,171
|
def get_features_from_df(df, features):
    """
    Select the feature columns of a dataframe.
    Args:
        df (pd.DataFrame) A pd.DataFrame where the features are extracted.
        features (list of str) Name of the features columns in df.
    Returns:
        feature_df A pd.DataFrame that only contains the features used in ML.
    """
    return df[features]
|
feb1592c2fb36f872d3e352560b2774d1af088ef
| 53,172
|
def increment_revision(title):
    """
    Increment the revision number at the end of the given title:
    "abc" --> "abc (2)"
    "abc (2)" --> "abc (3)"
    etc ..
    Used for cloning objects.

    Titles whose trailing parentheses do not contain an integer
    (e.g. "abc (x)") are treated as having no revision suffix.
    """
    new_title = ''
    start = title.rfind('(')
    if title.endswith(')') and start >= 0:
        try:
            n = int(title[start + 1:-1])
        except ValueError:
            # Parenthesised text is not a number: fall through to the
            # "(2)" default below.
            pass
        else:
            new_title = title[:start].strip() + ' (%d)' % (n + 1)
    if not new_title:
        new_title = title + ' (2)'
    return new_title
|
c6238173a06c8b576a92788ce405aa545c59ec92
| 53,173
|
def in_scope(graph, node, parent):
    """ Returns True if `node` is in the scope of `parent`. """
    # Walk up the scope chain from node until the root (None) is hit.
    scopes = graph.scope_dict()
    current = scopes[node]
    while current is not None:
        if current == parent:
            return True
        current = scopes[current]
    return False
|
1a6c7f308e11f95b2d6f7e59905b6a947a3a15fc
| 53,175
|
def ypred_to_bool(ypred, threshold):
    """Convert predicted probabilities to 0/1 labels using ``threshold``.

    Values strictly greater than ``threshold`` map to 1, all others to 0.
    """
    exceeds = ypred > threshold
    return exceeds.astype(int)
|
19f39a7ac5e49c654be392629a1e6fdc43d41770
| 53,176
|
from typing import List
def flatten(l: List) -> List:
    """ Flatten the given list of lists by one level. """
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
|
88bc0016b861254114693649e33a0aa4e6eaf5b9
| 53,180
|
def simContinue(state, timeLimit = None) :
    """Test if simulation should continue to next state:
    If time based: if simulation is after time limit
    If HP based: if enemy has HP left
    """
    if timeLimit is None:
        # HP-based: continue while the enemy is alive.
        return state['enemy']['hp'] > 0
    # Time-based: continue while within the limit.
    return state['timeline']['timestamp'] <= timeLimit
|
5a5cb8a9724d1a06839a819c72c186a0ed47e3dc
| 53,190
|
def social_optimum_cd(alpha, c):
    """
    Cobb-Douglas utility social optimum
    """
    return 1 / (1 - alpha) if alpha > 1 - c else 1 / c
|
6f86a95e75feac843cb4adb8ead2d3821607d836
| 53,194
|
from typing import List
def merge_sort(x: List) -> List:
    """Merge sort divides a list into two smaller lists, and recursively repeats the process on the two smaller lists
    till lists of single elements are obtained. These smaller lists are then combined to form a single sorted list of
    the original elements. It has an average time complexity of Θ(nlogn). Time complexity for the worst case is
    O(nlogn). Time complexity for the best case is Ω(nlogn).
    >>> merge_sort([4, 2, 3, 1, 0, 5])
    [0, 1, 2, 3, 4, 5]
    :param x: list to be sorted
    :return: new sorted list
    """
    length = len(x)
    if length <= 1:
        return x
    mid_idx = length // 2
    left = merge_sort(x[0:mid_idx])
    right = merge_sort(x[mid_idx:length])
    return _merge(left, right)

def _merge(left: List, right: List) -> List:
    """Stably merge two sorted lists into a new sorted list in O(n).

    Uses index cursors instead of list.pop(0), which is O(n) per call
    and made the original merge quadratic.
    """
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        # <= keeps the merge stable: ties are taken from the left half.
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
|
995e8d1d275a295b7ddc694bb7d2faaac5f2a17c
| 53,195
|
def generateSplitDates(years):
    """
    Generates train/test split dates.
    :param years: tuple, (start year, end year, prediction year), (YYYY, YYYY, YYYY)
    Returns tuples, (train start, train end), (test start, test end). All dates formatted as YYYY-MM-DD.
    """
    start_year, end_year, pred_year = years
    train_dates = ('{}-01-01'.format(start_year), '{}-12-31'.format(end_year))
    test_dates = ('{}-01-01'.format(pred_year), '{}-12-31'.format(pred_year))
    return train_dates, test_dates
|
8b8a86246ef9f121cfb3308438723203e846b7f9
| 53,200
|
def _context(response):
"""
Get a context dictionary for a serializer appropriate for the given response.
"""
return {'request': response.wsgi_request}
|
b86e37c6edf6968ee725392523cc6bcc1a3b437b
| 53,205
|
def remove_suffix_ness(word):
    """Remove the "ness" suffix from the word while keeping spelling in mind.

    :param word: str - word to remove suffix from (assumed to end in "ness").
    :return: str - word with suffix removed & spelling adjusted.

    For example: "heaviness" becomes "heavy", but "sadness" becomes "sad".
    """
    stem = word[:-4]  # drop the trailing "ness"
    # "...iness" stems come from "-y" adjectives: restore the "y".
    # endswith (rather than stem[-1]) avoids an IndexError when the
    # stem is empty, e.g. word == "ness".
    if stem.endswith('i'):
        stem = stem[:-1] + 'y'
    return stem
|
ccd65b52d3d77cd0d86c58662f52cd7baa8b4ec1
| 53,207
|
from typing import List
def lbs_to_kg(lst: List[int]) -> List[float]:
    """
    :param lst: list of ints in lbs
    :return: list of floats in kg
    """
    KG_PER_LB = 0.453592
    converted = []
    for pounds in lst:
        converted.append(pounds * KG_PER_LB)
    return converted
|
46a3d10f27a5769fdaef6b361fe2086ef2fad757
| 53,211
|
def CompareResults(manifest, actual):
    """Compare sets of results and return two sets:
    - Results present in ACTUAL but missing from MANIFEST.
    - Results present in MANIFEST but missing from ACTUAL.
    """
    # Actual results the manifest does not list; reported as errors.
    actual_vs_manifest = {result for result in actual
                          if result not in manifest}
    # Manifest entries that did not show up in the actual results;
    # reported as warnings (expected failures that no longer fail).
    # Tests marked flaky are ignored.
    manifest_vs_actual = {expected for expected in manifest
                          if 'flaky' not in expected.attrs
                          and expected not in actual}
    return actual_vs_manifest, manifest_vs_actual
|
be33569bda03cb598a4490088154213edc93a726
| 53,214
|
def filter_divisors(iterable, reverse=False):
    """Removes all elements that divide any other.
    Returns list generated by given iterable consisting of elements that are
    maximal by divisibility.
    Input argument must be in decreasing order.
    Args:
        iterable: Input iterable of integers in decreasing order.
        reverse: If true, returns the list in decreasing (input) order.
            By default the list is returned in increasing order.
            (The original docstring had these two cases swapped.)
    Returns:
        List without divisors.
    """
    kept = []
    for candidate in iterable:
        # Since input is decreasing, every already-kept element is >=
        # candidate; keep candidate only if it divides none of them.
        if all(x % candidate != 0 for x in kept):
            kept.append(candidate)
    # kept is in input (decreasing) order; flip for the default case.
    return kept if reverse else kept[::-1]
|
a331d5820335faa9337da81e975ce8202d276edf
| 53,219
|
import copy
def copy_graph(g):
    """
    Create a copy of the given graph.
    :param g: input graph as dictionary
    :return: a copy of the graph
    >>> copy_graph({'edgeSet': [{'right':[4,5,6]}], 'entities': [], 'tokens':[]}) == {'edgeSet': [{'right':[4,5,6]}], 'entities': [], 'tokens':[]}
    True
    >>> copy_graph({}) == {'edgeSet':[], 'entities':[]}
    True
    """
    # edgeSet is deep-copied (nested dicts/lists), entities only shallow.
    duplicate = {
        'edgeSet': copy.deepcopy(g.get('edgeSet', [])),
        'entities': copy.copy(g.get('entities', [])),
    }
    # tokens and filter are carried over by reference when present.
    for key in ('tokens', 'filter'):
        if key in g:
            duplicate[key] = g[key]
    return duplicate
|
15da8e328eb6d921928bf4163a967c265374e140
| 53,221
|
def update_a_use_package_tools_auto(main, file):
    """ Migrates AppVeyor to use bincrafters-package-tools auto detection instead of custom build.py
    """
    replaced = main.replace_in_file(file, "python build.py",
                                    "bincrafters-package-tools --auto")
    if not replaced:
        return False
    main.output_result_update(title="AppVeyor: Migrate to use bincrafters-package-tools --auto")
    return True
|
48dfbeef81e00a4e7ccb7e5b25764d5a54e61edb
| 53,222
|
def inverse_transform_point_cloud(point_cloud, ego_vehicle_matrix):
    # <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
    """Undo the translation and then the rotation (Inverse SE(3) transformation)."""
    rotation = ego_vehicle_matrix[:3, :3]
    translation = ego_vehicle_matrix[:3, 3]
    # Subtracting produces a fresh array, so the caller's cloud is untouched.
    return (point_cloud - translation).dot(rotation)
|
602b2a2e6a5babb87220cdcd98746823ea5d7dce
| 53,225
|
def associative_backdate(now, backdate):
    """Correct non-associative relative-delta month math
    :param now: datetime start point
    :param backdate: relativedelta value to move back from now.
    Asking for a relative delta back in time for a period of months and
    then adding the same number of months does NOT always produce the
    same starting value. For example May 30 - 3 months => Feb 28/29 depending
    on leap year determination. Adding 3 months to Feb 28 always produces
    May 28.
    Work around this problem by returning a backdated datetime value, and
    a ``nowish`` value obtained by adding the delta back, so the pair is
    associative:
        nowish - backdate == result + backdate == nowish
    Therefore nowish will typically be equal to now, but altered in boundary
    cases for tests needing this associative property.
    :returns: backdated_datetime, nowish
    """
    backdated = now - backdate
    nowish = backdated + backdate
    return backdated, nowish
|
8ba36900c06710e659f878e4bbc364caeb4eaf67
| 53,230
|
from typing import Tuple
def get_dim(
    width: float = 398.3386,
    fraction_of_line_width: float = 1,
    ratio: float = (5 ** 0.5 - 1) / 2,
) -> Tuple[float, float]:
    """Return figure (width, height) in inches to avoid scaling in latex.
    Default width is `src.constants.REPORT_WIDTH`.
    Default ratio is golden ratio, with figure occupying full page width.
    Args:
        width (float, optional): Textwidth of the report to make fontsizes match.
            Defaults to `src.constants.REPORT_WIDTH`.
        fraction_of_line_width (float, optional): Fraction of the document width
            which you wish the figure to occupy. Defaults to 1.
        ratio (float, optional): Fraction of figure width that the figure height
            should be. Defaults to (5 ** 0.5 - 1)/2.
    Returns:
        fig_dim (tuple):
            Dimensions of figure in inches
    Example:
        Here is an example of using this function::
            >>> dim_tuple = get_dim(fraction_of_line_width=1, ratio=(5 ** 0.5 - 1) / 2)
    """
    POINTS_PER_INCH = 72.27  # LaTeX points per inch
    # Desired width in points, then converted to inches.
    width_pt = width * fraction_of_line_width
    width_in = width_pt * (1 / POINTS_PER_INCH)
    height_in = width_in * ratio
    return (width_in, height_in)
|
e2021d09f82c439f282c527806d78a6119bcc47f
| 53,231
|
def task_to_run_key_try_number(to_run_key):
    """Returns the try number, 1 or 2."""
    # The try number is packed into the low 4 bits of the entity id.
    low_bits = to_run_key.integer_id() & 0xF
    return low_bits
|
4b4e659ccbcbc5942e0123604494ce819d323a79
| 53,234
|
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats
|
c7f9b03ad00100d4bf5642f9781bcf9dffb19add
| 53,236
|
def cifrar(palabra, cifrador):
    """Encrypt a plain-text string with a substitution cipher.

    Receives an unencrypted string and a dictionary mapping each plain
    character to its cipher substitute.  Characters with no entry in the
    mapping are passed through unchanged.

    Returns the encrypted string.
    """
    # dict.get falls back to the original character when it has no
    # substitution; join builds the result in one linear pass instead of
    # quadratic string concatenation.
    return ''.join(cifrador.get(char, char) for char in palabra)
|
068a089097546529cd0bc36068ea7b5c5e009f6d
| 53,238
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.