content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def cf_ni_ratio(cf_df, financials_df):
    """Return True if the latest reported Operating Cash Flow exceeds the latest Net Income.

    cf_df = Cashflow Statement of the specified company (rows indexed by line-item name)
    financials_df = Financial Statement of the specified company (rows indexed by line-item name)
    """
    cf = cf_df.iloc[cf_df.index.get_loc("Total Cash From Operating Activities"), 0]
    net_income = financials_df.iloc[financials_df.index.get_loc("Net Income"), 0]
    # The comparison already yields a boolean; no if/else ladder needed.
    # bool() normalises a possible numpy.bool_ to a plain Python bool.
    return bool(cf > net_income)
|
f0997b38950e4fadcd2a9bb8d1da7e648c970bc9
| 49,330
|
def retrieve_files(s3, short_name, date_str):
    """Retrieve a list of files from S3 bucket.
    Parameters
    ----------
    s3: S3FileSystem
        reference to S3 bucket to retrieve data from
    short_name:
        string short name of collection
    date_str: str
        string date and time to search for
    Returns
    -------
    list: list of strings
    """
    # Build the glob pattern for the collection's .nc granules, then query S3.
    pattern = f"podaac-ops-cumulus-protected/{short_name}/{date_str}*.nc"
    return s3.glob(pattern)
|
e362b1be22f56dae4c17280038eb2f5ff1315ae4
| 49,334
|
from typing import List
from typing import Dict
def get_readme_download_url(files: List[Dict[str, str]]) -> str:
    """
    Given a GitHub API file listing for a repo, return the download URL of the
    first file whose name starts with "readme" (case-insensitive), or "" if
    no README-like file is present.
    """
    for entry in files:
        lowered = entry["name"].lower()
        if lowered.startswith("readme"):
            return entry["download_url"]
    return ""
|
5c394d247b0ab06099b3d3a540fa2f5101b385e6
| 49,338
|
import threading
def _start_thread(target, *args, **kwargs) -> threading.Thread:
"""Helper to simplify starting a thread."""
thread = threading.Thread(target=target, args=args, kwargs=kwargs)
thread.start()
return thread
|
a5d9e1feb1b02ee9b1ec3aaa46758cd3d27e1880
| 49,340
|
def count_trees(data, route):
    """Count trees hit while sledding down the grid along *route* = (dx, dy).

    The grid wraps horizontally (columns repeat); True cells are trees.

    >>> count_trees([
    ...     [False, False, True, True, False, False, False, False, False, False, False],
    ...     [True, False, False, False, True, False, False, False, True, False, False],
    ...     [False, True, False, False, False, False, True, False, False, True, False],
    ...     [False, False, True, False, True, False, False, False, True, False, True],
    ...     [False, True, False, False, False, True, True, False, False, True, False],
    ...     [False, False, True, False, True, True, False, False, False, False, False],
    ...     [False, True, False, True, False, True, False, False, False, False, True],
    ...     [False, True, False, False, False, False, False, False, False, False, True],
    ...     [True, False, True, True, False, False, False, True, False, False, False],
    ...     [True, False, False, False, True, True, False, False, False, False, True],
    ...     [False, True, False, False, True, False, False, False, True, False, True]
    ... ], (3, 1))
    7
    """
    dx, dy = route
    height, width = len(data), len(data[0])
    col = row = hits = 0
    while row < height:
        # bool counts as 0/1, so summing the cell value tallies trees.
        hits += data[row][col]
        col = (col + dx) % width  # wrap around the repeating pattern
        row += dy
    return hits
|
1a81a8ccb3b7dd87f04b7713d08370297cb7b5fa
| 49,342
|
def combination_of_two_lists(lst1, lst2):
    """
    Return every pairing "<a>_<b>" with a from lst1 (outer) and b from lst2 (inner).
    e.g. ["f", "m"] and ["wb", "go", "re"] => ["f_wb", "f_go", "f_re", "m_wb", "m_go", "m_re"]
    """
    pairs = []
    for left in lst1:
        for right in lst2:
            pairs.append(left + "_" + right)
    return pairs
|
3f966ff5bb535d95c232162f76705b2b9f6311c6
| 49,344
|
def letter_for(label):
    """Map an integer class label (0-9) to its letter 'A'-'J'."""
    letters = "ABCDEFGHIJ"
    return letters[label]
|
6c0692f7451db6fd45141339f702665ba3ac2bb4
| 49,352
|
def read_coverages(input_file):
    """Read one coverage value (float) per line from *input_file* and return them as a list."""
    with open(input_file, 'r') as handle:
        return [float(line.rstrip()) for line in handle]
|
6820da83d0ecb24cdad1169b0521505d59d3dcbf
| 49,353
|
def bgr01(r, g, b):
    """Convert 0-255 RGB components into a (b, g, r) tuple on a 0-1 scale."""
    scale = 255.0
    return (b / scale, g / scale, r / scale)
|
c7d9d5cecfec3311ad8f5d3b2baf619b5bbe83cf
| 49,357
|
def to_world_canvas(world_point, canvas_extents, world_extents):
    """Map a world-coordinate point onto canvas pixels (the y axis is flipped)."""
    wx = world_point[0]
    wy = world_point[1]
    x = int(wx / world_extents[0] * canvas_extents[0])
    y = int(canvas_extents[1] - 1 - wy / world_extents[1] * canvas_extents[1])
    return (x, y)
|
5dec7f87fae35542b5798f88b0353c9b593e88fb
| 49,369
|
import struct
def py_int2byte(val):
    """
    Pack a Python int (0-255) into a single-byte bytes object.
    """
    packed = struct.pack('B', val)
    return packed
|
d6ab7467c615a23ccb416db2d1150a3909d9f1ff
| 49,372
|
def flatten_dict(dd, separator='.', prefix=''):
    """Recursively flatten a nested dictionary into a single-layer dictionary.

    Keys of nested levels are joined with *separator*; *prefix* (if non-empty)
    is prepended to every resulting key. A non-dict *dd* maps *prefix* to *dd*.

    Parameters
    ----------
    dd : dict
        dictionary to flatten
    separator : str, optional
        joiner between nested key components. Default '.'
    prefix : str, optional
        prefix for all flattened keys. Default ''.

    Returns
    -------
    dict
        a flattened version of *dd*
    """
    if not isinstance(dd, dict):
        return {prefix: dd}
    flat = {}
    for key, value in dd.items():
        # Flatten the sub-value using its own key as the sub-prefix, then
        # attach this level's prefix (if any) in front.
        for subkey, subvalue in flatten_dict(value, separator, key).items():
            name = prefix + separator + subkey if prefix else subkey
            flat[name] = subvalue
    return flat
|
f0b356aa0c516b88e83d8c8820fc53f4db69623d
| 49,376
|
def known_mismatch(hashes1, hashes2):
    """Return the name of a known benign mismatch pattern, or None.

    Two patterns are recognised, each checked symmetrically:
    - frame_0_dup:     one sequence duplicated its frame 0 into slot 1
    - frame_0_missing: one sequence is the other shifted by one frame
    """
    def frame_0_dup_(h1, h2):  # asymmetric version
        return ((h1[0] == h2[0]) and
                (h1[2:] == h2[2:]) and
                (h1[1] != h2[1] and h2[1] == h1[0]))
    def frame_0_dup(h1, h2):
        return frame_0_dup_(h1, h2) or frame_0_dup_(h2, h1)
    def frame_0_missing(h1, h2):
        # BUG FIX: the second clause read h2[:1] (just the first element)
        # instead of h2[1:], so a shift in the other direction was never
        # detected; the corrected clause mirrors the first one.
        return (h1[1:] == h2[:-1]) or (h2[1:] == h1[:-1])
    for func in [frame_0_dup, frame_0_missing]:
        if func(hashes1, hashes2):
            return func.__name__
    return None
|
dfd3985a26e53147147b2485203964863db8600c
| 49,377
|
def to_unicode(s):
    """Return *s* as a unicode-capable type, decoding bytes as UTF-8.

    In Python2 the target type is unicode; in Python3 it is str.
    Non-bytes input is returned unchanged.
    """
    return s.decode('utf-8') if isinstance(s, bytes) else s
|
34353f67dedad93220cf1b567c739dccba610ecf
| 49,384
|
def time_check(message):
    """
    Check whether the quick-search duration is valid: an integer in [1, 8].

    :param message: user-supplied value (str/int/anything)
    :return: True if int(message) succeeds and lies between 1 and 8 inclusive
    """
    try:
        duration = int(message)
    except (TypeError, ValueError):
        # Non-numeric strings raise ValueError; None/containers raise
        # TypeError — both simply mean "invalid input".
        return False
    return 1 <= duration <= 8
|
745bcc45986d2b149b062d4864ae9cf8eb060901
| 49,387
|
def _same_side(pos, p, q, a, b):
"""Indicates whether the points a and b are at the same side of segment p-q"""
dx = pos[p][0] - pos[q][0]
dy = pos[p][1] - pos[q][1]
dxa = pos[a][0] - pos[p][0]
dya = pos[a][1] - pos[p][1]
dxb = pos[b][0] - pos[p][0]
dyb = pos[b][1] - pos[p][1]
return (dy*dxa - dx*dya > 0) == (dy*dxb - dx*dyb > 0)
|
e8635b29322a53d6928677fb9379a04440ef6e0e
| 49,389
|
def apply_opcode4(code_list, opcode_loc, parameter_mode_dict):
    """Execute opcode 4 (output): resolve the single parameter and return it.

    Parameters
    ----------
    code_list : list
        The whole programme.
    opcode_loc : int
        Index of the opcode within code_list.
    parameter_mode_dict : dict
        Maps parameter position (1) to its mode: '0' = position mode
        (dereference), '1' = immediate mode (use as-is).

    Returns
    -------
    code_list : list
        The (unmodified) programme.
    output : int
        The resolved parameter value.
    """
    _opcode, param = code_list[opcode_loc:opcode_loc + 2]
    # Position mode: the parameter is an address into the programme.
    if parameter_mode_dict[1] == '0':
        param = code_list[param]
    return code_list, param
|
52b9e57185f7eb6fc01266b91d1cfea86f048a43
| 49,390
|
def sz_to_ind(sz, charge, nsingle):
    """
    Convert a spin projection :math:`S_{z}` into a list index.

    Parameters
    ----------
    sz : int
        Spin projection in the z direction.
    charge : int
        Value of the charge.
    nsingle : int
        Number of single particle states.

    Returns
    -------
    int
        List index corresponding to sz.
    """
    szmax = min(charge, nsingle - charge)
    index = (szmax + sz) / 2
    return int(index)
|
c6016a5f21f4a4e9904046260b94b1f4d6d397ba
| 49,391
|
def map_activity_model(raw_activity):
    """ Map request payload keys onto the fields defined for the Activity model """
    field_sources = {
        'activity_id': 'key',
        'description': 'activity',
        'activity_type': 'type',
        'participants': 'participants',
        'accessibility': 'accessibility',
        'price': 'price',
        'link': 'link',
    }
    return {field: raw_activity[source] for field, source in field_sources.items()}
|
080370bc0ddbc3c7487e0c6789612c7bc5298258
| 49,392
|
import math
def wrap_to_pi(angle):
    """
    Normalise an angle (radians) into the closed interval [-pi, pi].

    :param angle: float, angle to wrap in rads
    :return: float, wrapped angle
    """
    two_pi = 2 * math.pi
    wrapped = angle
    while wrapped > math.pi:
        wrapped -= two_pi
    while wrapped < -math.pi:
        wrapped += two_pi
    return wrapped
|
613c81b3404ece1326e19639bc8684cafdd4a990
| 49,394
|
def check_module( mymodule, indent='' ):
    """
    Build a human-readable report of a module's administrative metadata
    (name, project, package, author, version, paths, __all__ and docstring).
    :Parameters:
    mymodule: Python module
        The module to be checked. It must already have been imported.
    indent: str, optional, default=''
        A string used to pad the left side of the report for indentation
    :Returns:
    moduleinfo: str
        A string describing the module.
    """
    report = "%s%s" % (indent, repr(mymodule))
    # Simple attributes that are all rendered as "- <label>: <value>".
    simple_attrs = [
        ('__name__', 'name'),
        ('__project__', 'project'),
        ('__package__', 'package'),
        ('__author__', 'author'),
        ('__version__', 'version'),
    ]
    for attr, label in simple_attrs:
        if hasattr(mymodule, attr):
            report += "\n%s - %s: %s" % (indent, label, str(getattr(mymodule, attr)))
    if hasattr(mymodule, '__path__'):
        report += "\n%s - imported from: %s" % \
            (indent, str(mymodule.__path__))
    if hasattr(mymodule, '__file__'):
        report += "\n%s - initialised from: %s" % \
            (indent, str(mymodule.__file__))
    if hasattr(mymodule, '__all__'):
        report += "\n%s - length of _all_ list: %d" % (indent, len(mymodule.__all__))
    if hasattr(mymodule, '__doc__'):
        if mymodule.__doc__ is not None:
            report += "\n%s - (%d character doc string)" % \
                (indent, len(mymodule.__doc__))
        else:
            report += "\n%s - (null doc string)" % indent
    else:
        report += "\n%s - (no doc string)" % indent
    return report
|
d86b87d1b031bed13610605c1b82920b28f31c23
| 49,397
|
def get_bytes_used(current_time, process_durations):
    """
    Return total bytes used at the given time.

    Each process consumes one byte every p_time units, so it has used
    current_time // p_time bytes so far.

    >>> get_bytes_used(12, [2, 3, 4])
    13
    >>> get_bytes_used(14, [2, 3, 4])
    14

    :type current_time: int
    :param current_time: Array index
    :type process_durations: list
    :param process_durations: Sorted list of process durations to exhaust 1 byte.
    :return: bytes_used
    :rtype: int
    """
    total = 0
    for duration in process_durations:
        if duration > current_time:
            # Durations are sorted ascending, so no later process
            # can have consumed anything yet.
            break
        total += current_time // duration
    return total
|
b87e4bc1f7050904a58ef6124beded518fda3094
| 49,400
|
def solutionClosed(n: int, p: float) -> float:
    """
    Closed-form solution to solutionRecursive's recurrence relation.

    With q = (-2p + 1) the recurrence h[0] = 1, h[n] = q h[n-1] + p
    telescopes into h[n] = q^n + p(1 - q^n)/(1 - q), which after
    substituting q simplifies to h[n] = ((-2p + 1)^n + 1)/2.
    """
    q = -2*p + 1
    return (q**n + 1)/2
|
2f3bac0cd6981989dac4570180a215e20966dc11
| 49,407
|
def has_lower_letters(password):
    """Return True if password contains at least one lowercase letter."""
    for ch in password:
        if ch.islower():
            return True
    return False
|
d55a37e994e289886efdce6e815a430777572b97
| 49,410
|
import yaml
def load(file: str = "lang/en_US.yaml") -> dict:
    """Load the YAML language file into the module-global ``lang`` and return it."""
    global lang
    with open(file, encoding="utf-8") as handle:
        lang = yaml.safe_load(handle)
    return lang
|
a34cb9d9561b2edf1597f542a07d50b683a8fbff
| 49,411
|
def extract_property_from_uri(uri: str) -> str:
    """
    Extract the property name from a property URI (the part after the last '#').

    :param uri: e.g. <http://www.kg.com/kg/ontoligies/ifa#createTime>
    :return: e.g. 'createTime'
    :raises ValueError: if the URI contains no '#' separator
    """
    separator_idx = uri.rfind('#')
    if separator_idx == -1:
        # Include the offending URI so failures are diagnosable (the bare
        # ValueError carried no context at all).
        raise ValueError(f"no '#' separator found in property URI: {uri!r}")
    return uri[separator_idx + 1:]
|
6c0902a954a247c7c843f15be6e3d98bf3a3721f
| 49,416
|
def is_supported_value_type(value):
    """
    Check recursively whether the given value is of a supported type.
    Supported Types:
    - strings
    - bytes
    - numbers
    - tuples
    - lists
    - dicts
    - sets
    - booleans
    - None
    """
    if (
        isinstance(value, (str, bytes, int, float, bool)) or
        value is None
    ):
        return True
    # tuple/list/set all share the same rule (every element supported),
    # so the three previously-duplicated branches collapse into one.
    if isinstance(value, (tuple, list, set)):
        return all(is_supported_value_type(item) for item in value)
    if isinstance(value, dict):
        # Both keys and values must be supported.
        return all(
            is_supported_value_type(key) and is_supported_value_type(val)
            for key, val in value.items()
        )
    return False
|
fa9d8ae96dcde739e73d8108cfeefef8bd88451b
| 49,419
|
import six
def _is_possible_token(token, token_length=6):
"""Determines if given value is acceptable as a token. Used when validating
tokens.
Currently allows only numeric tokens no longer than 6 chars.
:param token: token value to be checked
:type token: int or str
:param token_length: allowed length of token
:type token_length: int
:return: True if can be a candidate for token, False otherwise
:rtype: bool
>>> _is_possible_token(123456)
True
>>> _is_possible_token(b'123456')
True
>>> _is_possible_token(b'abcdef')
False
>>> _is_possible_token(b'12345678')
False
"""
if not isinstance(token, bytes):
token = six.b(str(token))
return token.isdigit() and len(token) <= token_length
|
186447c08cb64178e1f99fabc5523f2a92762ca5
| 49,420
|
def add_releaseyear(dataframe):
    """Add a 'release_year' column holding the first four characters of 'release_date'."""
    dataframe['release_year'] = dataframe['release_date'].map(lambda date: str(date)[:4])
    return dataframe
|
b679361ec35d99611ca64207afd24b542868f179
| 49,422
|
def interpolate_grids(data2, interpolation_scheme):
    """ Interpolate all variables in data to a different grid.
    Possible interpolations are:
    T -> U; T -> V; U -> V; V -> U.
    Parameters
    ---------
    data2 : xarray.Dataset
        Dataset containing 4D ('time_counter','x','y','z') and
        3D ('time_counter','x','y','z') variables.
    interpolation_scheme: str
        Valid values are: t2u, t2v, u2v, v2u
    Returns
    -------
    xarray.Dataset
        A copy of the input with every data variable averaged onto the
        target grid; the input dataset is not modified.
    """
    # Work on a copy so the caller's dataset is left untouched.
    data = data2.copy()
    if interpolation_scheme not in ["t2u", "t2v", "u2v", "v2u"]:
        raise ValueError('Interpolation scheme is unknown. Valid values are:\
            t2u, t2v, u2v, v2u')
    # T->V: two-point average of a cell and its neighbour shifted along y.
    if interpolation_scheme == "t2v":
        for v in data.data_vars:
            data[v] = (data[v] + data[v].shift(y=-1)) * 0.5
    # T->U: two-point average of a cell and its neighbour shifted along x.
    if interpolation_scheme == "t2u":
        for v in data.data_vars:
            data[v] = (data[v] + data[v].shift(x=-1)) * 0.5
    # U->V: four-point average over the surrounding staggered cells.
    # NOTE(review): the shift directions mirror a NEMO-style C-grid
    # stagger — confirm against the model's grid convention.
    if interpolation_scheme == "u2v":
        for v in data.data_vars:
            data[v] = (data[v] + data[v].shift(x=1) +
                       data[v].shift(y=-1) +
                       data[v].shift(x=1, y=-1)) * 0.25
    # V->U: four-point average with the opposite shift directions.
    if interpolation_scheme == "v2u":
        for v in data.data_vars:
            data[v] = (data[v] + data[v].shift(x=-1) +
                       data[v].shift(y=1) +
                       data[v].shift(x=-1, y=1)) * 0.25
    return data
|
11d340e701a74fcd74d898501a57a192aca3ccb4
| 49,423
|
import unicodedata
def maketrans_remove(accents=("COMBINING ACUTE ACCENT", "COMBINING GRAVE ACCENT")):
    """ Build a str.translate table that deletes the named accent characters. """
    chars_to_delete = "".join(unicodedata.lookup(name) for name in accents)
    return str.maketrans("", "", chars_to_delete)
|
516114526f6d7d36b2b454cd07e40302bd7a83f7
| 49,424
|
import struct
def unpackbyte(b):
    """
    Convert a one-byte byte string to its integer value, i.e.
    struct.unpack("B", b)[0].
    """
    return struct.unpack("B", b)[0]
|
8d4a79bda22554604637e1ca934a85b3a6f71cdb
| 49,426
|
import math
def slurm_format_memory(n):
    """Format a byte count for slurm (G/M/K suffix, minimum "1K")."""
    units = (("G", 1024 ** 3), ("M", 1024 ** 2), ("K", 1024))
    for suffix, size in units:
        # Use a unit only once the value reaches 10 of that unit,
        # matching the original thresholds.
        if n >= 10 * size:
            return "%d%s" % (math.ceil(n / size), suffix)
    return "1K"
|
7a454fee5754503ad30d3b0cb8e8261adac7d19f
| 49,427
|
import time
from datetime import datetime
def base_record() -> dict:
    """
    Return a basic record with the audit flags we use in all records.
    Args:
        None
    Returns:
        (dict): dict with audit fields populated: "time" (epoch seconds,
        float) and "time_str" (UTC timestamp, "%Y-%m-%d %H:%M:%S").
    """
    from datetime import timezone  # local import keeps module imports untouched
    # datetime.utcnow() is deprecated since Python 3.12; an aware UTC
    # datetime produces the identical formatted string.
    now_utc = datetime.now(timezone.utc)
    return {"time": time.time(), "time_str": now_utc.strftime("%Y-%m-%d %H:%M:%S")}
|
49a16ac37947ccff482a1400c054f87abfb2553b
| 49,431
|
from typing import List
def reverse(data: List[int]) -> List[int]:
    """Return the elements of *data* in reverse order.

    NOTE: this consumes the input — *data* is emptied via pop() while the
    reversed list is built, matching the documented algorithm.
    """
    flipped: List[int] = []
    for _ in range(len(data)):
        flipped.append(data.pop())
    return flipped
|
595a6815c1823141875d7cac7e5cdb5a2bb8b934
| 49,432
|
def get_non_hidden_fields(form):
    """
    Return the form's visible (non-hidden) fields via its visible_fields() API.
    """
    visible = form.visible_fields()
    return visible
|
a7003b12f1ca89414db299114a06bd819b3f4932
| 49,435
|
def is_namedtuple_cls(cls):
    """Test if an object is a namedtuple class or a torch.return_types.* quasi-namedtuple."""
    try:
        if not issubclass(cls, tuple):
            return False
    except TypeError:
        # issubclass() raises for non-class arguments.
        return False
    if getattr(cls, "__module__", None) == "torch.return_types":
        return True
    bases = getattr(cls, "__bases__", []) or [None]
    # collections.namedtuple subclasses derive directly from tuple and
    # carry the generated _make/_fields API.
    return bases[0] is tuple and hasattr(cls, "_make") and hasattr(cls, "_fields")
|
73698f8836b902405241df045aa01800173414c8
| 49,437
|
def sort_rat(name_list: list, ratings: list, reverse: bool = True):
    """
    Sort ratings associated with names; the sort is stable.

    :param name_list: names associated one-to-one with *ratings*
    :param ratings: the ratings to sort by
    :param reverse: descending by default; False means ascending
    :return: list of (name, rating, ranking) tuples, ranking starting at 1
    :raises ValueError: if the two lists differ in length
    """
    if len(name_list) != len(ratings):
        raise ValueError("# of names %d does not equal to # of ratings %d" % (len(name_list), len(ratings)))
    paired = list(zip(name_list, ratings))
    paired.sort(key=lambda pair: pair[1], reverse=reverse)
    return [(name, rating, rank) for rank, (name, rating) in enumerate(paired, start=1)]
|
cfb944d067c41f45c4e558fdeb55905de0ec01b4
| 49,438
|
def sigmoid(x):
    """
    Logistic function: sigmoid(x) = 1 / (1 + e^-x).

    *x* must expose an ``.exp()`` method (e.g. a torch tensor).
    """
    exp_neg = (-x).exp()
    return 1 / (1 + exp_neg)
|
1d60e8061e239b4de471ae4a38eb337365f498b5
| 49,441
|
def copy_df(df):
    """
    DESCRIPTION
    -----------
    Return a deep copy of a pandas dataframe (data and indices duplicated).
    PARAMETERS
    ----------
    df : pd.DataFrame
        A pandas dataframe instance
    RETURNS
    -------
    A deep copy of the given pandas dataframe
    """
    duplicate = df.copy(deep=True)
    return duplicate
|
a130830820a9aef0c2419580a2493bb1f14111df
| 49,447
|
def strip_locals(data):
    """Recursively remove all dict keys that are strings beginning with 'local_'.

    Dicts are cleaned at every nesting level; lists are traversed and any
    dicts/lists inside them handled the same way. Other values pass through
    unchanged.
    """
    if isinstance(data, dict):
        cleaned = {}
        for key, value in data.items():
            if isinstance(key, str) and key.startswith('local_'):
                continue
            cleaned[key] = strip_locals(value)
        return cleaned
    if isinstance(data, list):
        return [strip_locals(item) for item in data]
    return data
|
92fb36a2f3bb618b7b706dd33a3347666d0fa07e
| 49,448
|
def budget_equalities(sequence: str, price_vars: list, budget_vars: list) -> list:
    """
    Given a picking-sequence, create symbolic equalities determining that the
    total price of each agent's bundle equals the agent's budget.
    Currently works only for 2 agents.
    :param sequence: a string determining the picking-sequence, e.g. "ABA".
    :param price_vars: a list of symbols, e.g. symbols('p3,p2,p1')
    :param budget_vars: a list of symbols, e.g. symbols('a,b')
    :return: a list of constraints, as expressions that should be equal to 0.
    >>> budgets = symbols("a,b")
    >>> price_vars = symbols('p5,p4,p3,p2,p1')
    >>> budget_equalities("ABABA", price_vars, budgets)
    [a - p1 - p3 - p5, b - p2 - p4]
    >>> budget_equalities("AABBB", price_vars, budgets)
    [a - p4 - p5, b - p1 - p2 - p3]
    """
    if len(budget_vars) != 2:
        raise ValueError("Currently only 2 agents are supported")
    # Start each agent from its budget and subtract the prices it picks.
    remaining = {"A": budget_vars[0], "B": budget_vars[1]}
    for position, picker in enumerate(sequence):
        if picker not in remaining:
            raise ValueError("Found picker {} but only two agents are supported".format(picker))
        remaining[picker] = remaining[picker] - price_vars[position]
    return [remaining["A"], remaining["B"]]
|
ead48548766afc53e7583b65d88bfc8a10906382
| 49,450
|
def get_recipes_in_node(node):
    """Extract recipe names from a node's run_list entries of the form "recipe[name]"."""
    names = []
    for entry in node.get('run_list'):
        if entry.startswith("recipe"):
            # Take the text between the square brackets.
            names.append(entry.split('[')[1].split(']')[0])
    return names
|
9d2cc0aac4b819428a5984e3852035650afeedf4
| 49,453
|
from pathlib import Path
from typing import Tuple
from datetime import datetime
def parse_dates_from_filename(file: Path) -> Tuple[datetime, datetime]:
    """
    Extract the start and end time encoded in a file name.

    The stem is expected to contain at least four "__"-separated parts; the
    third and fourth hold timestamps in "%Y_%m_%dT%H_%M_%SZ" form (a trailing
    "_temp" marker on the end time is ignored).

    :param file: file whose name encodes the time range
    :return: tuple of start and end time
    :raises ValueError: if the name does not contain both timestamps
    """
    parts = file.stem.split("__")
    if len(parts) < 4:
        raise ValueError(f"Cannot extract datetimes from file name: {file.name}")
    timestamp_format = "%Y_%m_%dT%H_%M_%SZ"
    start = datetime.strptime(parts[2], timestamp_format)
    end = datetime.strptime(parts[3].replace("_temp", ""), timestamp_format)
    return start, end
|
a03212ed2df9826cc631cda9e63157d6675f82a8
| 49,456
|
def weekend_init(M, i, t):
    """
    Determine the days to treat as weekend using midnight threshold parameters.

    :param M: Model
    :param i: period
    :param t: tour type
    :return: list of ints; either [1, 7] or [6, 7]
    """
    shift_lengths = [M.lengths[k] for k in M.tt_length_x[t]]
    longest = max(shift_lengths)
    # If the longest shift starting at period i reaches the midnight
    # threshold, the weekend starts on day 6 instead of day 1.
    first_day = 6 if i + longest - 1 >= M.midnight_thresh[t] else 1
    return [first_day, 7]
|
59e049e4d22f03e042aafa1799fe9f4e199babb4
| 49,457
|
import json
def canonical_json(o):
    """
    Dumps an object as canonical JSON string.
    Canonical JSON does not contain any space (except in strings) and
    has all the keys sorted.
    Args:
        o: The object to dump.
    Return:
        The canonical JSON string.
    """
    # json.dumps' default separators are (', ', ': ') which insert spaces,
    # violating the "no space" guarantee above — pass compact ones explicitly.
    return json.dumps(o, sort_keys=True, separators=(",", ":"))
|
74a6e880987cc1aaccd19a3d68f63ecf0d9835d3
| 49,458
|
def _count_trailing_zeros(mask: int) -> int:
"""count the trailing zeros of a bit mask. Used for shifting.
Args:
mask (int): bit mask, eg 0b00111000
Returns:
int: number of trailing zeros
"""
if mask == 0:
raise ValueError("mask is all zeros")
count = 0
for i in range(mask.bit_length()):
if mask & 1:
return count
count += 1
mask >>= 1
return count
|
3f8ce2c9a5a5143715e2fe52aabdeed3ae692f17
| 49,465
|
def relative_x(xarr, yarr, *args):
    """Shift xarr so it starts at zero: the first value is subtracted from each x.

    Returns (xarr - xarr[0], yarr) with any extra positional args appended.
    """
    offset = xarr[0]
    return (xarr - offset, yarr) + args
|
8c8310b41b6de8a3d9a1846bda2e23a3d67474c8
| 49,473
|
def get_state(person):
    """Return only the 'state' entry of a person mapping (KeyError if absent)."""
    return person["state"]
|
b670382b80ccd5f14d6ff2b6cd98ef28330f3c76
| 49,478
|
def kwargsGet(kwargs, key, replacement):
    """As kwargs.get but uses replacement if the value is None.

    Returns *replacement* when *key* is missing OR mapped to None;
    otherwise returns the stored value (falsy values like False/0 pass through).
    """
    value = kwargs.get(key)
    # .get() yields None both for a missing key and for an explicit None
    # value — the two cases the original three-branch ladder handled.
    return replacement if value is None else value
|
bcb5da19757f685d6ba5b7669c474e5cbc9a13f0
| 49,479
|
def set_creo_version(client, version):
    """Tell creoson which Creo version is running.

    Needs to be called only once per creoson session, and must be called
    for certain functions under Creo 7 due to deprecated config options
    (familytable_replace, file_assemble, file_regenerate, feature_delete,
    feature_resume, feature_suppress).

    Args:
        client (obj):
            creopyson Client.
        version (int):
            Creo version.

    Returns:
        None.
    """
    payload = {"version": int(version)}
    return client._creoson_post("creo", "set_creo_version", payload)
|
ddedcbb614f9fd545538804ba306b375f26bb1c8
| 49,483
|
def rank_items(items):
    """ Annotate each item dict in place with 'rank' = price / weight. """
    for item in items:
        # '/' is true division in Python 3 and already yields a float;
        # the old "* 1.0" Python-2 idiom was redundant.
        item['rank'] = item['price'] / item['weight']
    return items
|
2237664c7822afc76ddb6b32923e5fd98630b885
| 49,485
|
from typing import List
from typing import Dict
def get_entities_fields(entity_title: str, entities: List[str]) -> List[Dict]:
    """
    Build a Slack attachment field from a title and a list of entities.
    Args:
        entity_title (str): Title of the entity.
        entities (List[str]): List of the entities.
    Returns:
        (List[Dict]): Single-entry list containing the field dict — Slack's
        API expects a list even for one field.
    """
    field = {
        "title": f'{entity_title}',
        "value": '\n'.join(entities),
        "short": False,
    }
    return [field]
|
201702971b3bb58285f3adf10e492a6b83f62d27
| 49,486
|
def ZhangJohnson(tokens, index, history):
    """
    Feature extractor after Zhang and Johnson (2003), combination B+D+E+F.

    Feature 'C' is omitted because CRFsuite cannot handle 2nd-order CRFs or
    label/token conjunction features; 'G' (POS tags), 'H' (chunk tags) and
    'I'/'J' (CONLL-2003 dictionaries/gazetteers) are likewise not included.

    References
    ----------
    - Turian J, Rativ L, Bengio Y. Word representations: a simple and general
      method for semi-supervised learning. ACL 2010.
    - Zhang, T. and Johnson D. A robust risk minimization based named entity
      recognition system. HLT-NAACL 2003.
    """
    padded = ([('[START2]',) * 3, ('[START1]',) * 3]
              + list(tokens)
              + [('[END1]',) * 3, ('[END2]',) * 3])
    history = ['[START2]', '[START1]'] + history
    index += 2  # shift to account for the two padding entries in front

    # Feature set 'B': surface tokens in a +/-2 window around the center.
    prev2word, prevword, word, nextword, next2word = (
        padded[index + offset][0] for offset in (-2, -1, 0, 1, 2)
    )

    def _initial_cap(w):
        # Initial-capitalization test: compare the first character with the
        # first character of the capitalized form (as in the original).
        return w[0] == w.capitalize()[0]

    # Feature set 'E' applies to the center word only. NOTE: "digitals
    # containing punctuation" is interpreted as in Turian et al. (2010).
    # Feature set 'F': prefixes (3-4) and suffixes (1-4) of the center word.
    features = {
        'word': word,
        'prevword': prevword,
        'prev2word': prev2word,
        'nextword': nextword,
        'next2word': next2word,
        'capitalized': _initial_cap(word),
        'prevcapitalized': _initial_cap(prevword),
        'prev2capitalized': _initial_cap(prev2word),
        'nextcapitalized': _initial_cap(nextword),
        'next2capitalized': _initial_cap(next2word),
        'allcaps': word.isupper(),
        'all_digits': word.isdigit(),
        'all_letters': word.isalpha(),
        'prefix3': word[:3],
        'prefix4': word[:4],
        'suffix1': word[-1:],
        'suffix2': word[-2:],
        'suffix3': word[-3:],
        'suffix4': word[-4:],
    }
    return features
|
f63f22692a8198bba05091b8fdd62cd22f714159
| 49,488
|
import json
import requests
def download_rail_route(rr_file, verbose=False):
    """
    Download rail route data from the railrouter-sg repo
    (https://github.com/cheeaun/railrouter-sg).

    params
    ------
    rr_file (str): filename in the data directory in the repo
    verbose (bool): print downloaded route and pattern info

    returns
    -------
    rr_data (dict): the first route from the raw data
    patterns (list): that route's list of patterns
    """
    base_url = ('https://raw.githubusercontent.com/'
                'cheeaun/railrouter-sg/master/data/v2/')
    raw_text = requests.get(base_url + rr_file).text
    rr_data = json.loads(raw_text)['routes'][0]
    patterns = rr_data['patterns']
    if verbose:
        print("[Downloaded]")
        print("Route:", rr_data['name'])
        print("Patterns:")
        print('\n'.join(
            [(str(i) + ': ' + p['name']) for i, p in enumerate(patterns)]))
    return rr_data, patterns
|
93d5d16a5b7ba1afb11fc26c3d8a9df4a301dafa
| 49,490
|
import re
def valid_uniprot_ac_pattern(uniprot_ac):
    """
    Checks whether Uniprot AC is formally correct according to
    https://www.uniprot.org/help/accession_numbers
    This is no check whether it actually exists.
    :param uniprot_ac: Accession code to be checked
    :return: True if the entire string matches the accession format
    """
    ac_pat = "[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}"
    # re.match() anchors only at the start, so strings with trailing garbage
    # (e.g. "P12345XYZ") were wrongly accepted; fullmatch anchors both ends.
    return re.fullmatch(ac_pat, uniprot_ac) is not None
|
8478f998d1b8ff2b7f8343317056a9c2c0b36021
| 49,504
|
def AIC(params, log_likelihood_fun, data):
    """Calculate the AIC (Akaike information criterion): -2*logL + 2*k.

    Parameters
    _________
    params : tuple
        MLE parameters for the distribution
    log_likelihood_fun : function
        computes the log likelihood for the desired distribution
    data : array
        empirical dataset the likelihood is evaluated against

    Returns
    _________
    output : float
        AIC value
    """
    log_lik = log_likelihood_fun(params, data)
    num_params = len(params)
    return -2 * log_lik + 2 * num_params
|
48f812b68fa44320f83032ec927c4d900ae44620
| 49,507
|
def five_options_around(page):
    """ Build up to five page numbers centred on the current page for pagination. """
    current = page.number
    last = page.paginator.num_pages
    if current <= 3:
        # Near the start: first five pages (or fewer if there aren't five).
        window = range(1, min(5, last) + 1)
    elif current >= last - 2:
        # Near the end: last five pages (clamped to 1).
        window = range(max(last - 4, 1), last + 1)
    else:
        # Middle: two pages either side of the current one.
        window = range(max(1, current - 2), min(current + 2, last) + 1)
    return list(window)
|
b65b635df3fe0abd4c5674b5b8c41b47a5630f79
| 49,515
|
import base64
def parse_authorization_header(auth_header):
    """ Parse a Basic auth header and return (login, password).

    Only the FIRST colon separates login from password — per RFC 7617 the
    password itself may contain ':' characters.
    """
    encoded = auth_header.split(' ')[1]  # drop the 'Basic ' scheme prefix
    decoded = base64.b64decode(encoded).decode()
    # maxsplit=1 fixes truncation of passwords containing colons.
    credentials = decoded.split(':', 1)
    return credentials[0], credentials[1]
|
cc2db762ddf7b4ce0669a81f24bce9517785350a
| 49,516
|
def calc_kcorrected_properties(frequency, redshift, time):
    """
    Perform k-correction.

    :param frequency: observer frame frequency
    :param redshift: source redshift
    :param time: observer frame time
    :return: (source-frame frequency, source-frame time) — frequency is
             blueshifted by (1+z), time is compressed by (1+z)
    """
    factor = 1 + redshift
    return frequency * factor, time / factor
|
3a7c151a6777a0e11022e38e73ded45fc7a6706c
| 49,518
|
def tokenize_options(options_from_db, option_name, option_value):
    """
    Tokenize an options string stored in the database.

    The database stores values as "key1=value1,key2=value2,...". Each pair is
    split on its FIRST '=' and emitted as {option_name: key, option_value: value}.

    Args:
        options_from_db: Options string from the database (or None)
        option_name: dict key under which to store each option's name
        option_value: dict key under which to store each option's value
    Returns:
        List of tokenized option dicts (empty when options_from_db is None)
    """
    tokens = []
    if options_from_db is None:
        return tokens
    for raw_option in options_from_db.split(','):
        key, value = raw_option.split('=', 1)
        tokens.append({option_name: key, option_value: value})
    return tokens
|
88127c5e693064ef0c3c087b18bbf213c8a6184f
| 49,521
|
def check_int(num):
    """Check if argument can be interpreted as an integer.
    Arg:
        num: The value to test (string, number, or None).
    Returns:
        bool: True if ``int(num)`` succeeds, False otherwise.
    """
    if num is None:
        return False
    try:
        int(num)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string, non-numeric inputs (e.g. lists);
        # the previous ValueError-only handler let it propagate.
        return False
|
556bc91dbdece439bd66f7f8fa2182d2f9683c67
| 49,522
|
def find_min_max(shape):
    """Finds min/max coordinates for a given shape and returns a tuple
    of the form (minx, maxx, miny, maxy).
    shape: list of (x, y) points

    The previous version initialized the extremes to +/-1000, which gave
    wrong results whenever every coordinate lay outside [-1000, 1000]
    (e.g. all x > 1000 left minx at 1000). The extremes are now computed
    directly from the points; the legacy sentinel tuple is kept for the
    degenerate empty-shape input.
    """
    if not shape:
        # Preserve the historical sentinel result for an empty shape.
        return (1000, -1000, 1000, -1000)
    xs = [x for x, _ in shape]
    ys = [y for _, y in shape]
    return (min(xs), max(xs), min(ys), max(ys))
|
133bc42b9193a950869a784a5cf74b960e79eda1
| 49,523
|
def create_dict(breadcrumbs, value=None):
    """
    Create a nested dict out of the breadcrumbs, recursively.
    Each entry in the breadcrumbs should be a valid dictionary key.
    If value is None, the last string within the breadcrumbs becomes the
    final value; otherwise *value* is placed at the deepest level.

    The previous version nested both termination checks under
    ``value is not None``, so the value-less path never terminated
    (IndexError on empty breadcrumbs) and a supplied value was dropped
    when one breadcrumb remained.
    :param breadcrumbs: list of keys, outermost first
    :param value: value to store at the deepest level (optional)
    :return: nested dict (or the terminal value/key for short inputs)
    """
    if value is not None and not breadcrumbs:
        return value
    if value is None and len(breadcrumbs) == 1:
        # Last breadcrumb doubles as the final value.
        return breadcrumbs[0]
    return {breadcrumbs[0]: create_dict(breadcrumbs[1:], value)}
|
f2f4d2be32aa96b31703cc548edff3999bd42bd3
| 49,525
|
from typing import Dict
from typing import Any
from typing import List
async def get_sorted_agenda_items(
    agenda_items: Dict[int, Dict[str, Any]]
) -> List[Dict[str, Any]]:
    """
    Return all agenda items ordered primarily by weight and, for equal
    weights, by id (equivalent to a stable id-sort followed by a
    weight-sort).
    """
    return sorted(
        agenda_items.values(),
        key=lambda item: (item["weight"], item["id"]),
    )
|
d661488d7e508e86a974d98ec558856f5810a3e4
| 49,527
|
def _melt_keep_index(df, value_name="value"):
""" Fully melt a dataframe keeping index, setting new index as all but `value` """
id_vars = df.index.names
return (
df.reset_index()
.melt(id_vars=id_vars, value_name=value_name)
.set_index([*id_vars, df.columns.name])
)
|
94bfc7f663d62db2c36f15bc9f09a078cc978174
| 49,529
|
def findall_text(node, path):
    """Collect the ``.text`` of every element under *node* matching *path*."""
    texts = []
    for match in node.findall(path):
        texts.append(match.text)
    return texts
|
2a40b2442e50e58a64320539153e27acfde15a8f
| 49,533
|
import struct
def add(text: str) -> str:
    """
    Replace each astral-plane character in *text* with its UTF-16 surrogate
    pair, so indices match how most platforms measure string length for
    offset-based entities.
    Args:
        text: The text to add surrogate pairs to.
    Returns:
        The text with surrogate pairs.
    """
    pieces = []
    for ch in text:
        if 0x10000 <= ord(ch) <= 0x10FFFF:
            high, low = struct.unpack("<HH", ch.encode("utf-16le"))
            pieces.append(chr(high) + chr(low))
        else:
            pieces.append(ch)
    return "".join(pieces)
|
ffcaa7216686d2da09fae65719ed1a244a755f94
| 49,534
|
import torch
def tsn_sample(num_tokens, num_samples, training):
    """TSN-style sampling of `num_samples` indexes from `num_tokens`
    (requires num_tokens >= num_samples).
    args:
        num_tokens: int, num of total tokens
        num_samples: int, num of sampled tokens
        training: bool -- when True, jitter each anchor within its segment
    returns:
        indexes: long tensor of sampled frame indexes
    """
    if num_samples == 1:
        return torch.tensor([0], dtype=torch.long)
    positions = torch.arange(0, num_samples).to(torch.float)
    anchors = torch.floor((num_tokens - 1) * positions / (num_samples - 1))
    if not training:
        return anchors.to(torch.long)
    # Randomly shift every anchor except the last inside its segment.
    widths = anchors[1:] - anchors[:-1]
    anchors[:-1] += torch.rand((widths.size()[0],)) * widths
    return torch.floor(anchors).to(torch.long)
|
8821aa111e17c231a8ffc61afd43c9fd71e3efcc
| 49,535
|
def getCacheThumbName(path):
    """
    Return a thumb cache filename for *path*.
    :param path: string or unicode -- path to file
    Example::
        thumb = xbmc.getCacheThumbName('f:/videos/movie.avi')
    NOTE: stub implementation -- always returns the empty string.
    """
    return ""
|
f3b467d542842ab011bbbf22cee86399cf80d1e6
| 49,537
|
import torch
def mae(predictions, actuals):
    """
    Compute the mean absolute error between two tensors.
    - predictions: model outputs (Tensor)
    - actuals: ground-truth labels (Tensor)
    @returns:
        numpy scalar: mean(|actuals - predictions|)
    """
    abs_err = torch.abs(actuals - predictions)
    return torch.mean(abs_err).detach().numpy()
|
00cb15d83a06427947bcabb85e48ef4ffa0d2677
| 49,539
|
def split_path(path):
    """//foo/bar/baz -> ['foo', 'bar', 'baz'] (empty segments dropped)."""
    return list(filter(None, path.split('/')))
|
13376639f9597d598c2b69a844e232a7c059fc3a
| 49,540
|
def s2b(s):
    """Portable string-to-bytes conversion (3.x socket send/recv need bytes)."""
    return bytes(s, "utf-8")
|
92a2b6301858c80d856d80e1200f00b17e2e2327
| 49,542
|
from typing import Optional
def distribute_about_center(index: int, size: int,
                            max_loc: float = 1.0,
                            max_size: Optional[int] = None):
    """
    Map an item's index to an evenly-spaced coordinate in [0, max_loc],
    with the whole collection centered about max_loc / 2.
    :param index: The 0-based index of the item in its collection.
    :param size: The number of items in the item's collection.
    :param max_loc: The maximum location of an item.
    :param max_size: The maximum number of items in any collection; supply
                     it to share one spacing across collections, or leave
                     as None for per-collection spacing.
    """
    count = size if max_size is None else max_size
    # Avoid division by zero for single-item collections.
    denominator = count - 1 if count > 1 else 1
    spacing = max_loc / denominator
    first = max_loc / 2 - (size - 1) * spacing / 2
    return first + index * spacing
|
4dfae63fc3b7639c08f37ece62e8afad352f8d23
| 49,543
|
import fnmatch
def _matches(file, glob):
""" check if file matches a glob"""
return fnmatch.fnmatch(file, glob)
|
5a104720f76910bcb280350c440d4bea8d156b81
| 49,548
|
from datetime import datetime
import re
def get_date_from_string(date_string):
    """Transform the given ISO-8601-like date string into a naive datetime.

    The colon inside the UTC offset (e.g. ``+02:00``) is stripped so ``%z``
    can parse it on older Pythons, and the tzinfo is then discarded -- the
    wall-clock fields are kept as-is, not converted to UTC.
    """
    format_string = '%Y-%m-%dT%H:%M:%S%z'
    # Match both positive AND negative offsets; the old pattern only
    # handled '+', leaving e.g. '-05:00' with its colon intact.
    regexp = r"([+-]\d{1,2}):(\d{1,2})"
    normalized = re.sub(regexp, r"\1\2", date_string)
    return datetime.strptime(normalized, format_string).replace(tzinfo=None)
|
f6e6d713cecef451b226e57ddae550fccc91cae8
| 49,553
|
def shieldsio_markdown_badge_generator(badge_label=None,
badge_hover=None,
badge_link=None,
metric='count',
badge_color=None,
badge_style='flat-square'):
"""Generates badge generator for a badge hosted on shields.io.
Params:
badge_label - the label in the badge itself. For example "algorithms".
this label is displayed in the left box of the badge.
badge_hover - the hover text displayed when going over the badge. Used
in the Markdown markup for the badge.
badge_link - the link of the badge (in the markup).
metric - the name of the property in the saved JSON that contains the
data that should be displayed in the right box of the badge.
badge_color - the color of the badge.
badge_style - badge style. May be: 'plastic', 'flat', 'flat-square'.
"""
def _get_badge_url(data_url):
url = 'https://img.shields.io/badge/dynamic/json?'
params = {
'label': badge_label,
'query': metric,
'url': data_url,
'color': badge_color,
'style': badge_style,
}
for param, value in params.items():
if value is not None:
url += '&' + '{key}={value}'.format(key=param, value=value)
return url
def _get_markup(hover, badge_url):
hover = badge_hover or hover
return '[]({badge_link})'.format(
hover=hover,
badge_url=badge_url,
badge_link=badge_link)
def _generator(value):
url = _get_badge_url(value['data_url'])
return {
'provider': 'https://shields.io',
'url': url,
'markup': _get_markup(hover=value['label'], badge_url=url)
}
return _generator
|
2401eb53f0725b3594274b4b258e3dfe2c04b2d5
| 49,555
|
def convert_twos_compliment(bin_str):
    """Interpret a binary string as a two's complement signed integer."""
    value = int(bin_str, 2)
    if bin_str[0] == '1':
        # Leading 1 means negative: subtract 2**len to fold into range.
        value -= 1 << len(bin_str)
    return value
|
b5b62eb0f74aecd35de880d11470855e189fd713
| 49,567
|
import re
def clean_text(text):
    """Strip surrounding whitespace and remove punctuation marks and <...> tags."""
    pattern = r'[? 、;!,。“”?.~…,$\r\n《》——]|(<.*>)'
    return re.sub(pattern, '', text.strip())
|
b5da8949a30ccf1216508b94b1088c0a3dd8f547
| 49,568
|
import random
def random_choice(bucket):
    """Safely pick a random element from a list.

    Unlike ``random.choice``, an empty list yields an empty string rather
    than raising an exception.

    Parameters:
        bucket (list): A list to randomly choose from.

    Returns:
        str: The random choice, or "" when the list is empty.
    """
    return random.choice(bucket) if len(bucket) else ""
|
2b781ede8fff9c2f455eebab9bf64cdf01717bee
| 49,572
|
import torch
def mul(input, other, *args, **kwargs):
    """
    Multiply each element of ``input`` by ``other`` and return the resulting
    tensor. This is a thin wrapper that simply delegates to ``torch.mul``,
    forwarding any extra positional and keyword arguments (e.g. ``out=``),
    so it also works for the tree-structured tensors shown below.

    Examples::
        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.mul(
        ...     ttorch.tensor([1, 2, 3]),
        ...     ttorch.tensor([3, 5, 11]),
        ... )
        tensor([ 3, 10, 33])
    """
    product = torch.mul(input, other, *args, **kwargs)
    return product
|
027789f0ef85cb68e374994065a4eb19f815eb52
| 49,573
|
def add_entry_list_strings(list_string, entry):
    """
    Append *entry* followed by a newline marker to the list (in place)
    and return the same list.
    """
    list_string += [entry, '\n']
    return list_string
|
49e5beafa3748adfae397c8f44d5ef6abcdaef78
| 49,574
|
def interval_intersection_width(a, b, c, d):
    """Width of the overlap of real-line intervals [a,b] and [c,d]; 0 if disjoint."""
    overlap = min(b, d) - max(a, c)
    return overlap if overlap > 0 else 0
|
fc913c1d2510e9141ab40f4d56f4ce952f3d7d04
| 49,577
|
def filter_dict(data, keys):
    """Return a copy of :data restricted to the given :keys (missing keys skipped)."""
    return {key: data[key] for key in keys if key in data}
|
80cc8a7226c8b7929588191a48a5d3f1ce07bbbd
| 49,584
|
def format_schema_errors(e):
    """Format FlaskJsonSchema validation errors into a JSON-serializable dict."""
    messages = [validation_err.message for validation_err in e.errors]
    return {"error": e.message, "errors": messages}
|
9197f83adc03d031681ed097dbc96206dbdd8ac9
| 49,586
|
import hashlib
def _make_stmt_name(func):
"""
Create the prepared query name which is a string up-to 63 characters made up of
- an underscore
- the md5 hash of the <module.function> name
- an underscore
- up-to 29 characters of the function name
"""
m = hashlib.md5()
func_name = func.__name__
fully_qualified_name = f"{func.__module__}.{func.__name__}"
m.update(bytes(fully_qualified_name, encoding="utf-8"))
stmt_name = "_"
stmt_name += m.hexdigest()
stmt_name += "_" + func_name[:29]
return stmt_name
|
bad6f2530b7dbae2a814ab6eba030e6f0040a58f
| 49,592
|
def extract_headers(response):
    """Extract relevant headers from a response into a dict of str -> str.

    Byte keys/values are decoded as UTF-8; a few volatile headers are dropped.
    """
    skipped = ('Date', 'Content-Length', 'Etag', 'Last-Modified')
    result = {}
    for raw_key in response.headers.keys():
        key = raw_key.decode('utf-8') if isinstance(raw_key, bytes) else raw_key
        if key in skipped:
            continue
        raw_value = response.headers.get(key)
        if isinstance(raw_value, bytes):
            raw_value = raw_value.decode('utf-8')
        result[key] = raw_value
    return result
|
fde2f4d9c4e449f95c57fcc20dae3d003ec6bc9b
| 49,596
|
from typing import Any
from typing import Union
def issubclass_(
    cls: Any,
    types: Union[type[Any], tuple[type[Any], ...]],
) -> bool:
    """`issubclass` variant returning False (instead of raising) for non-type values."""
    if not isinstance(cls, type):
        return False
    return issubclass(cls, types)
|
f300d79f12a74ac549b6e7d66e1d865449b0eca9
| 49,599
|
def find_difference_sum_of_squares_and_square_of_sum(num: int) -> int:
    """Find the difference between the square of the sum and the sum of
    squares of the first `num` natural numbers.
    Args:
        num (int): How many natural numbers to consider.
    Returns:
        int: (1 + ... + num)**2 - (1**2 + ... + num**2).
    """
    naturals = range(1, num + 1)
    square_of_sum = sum(naturals) ** 2
    sum_of_squares = sum(n * n for n in naturals)
    return square_of_sum - sum_of_squares
|
3f0d5fbc75451f8c883d0aedc3b5a91cbffb1a77
| 49,606
|
def hello(name):
    """
    Return a greeting for whatever name you enter.
    Usage:
        >>> hello('Emiel')
        'Hello, Emiel!'
    """
    return f"Hello, {name}!"
|
ffbdf2ee0869b3fd20f0cb85eff8eae748e30d28
| 49,609
|
def merge(source, destination):
    """
    Merge *source* into *destination* recursively and return *destination*.
    Dict leaves are merged recursively; list leaves are extended; scalar
    leaves (int, float, str) that differ are concatenated into a
    "dest,source" string.

    Keys present only in *source* are now copied over -- the previous
    version raised KeyError on the list and scalar branches for them.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            # Get the existing sub-dict or create one.
            node = destination.setdefault(key, {})
            merge(value, node)
        elif isinstance(value, list):
            destination.setdefault(key, []).extend(value)
        elif key not in destination or destination[key] == value:
            destination[key] = value
        else:
            destination[key] = f'{destination[key]},{value}'
    return destination
|
7f771e83c5f89fa641fa79f863f4382ac5cc0ead
| 49,610
|
import re
def filter_content(content):
    """Sanitize *content* so it can safely be embedded in a JSON document."""
    normalized = (content.strip("'")
                  .replace("\n", "\\n")
                  .replace("\r", "")
                  .replace("\"", "'"))
    # Replace any non-ASCII character with a space.
    return re.sub(r'[^\x00-\x7f]', r' ', normalized)
|
267be82061ddc3614e8eb340a0eab065dd2a7b23
| 49,615
|
def cost_req_met(card, opponent_cost):
    """Check whether *card* meets the cost requirement against *opponent_cost*.

    With an intrigue value the gap must be positive and a multiple of it;
    otherwise the costs must match exactly.
    """
    gap = opponent_cost - card.cost
    if card.intrigue:
        return gap > 0 and gap % card.intrigue == 0
    return gap == 0
|
8a6dfc891b338a4dadcc412b08ad1b16fdc14dce
| 49,621
|
import torch
from typing import Optional
def squash(inputs: torch.Tensor, dim: Optional[int] = -1) -> torch.Tensor:
    """
    Apply the `squash` non-linearity along *dim*: rescales vectors to a
    norm below 1 while preserving their direction.
    Args:
        inputs (T): tensor to squash
        dim (int, optional): dimension along which to squash. Defaults to -1
    Returns:
        T: squashed tensor
    """
    sq_norm = torch.sum(torch.pow(inputs, 2), dim=dim, keepdim=True)
    scale = sq_norm / (1 + sq_norm)
    # Small epsilon keeps the division stable for zero vectors.
    unit = inputs / (torch.sqrt(sq_norm) + 1e-8)
    return scale * unit
|
91ee55ecedf0a8fac4098c37d9352775db85cc3c
| 49,625
|
def build_id(Z: int, A: int, state: str = "") -> int:
    """
    Build a canonical nuclide id from atomic number, atomic mass and
    energy state.
    Parameters
    ----------
    Z : int
        Atomic number.
    A : int
        Atomic mass.
    state : str
        Energy state: "" (ground), "m", or "n".
    Returns
    -------
    int
        Canonical nuclide id, e.g. build_id(1, 2) -> 10020000 and
        build_id(28, 56, 'm') -> 280560001.
    """
    state_codes = {"": 0, "m": 1, "n": 2}
    if state not in state_codes:
        raise ValueError(state + " is not a valid energy state.")
    return Z * 10000000 + A * 10000 + state_codes[state]
|
6686ac4841e6272ff7a683b4d2b267f95e1615be
| 49,626
|
def read_expected_result(filename):
    """
    Extract the expected outcome from the given file: the '%' comment lines
    immediately following a '%Expected outcome:' marker. A line starting
    with '% error' short-circuits and returns just the error text.
    :param filename: The name of the file
    :return: The expected result, stripped.
    :rtype: str
    """
    collected = ""
    in_block = False
    with open(filename) as handle:
        for raw in handle:
            line = raw.strip()
            if line.startswith('%Expected outcome:'):
                in_block = True
            elif in_block:
                if line.lower().startswith('% error'):
                    return line[len('% error'):].strip()
                if line.startswith('%'):
                    collected = collected + "\n" + line[1:]
                else:
                    # First non-comment line ends the expected block.
                    in_block = False
    return collected.strip()
|
98522127ecfeb9a32b9bf429eab35fccd90cc849
| 49,631
|
def make_field(name, _values, **kwargs):
    """
    specialization of make_parameters for parameters that define fields
    (aka color inputs). In this case _values is a mapping of name -> type
    where types must be one of 'rgb', 'lut', 'depth', 'value', 'luminance'
    or 'normals'.
    May also be given a set of valueRanges, which have min and max values for
    named 'value' type color selections.
    """
    # Materialize the dict views: on Python 3, dict_keys is not indexable,
    # so the old `values[0]` default lookup raised TypeError.
    values = list(_values.keys())
    img_types = list(_values.values())
    valid_itypes = ['rgb', 'lut', 'depth', 'value', 'luminance', 'normals']
    for itype in img_types:
        if itype not in valid_itypes:
            raise RuntimeError(
                "Invalid typechoice, must be one of %s" % str(valid_itypes))
    # Keep the lazy default: only index values[0] when no default was given.
    default = kwargs['default'] if 'default' in kwargs else values[0]
    if default not in values:
        raise RuntimeError("Invalid default, must be one of %s" % str(values))
    typechoice = 'hidden'
    label = kwargs['label'] if 'label' in kwargs else name
    properties = dict()
    properties['type'] = typechoice
    properties['label'] = label
    properties['values'] = values
    properties['default'] = default
    properties['types'] = img_types
    if 'valueRanges' in kwargs:
        properties['valueRanges'] = kwargs['valueRanges']
    return properties
|
f5eb045d17b75e4fb315a43c1b5611a82827fda9
| 49,642
|
import logging
def str_to_float(value: str) -> float:
    """
    Convert a string to float, logging and re-raising on failure.
    :param value: The string value.
    :return: The float value.
    """
    try:
        return float(value)
    except ValueError:
        logging.error(f"Cannot convert string value to float: {value=}")
        raise
|
152be429a66cbf4062ef9b288e0d71d89b2f7352
| 49,658
|
def empty_cells(state):
    """
    Collect the coordinates of every empty (== 0) cell on the board.
    :param state: the state of the current board (rows of cells)
    :return: a list of [row, col] pairs for the empty cells
    """
    return [[x, y]
            for x, row in enumerate(state)
            for y, cell in enumerate(row)
            if cell == 0]
|
b3e3944d9dd3c699ff2bfbffbd66407e1bda7d4a
| 49,659
|
def compute_best_test_losses(data, k, total_queries):
    """
    Given full data from a completed NAS algorithm, report the best
    (lowest) test loss seen after every multiple of k queries.
    :return: list of (query, best_test_loss) tuples
    """
    checkpoints = []
    for query in range(k, total_queries + k, k):
        best = min(d[-1] for d in data[:query])
        checkpoints.append((query, best))
    return checkpoints
|
024dc22fa93c6577193da484d6de499be4ecbf00
| 49,662
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.