# content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
# |---|---|---|
def bson2bool(bval: bytes) -> bool:
    """Decode a single-byte BSON Boolean into a Python bool.

    A BSON boolean is one byte: 0x00 means False, any other value True.
    """
    byte_value = ord(bval)
    return byte_value != 0
def part1(counts):
    """Part 1: total the occurrence counts of every safe ingredient.

    Assumes `counts` has already had its unsafe ingredients discarded.
    """
    total = 0
    for occurrences in counts.values():
        total += occurrences
    return total
def make_sgml_safe(s, reverse=False, keep_turn=True):
    """Return a version of the string safe to embed in an SGML document.

    Angle brackets and ampersands become '-LAB-', '-RAB-' and '-AMP-'
    (needed for creating ``.name`` and ``.coref`` files).  With
    ``reverse=True`` the substitution is undone.  If ``keep_turn`` is set,
    '<TURN>' in the input becomes '[TURN]' rather than '-LAB-TURN-RAB-'.
    """
    if not reverse and keep_turn:
        s = s.replace("<TURN>", "[TURN]")
    # Order matters: '&' is handled last so the escape markers themselves
    # are never re-escaped.
    for plain, escaped in (("<", "-LAB-"), (">", "-RAB-"), ("&", "-AMP-")):
        if reverse:
            s = s.replace(escaped, plain)
        else:
            s = s.replace(plain, escaped)
    return s
def pv_f(fv, r, n):
    """Estimate the present value of a future cash flow.

    fv : future value
    r  : discount rate per period
    n  : number of periods

    Formula: fv / (1 + r) ** n

    >>> pv_f(100, 0.1, 1)
    90.9090909090909
    >>> pv_f(r=0.1, fv=100, n=1)
    90.9090909090909
    """
    discount_factor = (1 + r) ** n
    return fv / discount_factor
def cardValue(card):
    """Return the numeric value of a given card.

    The first character of ``card`` is the rank: digits '2'-'9' map to
    their face value, 'T' to 10, 'J' to 11, 'Q' to 12, 'K' to 13 and
    'A' to 14.  Returns None for an unrecognized rank, matching the
    original implicit fall-through behavior.
    """
    rank = card[0]
    if "2" <= rank <= "9":
        return int(rank)
    # Mapping lookup replaces the long if/elif chain of the original.
    return {"T": 10, "J": 11, "Q": 12, "K": 13, "A": 14}.get(rank)
import dis
import io
def distext(func):
    """Like dis.dis, but return the disassembly as a string.

    Uses ``StringIO.getvalue()`` instead of the original seek/read pair.
    """
    buffer = io.StringIO()
    dis.dis(func, file=buffer)
    return buffer.getvalue()
def cut_apply(variable, value, cut_type):
    """Build a predicate applying a cut on one variable of an event row.

    Args:
        variable (str): variable name to cut on
        value (float): cut value
        cut_type (str): "greater" (keep >= value) or "lower" (keep <= value)

    Returns:
        function: predicate taking an event mapping; raises
        NotImplementedError when called with an unknown ``cut_type``.
    """
    def cut_func(event):
        if cut_type == "greater":
            return event[variable] >= value
        if cut_type == "lower":
            return event[variable] <= value
        raise NotImplementedError
    return cut_func
def calc_risk_return_ratio(self):
    """
    Return / risk ratio: the Sharpe ratio without factoring in the
    risk-free rate.
    """
    average = self.mean()
    volatility = self.std()
    return average / volatility
import re
def extract_delex_placeholders(utt):
    """Extract the delexicalized placeholder tokens from the utterance.

    Returns the set of full placeholder names (e.g. ``'name_place'``).
    The group is non-capturing: the original capturing group made
    ``re.findall`` return only the prefixes ('name'/'near') rather than
    the placeholders themselves.
    """
    pattern = r"(?:name|near)_place"
    return set(re.findall(pattern, utt))
import torch
def depth_softargmin(cost, gamma):
    """Collapse a cost volume into a depth map of shape [N, 1, H, W].

    Arguments:
        cost: 3D cost volume tensor of shape [N, D, H, W]
        gamma: discrete depth values [D]
    Returns:
        a depth map of shape [N, 1, H, W] — the depth-weighted sum of
        the cost volume along the D axis.
    """
    weights = gamma.unsqueeze(-1).unsqueeze(-1)  # [D] -> [D, 1, 1] for broadcasting
    weighted = cost * weights
    return weighted.sum(dim=1, keepdim=True)
def IsValidFilename(filename):
    """IsValidFilename(filename) -> bool

    Determine whether the given filename is valid.  It is invalid if it
    contains any of the characters: \\ / : * ? " < > |

    arguments:
        filename
            string corresponding to the filename to check.
    returns:
        bool corresponding to whether the given filename is valid.
    """
    # any() over the forbidden set replaces nine chained .find() calls.
    return not any(ch in filename for ch in '\\/:*?"<>|')
import torch
def sphere_distance_torch(x1, x2, diag=False):
    """
    Riemannian (great-circle) distance between points on a sphere manifold.

    Parameters
    ----------
    :param x1: points on the sphere, N1 x dim or b1 x ... x bk x N1 x dim
    :param x2: points on the sphere, N2 x dim or b1 x ... x bk x N2 x dim

    Optional parameters
    -------------------
    :param diag: return only the diagonal instead of the full distance
        matrix; requires `x1 == x2` shapes.

    Returns
    -------
    :return: distances, N1 x N2 (or batched) when diag is False
    """
    if diag is False:
        # Broadcast-expand so every pair (i, j) lines up along two new axes.
        lhs = x1.unsqueeze(-2)
        rhs = x2.unsqueeze(-3)
        # Tile both operands to b1 x ... x N1 x N2 x dim.
        lhs = torch.cat(rhs.shape[-2] * [lhs], dim=-2)
        rhs = torch.cat(lhs.shape[-3] * [rhs], dim=-3)
        # Shape for batched inner products.
        lhs = lhs.unsqueeze(-2)
        rhs = rhs.unsqueeze(-1)
        # Pairwise dot products (each nominally in [-1, 1] for unit vectors).
        inner = torch.bmm(
            lhs.view(-1, 1, lhs.shape[-1]),
            rhs.view(-1, rhs.shape[-2], 1),
        ).view(lhs.shape[:-2])
    else:
        # Row-wise dot products only.
        lhs = x1.unsqueeze(-1).transpose(-1, -2)
        rhs = x2.unsqueeze(-1)
        inner = torch.bmm(lhs, rhs).squeeze(-1)
    # Clamp slightly inside [-1, 1]: keeps acos finite and its gradient
    # free of NaNs during backward.
    inner = inner.clamp(-1. + 1e-15, 1. - 1e-15)
    return torch.acos(inner)
def capitalcase(string: str) -> str:
    """Capitalize the first letter of a string.

    The casing of all the other letters remains unaltered, e.g.,
    ``"fooBar"`` → ``"FooBar"``.

    Args:
        string (:obj:`str`):
            The string to capitalize.

    Returns:
        :obj:`str`: The capitalized string.
    """
    # Single empty-check: the original duplicated it in a trailing
    # conditional expression that could never trigger.
    if not string:
        return ""
    return f"{string[0].upper()}{string[1:]}"
def dict_combine(*dict_list) -> dict:
    """Return the union of several dictionaries.

    When duplicate keys occur, the value from the later dictionary in
    the argument list wins (same semantics as ``{**d1, **d2, ...}``).

    Returns:
        dict: the combined dictionary
    """
    merged = {}
    for mapping in dict_list:
        merged.update(mapping)
    return merged
import ipaddress
def is_same_subnet(addr1, addr2, subnet):
    """
    Check whether two given addresses belong to the same subnet.

    :param addr1: first IP address (string or address object)
    :param addr2: second IP address
    :param subnet: prefix length or netmask shared by both
    :return: True when both addresses fall in the same network
    """
    network1 = ipaddress.ip_network((addr1, subnet), strict=False)
    network2 = ipaddress.ip_network((addr2, subnet), strict=False)
    # Return the comparison directly instead of if/True/False.
    return network1 == network2
def _get_table_tags(dynamodb_client, table_arn):
"""
Extracts tags from the given DynamoDB table ARN
:param dynamodb_client: DynamoDB client to work with
:param table_arn: ARN of the source DynamoDB table
:return: List of Tags
"""
tags = []
kwargs = {}
while True:
response = dynamodb_client.list_tags_of_resource(
ResourceArn=table_arn,
**kwargs
)
tags += response['Tags']
if 'NextToken' not in response:
break
kwargs['NextToken'] = response['NextToken']
# Add the Source Table as a tag (just for the record)
tags.append(
{'Source_Table': table_arn}
)
return tags | 5b6dc97c1c0ae109968f3c5fdd18508acb58e1e8 | 104,805 |
def launch_modes(launch_mode_conf):
    """Set of launch modes in which the tests will actually be run.

    Mapping from `launch_mode_conf`:
    - 'inprocess'  -> {'inprocess'}
    - 'subprocess' -> {'subprocess'}
    - 'both'       -> {'inprocess', 'subprocess'}
    - None         -> {'inprocess'}
    """
    if launch_mode_conf is None:
        return {'inprocess'}
    if launch_mode_conf == 'both':
        return {'inprocess', 'subprocess'}
    return {launch_mode_conf}
def convert_device_size(unformatted_size, units_to_covert_to):
    """
    Convert a size string to a number in the requested units.

    Args:
        unformatted_size (str): size to convert, a magnitude followed by a
            two-character binary-style suffix (e.g. '1Gi', '100Mi')
        units_to_covert_to (str): target units (TB/GB/MB/KB/B)

    Returns:
        int or float: the converted size (decimal, factor-1000 steps)
    """
    source_units = unformatted_size[-2:]
    magnitude = int(unformatted_size[:-2])
    # Lookup table: outer key is the target unit, inner key the source
    # suffix; values pre-apply the factor to the parsed magnitude.
    factors = {
        "TB": {"Ti": magnitude, "Gi": magnitude / 1000, "Mi": magnitude / 1e6, "Ki": magnitude / 1e9},
        "GB": {"Ti": magnitude * 1000, "Gi": magnitude, "Mi": magnitude / 1000, "Ki": magnitude / 1e6},
        "MB": {"Ti": magnitude * 1e6, "Gi": magnitude * 1000, "Mi": magnitude, "Ki": magnitude / 1000},
        "KB": {"Ti": magnitude * 1e9, "Gi": magnitude * 1e6, "Mi": magnitude * 1000, "Ki": magnitude},
        "B": {"Ti": magnitude * 1e12, "Gi": magnitude * 1e9, "Mi": magnitude * 1e6, "Ki": magnitude * 1000},
    }
    return factors[units_to_covert_to][source_units]
def implemented(add_network=True, add_xgboost=True):
    """
    Hard-coded list of available (module, class) algorithm pairs.

    Optional backends (keras network, xgboost) are appended only when
    the corresponding flag allows it.
    """
    algorithms = [
        # Scikit-learn algorithms
        ('adaboost', 'MetaAdaBoostClassifier'),
        ('gbm', 'MetaGradBoostingClassifier'),
        ('logistic_regression', 'MetaLogisticRegressionClassifier'),
        ('nearest_neighbors', 'MetaKNearestNeighborClassifier'),
        ('naive_bayes', 'MetaBernoulliNBayesClassifier'),
        ('naive_bayes', 'MetaGaussianNBayesClassifier'),
        ('naive_bayes', 'MetaMultinomialNBayesClassifier'),
        ('random_forest', 'MetaRandomForestClassifier'),
        ('sgdescent', 'MetaSGDClassifier'),
        ('svm', 'MetaSVMClassifier'),
        ('tree', 'MetaDecisionTreeClassifier'),
    ]
    if add_network:  # Keras
        algorithms.append(('neural_network', 'MetaNeuralNetworkClassifier'))
    if add_xgboost:  # Xgboost
        algorithms.append(('xgb', 'MetaXGBoostClassifier'))
    return algorithms
def build_kwargs_read(spec: dict, ext: str) -> dict:
    """Build keyword arguments for the pandas ``read_*`` functions.

    When ``spec['input']`` declares columns, maps them to the column
    selection argument appropriate for the file extension
    ('columns' for parquet, 'usecols' for csv/excel).
    """
    col_arg_names = {'.parquet': 'columns',
                     '.xls': 'usecols',
                     '.xlsx': 'usecols',
                     '.csv': 'usecols'}
    kwargs = {}
    # Direct membership test — no need to materialize list(keys()).
    if 'columns' in spec['input']:
        kwargs[col_arg_names[ext]] = list(spec['input']['columns'])
    return kwargs
def indiceStringToList(string):
    """
    Parse a ';'-separated list of ','-separated integers.

    For example, turns ``"1,2,63,7;5,2,986;305,3;"``
    into ``[[1, 2, 63, 7], [5, 2, 986], [305, 3], []]``
    (the trailing ';' yields a final empty group).
    """
    result = []
    for group in string.split(';'):
        result.append([int(token) for token in group.split(',') if token != ''])
    return result
import re
def remove_control_characters(text):
    """Remove C0 control characters not supported in XML 1.0.

    Tab (\\x09), LF (\\x0A) and CR (\\x0D) are deliberately kept, as XML
    1.0 allows them.  Falsy input (None, '') is returned unchanged.
    """
    if not text:
        return text
    return re.sub(r"[\x00-\x08\x0B-\x0C\x0E-\x1F]", "", text)
def is_merge(complete, part1, part2):
    """Check whether `complete` is a merge (interleaving) of `part1`
    and `part2`, with each part's character order preserved.

    Replaces the original greedy single-pass scan, which returned a
    false negative whenever a character had to be taken from the other
    part first (e.g. ``is_merge("xaxb", "xb", "xa")``).  Dynamic
    programming considers both choices.
    """
    n1, n2 = len(part1), len(part2)
    if n1 + n2 != len(complete):
        return False
    # dp[i][j]: can part1[:i] and part2[:j] merge into complete[:i+j]?
    dp = [[False] * (n2 + 1) for _ in range(n1 + 1)]
    dp[0][0] = True
    for i in range(n1 + 1):
        for j in range(n2 + 1):
            if i and dp[i - 1][j] and part1[i - 1] == complete[i + j - 1]:
                dp[i][j] = True
            if j and dp[i][j - 1] and part2[j - 1] == complete[i + j - 1]:
                dp[i][j] = True
    return dp[n1][n2]
def GetClosestSupportedMotionBuilderVersion(Version: int):
    """
    Map a MotionBuilder version to the closest supported one:
    anything below 2018 becomes 2018, 2021 becomes 2022, everything
    else is already supported.
    """
    if Version < 2018:
        return 2018
    return 2022 if Version == 2021 else Version
def format_terminal_output(result, stdout_key='stdout', stderr_key='stderr'):
    """
    Format the recorded terminal output (std{out,err}) of a result.

    :param result: result mapping to inspect
    :param stdout_key: key under which stdout was recorded
    :param stderr_key: key under which stderr was recorded
    :return: formatted output message; a placeholder sentence when both
        streams are present but empty, '' when neither key exists
    """
    message = ''
    # Non-empty streams are appended in stdout-then-stderr order.
    if stdout_key in result and len(result[stdout_key]) > 0:
        message += '{}\n{}\n'.format('Output to stdout:', result[stdout_key])
    if stderr_key in result and len(result[stderr_key]) > 0:
        message += '{}\n{}\n'.format('Output to stderr:', result[stderr_key])
    stdout_empty = stdout_key in result and len(result[stdout_key]) == 0
    stderr_empty = stderr_key in result and len(result[stderr_key]) == 0
    if stdout_empty and stderr_empty:
        message = 'No output was written to stdout or stderr!'
    return message
def open_file(directory, name):
    """
    Open ``directory/name`` in "w+" mode (created if it does not
    exist, truncated otherwise) and return the file object.

    :param directory: directory containing the file
    :param name: file name
    :return: file object (the caller is responsible for closing it)
    """
    path = f'{directory}/{name}'
    return open(path, "w+")
def template_class(this_object):
    """Render the object's class wrapped in a <code> element."""
    return f'<code>{this_object.__class__}</code>'
def worker_should_exit(message):
    """Return the 'should_exit' flag carried by *message* — whether the
    worker receiving it should terminate."""
    return message["should_exit"]
def replace_null(df, value, columns="*"):
    """
    Replace nulls in a dataframe with the specified value.

    Parameters
    ----------
    df : dataframe exposing a ``fillna(value, subset=...)`` method
        (presumably a Spark DataFrame — confirm against callers).
    value : int, float, str, or dict
        Replacement value.  If a dict, it must map column name to
        replacement value and the columns argument is ignored by
        ``fillna`` itself.
    columns : "*" (all columns), a single column name, or a list of
        column names to consider.

    Returns
    -------
    dataframe with null values replaced.
    """
    if columns == "*":
        subset = None
    elif isinstance(columns, str):
        subset = [columns]
    else:
        subset = columns
    if subset is not None:
        assert isinstance(subset, list), "Error: columns argument must be a list"
    assert isinstance(value, (int, float, str, dict)), "Error: value argument must be an int, long, float, string, or dict"
    return df.fillna(value, subset=subset)
from typing import List
import pickle
def load_pickle(dataset_dir: str) -> List:
    """Load (read) a pickle file.

    Parameters
    ----------
    dataset_dir: str
        Path of the pickle file to read.

    Returns
    -------
    List
        The unpickled object.

    The file is opened in a ``with`` block so the handle is always
    closed — the original leaked it.
    """
    assert isinstance(dataset_dir, str), "The dataset_dir must be a string object"
    with open(dataset_dir, 'rb') as fp:
        return pickle.load(fp)
def use_aws_kms_store(session, region, kmskeyid, access_id, secret,
                      encryption_pwd=None, old_encryption_pwd=None,
                      return_type=None, **kwargs):
    """
    Set the global encryption password on the VPSA via the AWS KMS store.

    CAUTION: THIS PASSWORD IS NOT STORED ON THE VPSA — IT IS THE USER'S
    RESPONSIBILITY TO MAINTAIN ACCESS TO THE PASSWORD.  LOSS OF THE
    PASSWORD MAY RESULT IN UNRECOVERABLE DATA.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.
    :type region: str
    :param region: AWS KMS region code.  Required.
    :type kmskeyid: str
    :param kmskeyid: AWS KMS key id.  Required.
    :type access_id: str
    :param access_id: AWS KMS access id.  Required.
    :type secret: str
    :param secret: AWS KMS secret password.  Required.
    :type encryption_pwd: str
    :param encryption_pwd: Master encryption password to set.  Optional.
    :type old_encryption_pwd: str
    :param old_encryption_pwd: Previous master encryption password;
        required when replacing one that is already set.  Optional.
    :type return_type: str
    :param return_type: 'json' for a JSON string, otherwise a Python
        dictionary is returned (the default).
    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    body_values = {'region': region, 'kmskeyid': kmskeyid,
                   'access_id': access_id, 'secret': secret}
    # Only send the optional password fields when they are truthy.
    for field, field_value in (('encryption_pwd', encryption_pwd),
                               ('old_encryption_pwd', old_encryption_pwd)):
        if field_value:
            body_values[field] = field_value
    return session.post_api(path='/api/settings/encryption.json',
                            body=body_values,
                            return_type=return_type, **kwargs)
import re
def rfc3339_datetime_re(anchor=True):
    """
    Returns a regular expression for syntactic validation of ISO date-times,
    RFC-3339 date-times to be precise.
    >>> bool( rfc3339_datetime_re().match('2013-11-06T15:56:39Z') )
    True
    >>> bool( rfc3339_datetime_re().match('2013-11-06T15:56:39.123Z') )
    True
    >>> bool( rfc3339_datetime_re().match('2013-11-06T15:56:39-08:00') )
    True
    >>> bool( rfc3339_datetime_re().match('2013-11-06T15:56:39.123+11:00') )
    True
    It anchors the matching to the beginning and end of a string by default ...
    >>> bool( rfc3339_datetime_re().search('bla 2013-11-06T15:56:39Z bla') )
    False
    ... but that can be changed:
    >>> bool( rfc3339_datetime_re( anchor=False ).search('bla 2013-11-06T15:56:39Z bla') )
    True
    >>> bool( rfc3339_datetime_re( anchor=False ).match('2013-11-06T15:56:39Z bla') )
    True
    Keep in mind that re.match() always anchors at the beginning:
    >>> bool( rfc3339_datetime_re( anchor=False ).match('bla 2013-11-06T15:56:39Z') )
    False
    It does not check whether the actual value is a semantically valid datetime:
    >>> bool( rfc3339_datetime_re().match('9999-99-99T99:99:99.9-99:99') )
    True
    If the regular expression matches, each component of the matching value will be exposed as a
    captured group in the match object.
    >>> rfc3339_datetime_re().match('2013-11-06T15:56:39Z').groups()
    ('2013', '11', '06', '15', '56', '39', None, 'Z')
    >>> rfc3339_datetime_re().match('2013-11-06T15:56:39.123Z').groups()
    ('2013', '11', '06', '15', '56', '39', '123', 'Z')
    >>> rfc3339_datetime_re().match('2013-11-06T15:56:39.123-08:30').groups()
    ('2013', '11', '06', '15', '56', '39', '123', '-08:30')
    """
    # Raw string: '\d' in a plain string is an invalid escape sequence
    # (DeprecationWarning, SyntaxWarning from Python 3.12).
    return re.compile(
        ('^' if anchor else '') +
        r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:\.(\d+))?(Z|[+-]\d{2}:\d{2})' +
        ('$' if anchor else ''))
def BVNeg(a):
    """ Create a negation (two's complement) of a bit-vector.
    See also the __neg__ overload (unary - operator) for BitVecRef.
    >>> x = BitVec('x', 32)
    >>> BVNeg(x)
    -x
    """
    negated = -a
    return negated
import math
def VD_5111(jointType, boltHead, d, d3, lGew, Es, EBI):
    """
    Axial resilience of a bolted joint, section 5.1.1.1
    (equation numbers appear to follow the VDI 2230 guideline —
    confirm against the source document).

    Parameters
    ----------
    jointType : 'ESV' (tapped blind hole joint) or
                'DSV' (through bolted joint with nut)
    boltHead  : 'socket' or any other value
    d         : bolt diameter
    d3        : bolt minor diameter of the thread
    lGew      : length of the free loaded thread
    Es        : bolt Young's modulus
    EBI       : Young's modulus of the component with internal thread

    Returns
    -------
    (delta_SK, delta_Gew, delta_GM) : resiliences of the bolt head, the
    unengaged loaded thread, and the engaged-thread + nut/tapped region.
    """
    thread_minor_area = (math.pi / 4.0) * d3 ** 2      # Ad3, eq. (5.1/7)
    nominal_area = (math.pi / 4.0) * d ** 2            # AN, eq. (5.1/9)
    # Elastic resilience of the engaged thread, eqs. (5.1/5)-(5.1/6).
    delta_G = (0.50 * d) / (Es * thread_minor_area)
    # Elastic resilience of the nut or tapped-thread region.
    if jointType == 'ESV':
        # Tapped blind hole joint, eqs. (5.1/8)/(5.1/11).
        delta_M = (0.33 * d) / (EBI * nominal_area)
    else:
        # Through bolted joint with nut (DSV), eqs. (5.1/8)/(5.1/10).
        delta_M = (0.40 * d) / (Es * nominal_area)
    # Engaged thread plus nut/tapped region, eq. (5.1/4).
    delta_GM = delta_G + delta_M
    # Unengaged loaded part of the thread, eq. (5.1/12).
    delta_Gew = lGew / (Es * thread_minor_area)
    # Bolt head: hexagon socket screws use a shorter substitute length,
    # eqs. (5.1/13)-(5.1/15).
    head_length = 0.40 * d if boltHead == 'socket' else 0.50 * d
    delta_SK = head_length / (Es * nominal_area)
    # The total bolt resilience delta_S = delta_SK + delta_Gew + delta_GM
    # (eq. 5.1/3) is left to the caller.
    return delta_SK, delta_Gew, delta_GM
def get_site_url(site):
    """Normalize and validate a ``site`` URL.

    :param str site: the site to check
    :return: a valid site URL (http:// is prepended when no scheme given)
    :raise ValueError: when site is empty, uses a non-http(s) scheme,
        lacks a dot in the domain, or ends in a LAN-local/invalid TLD
    """
    cleaned = site.strip() if site else ''
    if not cleaned:
        raise ValueError('What site do you want to check?')
    if not cleaned.startswith(('http://', 'https://')):
        if '://' in cleaned:
            # Some other scheme (ftp://, ...) — refuse it outright.
            protocol = cleaned.split('://')[0] + '://'
            raise ValueError('Try it again without the %s' % protocol)
        cleaned = 'http://' + cleaned
    # Host portion, with any :port stripped.
    domain = cleaned.split('/')[2].split(':')[0]
    if '.' not in domain:
        raise ValueError('I need a fully qualified domain name (with a dot).')
    reserved_tlds = ('.local', '.example', '.test', '.invalid', '.localhost')
    if domain.endswith(reserved_tlds):
        raise ValueError("I can't check LAN-local or invalid domains.")
    return cleaned
def pretty_print_seconds(seconds, n_labels=0, separator=" "):
    """
    Convert a number of seconds to a readable time string.

    Parameters
    ----------
    seconds : float or int
        number of seconds to represent; truncated with int()
    n_labels : int
        number of unit levels to show (days/hours/minutes/seconds).
        0 shows all levels; a positive value rounds at the last shown
        level; a negative value shows abs(n_labels) levels with the
        final one rendered as a 2-decimal float.
    separator : string
        separator between unit levels
    """
    unit_order = ['day', 'hour', 'minute', 'second']
    unit_seconds = {'day': 86400, 'hour': 3600, 'minute': 60, 'second': 1}
    total = int(seconds)

    def split_units(value):
        # Returns (whole counts per unit, fractional value per unit as
        # seen *before* that unit is subtracted out).
        whole = {}
        fractional = {}
        for unit in unit_order:
            fractional[unit] = float(value) / unit_seconds[unit]
            whole[unit], value = divmod(value, unit_seconds[unit])
        return whole, fractional

    def join_units(whole):
        return sum(whole[u] * unit_seconds[u] for u in whole.keys())

    whole, fractional = split_units(total)
    show_decimal_last = (n_labels < 0)
    # Largest unit with a non-zero count; 'second' when everything is 0.
    leading_unit = "second"
    for unit in unit_order:
        if whole[unit] > 0:
            leading_unit = unit
            break
    leading_index = unit_order.index(leading_unit)
    label_budget = len(unit_order) - leading_index
    if n_labels != 0 and abs(n_labels) < label_budget:
        label_budget = abs(n_labels)
    last_index = leading_index + len(unit_order) - label_budget
    if last_index >= len(unit_order):
        last_index = len(unit_order) - 1
    last_unit = unit_order[last_index]
    if leading_unit != "second" and n_labels > 0:
        # Round at the last displayed unit, zero out everything finer,
        # then re-decompose the adjusted total.
        if fractional[last_unit] - whole[last_unit] >= 0.5:
            whole[last_unit] += 1
        for unit in unit_order[last_index + 1:]:
            whole[unit] = 0
        # NOTE(review): the original discarded the refreshed fractional
        # parts here, leaving `fractional` stale — behavior preserved.
        whole, _refreshed = split_units(join_units(whole))
    labels = {u: u if whole[u] == 1 else u + "s" for u in unit_order}
    emitted = 0
    text = ""
    for unit in unit_order[leading_index:]:
        count = whole[unit]
        label = labels[unit]
        frac = fractional[unit]
        if count > 0 and emitted < label_budget - 1:
            text += str(count) + " " + label + separator
            emitted += 1
        elif emitted == label_budget - 1:
            if show_decimal_last and unit != 'second':
                text += "%.2f %s" % (frac, unit + 's')
            else:
                text += str(count) + " " + label
            emitted += 1
    return text
def negate(func, name=None, doc=None):
    """Create a function returning the boolean negation of 'func'.

    N = negate(f)
    N(x)  # equivalent to: not f(x)

    The name and docstring of the new function can be supplied through
    the 'name' and 'doc' arguments.
    """
    def inverted(*args, **kwargs):
        return not func(*args, **kwargs)
    inverted.__name__ = name if name else "negate({})".format(func.__name__)
    inverted.__doc__ = doc
    return inverted
def extract_file_name(file_path=None, include_suffix=True):
    """
    Extract the file name from a '/'-separated path string.

    file_path: str
        The file path; falsy input returns None.
    include_suffix: boolean
        If True, return the full name with suffix; if False, return the
        portion before the *first* dot (e.g. "myfile.zip" -> "myfile").
    """
    if not file_path:
        return None
    basename = file_path.split('/')[-1]
    if include_suffix:
        return basename
    return basename.split('.')[0]
def if_affirmative(needle: str):
    """
    Return True when *needle* is an affirmative string
    ("true", "yes", "1" or "ok", case/whitespace-insensitive).
    """
    assert isinstance(needle, str)
    normalized = needle.strip().lower()
    return normalized in {"true", "yes", "1", "ok"}
def get_vi_dtype(vi):
    """
    Return a value_info's element data type.

    :param vi: graph.value_info (presumably an ONNX ValueInfoProto —
        confirm against callers)
    :return: vi.type.tensor_type.elem_type
    """
    tensor_type = vi.type.tensor_type
    return tensor_type.elem_type
def parse_course_info(course):
    """
    Parse course information retrieved from Moodle.

    :param course: JSON response from a Moodle call
    :type course: dict(str, list(dict(str, int)))
    :return: the full name of the first course in the response
    :rtype: str
    """
    first_course = course['courses'][0]
    return first_course['fullname']
from typing import List
def score(
    source_confidence: List[float], score: List[float], mentioned_feeds_count: int
) -> int:
    """
    Calculate the final score for an IoC.

    Parameters:
        source_confidence (List[float], 0..1) — `source_confidence` of
            every CTI feed mentioning the IoC.
        score (List[float], 0..1) — `score` of every CTI feed mentioning
            the IoC.
        mentioned_feeds_count (int) — number of feeds mentioning the IoC.

    Returns:
        IoC final score (int, 0..100): confidence-squared-weighted mean
        of the feed scores, scaled to percent.
    """
    weighted_sum: float = 0
    confidence_sum: float = 0
    for i in range(mentioned_feeds_count):
        weighted_sum += (source_confidence[i] ** 2) * score[i]
        confidence_sum += source_confidence[i]
    return round(weighted_sum / confidence_sum * 100)
import base64
import logging
def check_b64(b64):
    """Check that the input is non-empty, valid base64.

    :param b64: base64 string or bytes (the original silently failed on
        str input because ``b64encode`` returns bytes)
    :returns: True when the input round-trips through decode/encode
    :raises TypeError: if input is not base64
    :raises ValueError: if input is empty (promised by the original
        docstring but never raised)
    """
    if not b64:
        logging.info('received empty input')
        raise ValueError("input is empty")
    data = b64.encode('ascii') if isinstance(b64, str) else b64
    try:
        if base64.b64encode(base64.b64decode(data)) == data:
            return True
    except Exception:
        # Fall through to the TypeError below instead of a bare except
        # swallowing everything.
        pass
    logging.info('not recieving base64 input')
    raise TypeError("input is not base64")
def get_srid(df):
    """Return the EPSG SRID from `df.crs`, or 0 when the CRS is absent
    or has no EPSG code."""
    crs = df.crs
    if crs is None:
        return 0
    return crs.to_epsg() or 0
def merge_sort(arr):
    """
    Return the list 'arr' sorted in nondecreasing order in O(n log n)
    time (sorts in place and returns the same list).

    Fixes two defects of the original: equal adjacent elements hung the
    merge loop forever (neither strict `<` branch advanced a pointer),
    and debug `print` calls polluted stdout.
    """
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    i = j = k = 0
    # Standard stable merge: `<=` keeps equal elements from the left run
    # first and guarantees progress.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1
    # Drain whichever run still has elements.
    while i < len(left):
        arr[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        arr[k] = right[j]
        j += 1
        k += 1
    return arr
def _getWordCount(start, length, bits):
"""
Get the number of words that the requested
bits would occupy. We have to take into account
how many bits are in a word and the fact that the
number of requested bits can span multipe words.
"""
newStart = start % bits
newEnd = newStart + length
totalWords = (newEnd-1) / bits
return totalWords + 1 | a182c1fa89b3b95f1a654aeac8570eae42b88fa1 | 104,891 |
def _between_symbols(string, c1, c2):
"""Grab characters between symbols in a string.
Will return empty string if nothing is between c1 and c2."""
for char in [c1, c2]:
if char not in string:
raise ValueError("Couldn't find character {} in string {}".format(
char, string))
return string[string.index(c1)+1:string.index(c2)] | 26e2d84eaaedcc4ca7b0b0a890368b6864994da0 | 104,902 |
from typing import Union
def _chunk_size_value(value: str) -> Union[int, str]:
"""Validate/cast chunk size."""
if value == "auto":
return value
else:
try:
size = int(value)
if size > 0 or size == -1:
return size
except ValueError:
pass
raise ValueError(f"Invalid chunk size: {value}") | 5cb64710c6bcdc1899ed8c6c3778533323860123 | 104,905 |
def tag_usage(err=''):
    """ Build the Usage() statement for the program, optionally prefixed
    with an error message. """
    lines = [
        '%s\n' % err,
        ' Default usage is to update 1 CR with a tag string:\n',
        ' ',
        ' tagcr -t <CR> <tag value> \n',
    ]
    return ''.join(lines)
def __doc__(self):
    """Build the docstring for the class, keyed on the class name."""
    cls_name = self.__class__.__name__
    # Superclass label and known subclasses for each supported class;
    # any other class name raises KeyError (as in the original).
    supercls_str, all_subclasses = {
        "DictWithID": ["dict", ["MassSolution"]],
        "OrderedDictWithID": ["OrderedDict", ["EnzymeDict"]],
    }[cls_name]
    template = """A specialized {1} with an additional ID attribute.
    The {0} class is essentially a subclass of an {1} with a string identifier.
    The {2} objects are all subclasses of {0}.
    Parameters
    ----------
    id: str, None
        The identifier to associate with the {0}.
    data_dict: dict, optional
        If provided, the new {0} will contain the key:value pairs from
        "data_dict". Otherwise the new {0} is initialized as empty.
    """
    return template.format(cls_name, supercls_str, str(tuple(all_subclasses)))
def expand(host):
    """ Expand a host spec (a bare name or a dotted field) to its full
    dotted form.  Running unalias or expand in either order yields the
    same result.

    For example:
        mongod             --> mongod.0.public_ip
        mongod.0           --> mongod.0.public_ip
        mongod.0.public_ip --> mongod.0.public_ip

    :param host: the (optionally) dotted host spec
    :returns: the expanded dotted name
    :raises ValueError: if there are more than 2 dots
    """
    depth = host.count(".") + 1
    if depth > 3:
        raise ValueError("The max level of nesting is 3: '{}'".format(host))
    if depth == 1:
        return host + ".0.public_ip"
    if depth == 2:
        return host + ".public_ip"
    return host
def build_target_list(targets, target_ids, target_file):
    """
    Build the list of targets to be processed.

    Parameters
    ----------
    targets: str[]
        Component names of all possible targets.
    target_ids: int[]
        Numerical ids of the subset to process (returned as-is when
        non-empty).
    target_file: str
        File listing component names of the subset to process; blank
        lines and '#' comments are ignored.

    Returns
    -------
    List of 1-based numerical ids of the targets to process, or None
    when neither ids nor a file were supplied.
    """
    if target_ids:
        return target_ids
    if not target_file:
        return None
    selected = []
    with open(target_file, 'r') as tgt_file:
        for line in tgt_file:
            comp_name = line.strip()
            if not comp_name or comp_name.startswith('#'):
                continue
            found = False
            # Scan all targets (duplicates in `targets` each match).
            for idx, tgt in enumerate(targets):
                if tgt == comp_name:
                    selected.append(idx + 1)
                    found = True
                    continue
            if not found:
                print ("Source {} is not known. Ignoring.".format(comp_name))
    # Don't run everything when a list was supplied but nothing matched.
    if len(selected) == 0:
        msg = 'ERROR: None of the targets listed in {} were found.'.format(target_file)
        print (msg)
        raise Exception(msg)
    return selected
def velocity_from_acceleration(ax, ay, az, t):
    """Integrate accelerometer samples into per-axis velocity estimates.

    Parameters
    ----------
    ax, ay, az : list or numpy array of floats
        accelerometer data for the x, y and z axes
    t : list or numpy array of floats
        sample time points (same length as the axis data)

    Returns
    -------
    (vx, vy, vz) : three lists
        cumulative velocity estimates per axis, each starting at 0, where
        v[k] = v[k-1] + a[k] * (t[k] - t[k-1])
    """
    def integrate(samples):
        # Deliberately indexed by len(ax) (as the original was), so all three
        # axes are assumed to share the x-axis sample count.
        velocity = [0]
        for k in range(1, len(ax)):
            velocity.append(velocity[-1] + samples[k] * (t[k] - t[k - 1]))
        return velocity

    return integrate(ax), integrate(ay), integrate(az)
def findStartPos(maze):
    """Return (column, row) of the 'S' start symbol, or (-1, -1) if absent.

    `maze` must provide getHeight(), getWidth() and row indexing.
    """
    # getWidth() is re-queried per row, matching the original call pattern.
    hit = next(((col, row)
                for row in range(maze.getHeight())
                for col in range(maze.getWidth())
                if maze[row][col] == 'S'), None)
    return hit if hit is not None else (-1, -1)
def invert_dict(d, is_val_tuple=False, unique=True):
    """
    Invert a dictionary by making its values keys and vice versa.

    Parameters
    ----------
    d : dict
        The input dictionary.
    is_val_tuple : bool
        If True, the `d` values are tuples and new keys are the tuple items.
    unique : bool
        If True, the `d` values are unique and so the mapping is
        one to one. If False, the `d` values (possibly) repeat, so the
        inverted dictionary will have as items lists of corresponding keys.

    Returns
    -------
    di : dict
        The inverted dictionary.
    """
    # six.iteritems(d) was a py2 compat shim; on py3 it is exactly d.items().
    di = {}
    for key, val in d.items():
        vals = val if is_val_tuple else (val,)
        for v in vals:
            if unique:
                di[v] = key
            else:
                di.setdefault(v, []).append(key)
    return di
def any(iterable):
    """Return True if at least one element of *iterable* is truthy.

    NOTE: intentionally shadows the builtin of the same name (same contract).
    An empty iterable yields False.
    """
    for item in iterable:
        if item:
            return True
    return False
def _get_style_test_options(filename):
""" Returns (skip, ignores) for the specifies source file.
"""
skip = False
ignores = []
text = open(filename, 'rb').read().decode('utf-8')
# Iterate over lines
for i, line in enumerate(text.splitlines()):
if i > 20:
break
if line.startswith('# styletest:'):
if 'skip' in line:
skip = True
elif 'ignore' in line:
words = line.replace(',', ' ').split(' ')
words = [w.strip() for w in words if w.strip()]
words = [w for w in words if
(w[1:].isnumeric() and w[0] in 'EWFCN')]
ignores.extend(words)
return skip, ignores | 75ed451f8fd1ed1ab7c3d7f36cc2baace8bdb171 | 104,933 |
def cutoff_fn(good_mean: float, good_std: float, bad_mean: float, bad_std: float) -> float:
    """Compute a cutoff between the good and bad sample distributions.

    The cutoff is the good mean shifted by a fraction of the good standard
    deviation proportional to the (std-normalised) separation of the means.

    :param good_mean: Mean of the good samples
    :param good_std: Standard deviation of the good samples
    :param bad_mean: Mean of the bad samples
    :param bad_std: Standard deviation of the bad samples
    :return: The cutoff value
    """
    separation = abs(bad_mean - good_mean) / (good_std + bad_std)
    return separation * good_std + good_mean
def padding_method_2(data: bytes, pad_to: int) -> bytes:
    """Pad *data* per ISO/IEC 9797-1 padding method 2.

    A single 0x80 byte is appended, then zero bytes until the length is a
    multiple of *pad_to*.
    """
    padded = data + b"\x80"
    remainder = len(padded) % pad_to
    if remainder:
        padded += b"\x00" * (pad_to - remainder)
    return padded
def vector_is_zero(vector_in, tol=10e-8):
    """Check whether every component of the input vector is (near) zero.

    :param vector_in: input vector
    :type vector_in: list, tuple
    :param tol: tolerance value
    :type tol: float
    :return: True if all components satisfy abs(c) < tol (an empty vector
        is considered zero), False otherwise
    :rtype: bool
    """
    if not isinstance(vector_in, (list, tuple)):
        raise TypeError("Input vector must be a list or a tuple")
    return all(abs(component) < tol for component in vector_in)
def get_mac_from_raw_query(request_raw_query: str):
    """Extract the MAC address from a matchbox "request raw query".

    The query has the form of URL parameters (``a=1&mac=aa-bb-...&b=2``);
    when several ``mac=`` parameters are present the last one wins, and
    dashes are normalised to colons.

    :param request_raw_query: the raw query string after '?'
    :return: the MAC address with ':' separators
    :raises AttributeError: if no ``mac=`` parameter is present
    """
    mac = ""
    for param in request_raw_query.split("&"):
        if "mac=" in param:
            mac = param.replace("mac=", "")
    if not mac:
        raise AttributeError("%s is not parsable" % request_raw_query)
    return mac.replace("-", ":")
def get_source_detail(sources):
    """Extract the RiskSense context fields from raw source dicts.

    :param sources: source detail dicts from the API response.
    :return: list of dicts with 'Name', 'UuID' and 'ScannerType' keys
        (missing fields default to '').
    """
    details = []
    for source in sources:
        details.append({
            'Name': source.get('name', ''),
            'UuID': source.get('uuid', ''),
            'ScannerType': source.get('scannerType', ''),
        })
    return details
def audio_fname(text):
    """
    Map a piece of text to the filename under which its audio mp3 is cached.

    Uses a stable content hash (md5) instead of the builtin hash(), whose
    value for strings changes between interpreter runs (PYTHONHASHSEED
    randomisation) and would therefore defeat the cache.
    """
    import hashlib
    digest = hashlib.md5(text.encode("utf-8")).hexdigest()
    return f'audio_{digest}.mp3'
def remove_headers(headers, name):
    """Remove all headers named *name* (case-insensitive).

    The list is modified in-place and the updated list is returned.
    """
    wanted = name.lower()
    # Slice-assign so the caller's list object is mutated, as before.
    headers[:] = [header for header in headers if header[0].lower() != wanted]
    return headers
import math
def computeProb(i, j, lf, le, lamb):
    """
    Distortion term for aligning source position j to target position i.

    i, j : int
        alignment of word i to j
    lf, le : int
        length of target sentence (lf) and source sentence (le)
    lamb : float
        hyper-parameter scaling the penalty (currently 4.0 in practice)
    """
    distortion = abs(float(j) / float(le) - float(i) / float(lf))
    return math.exp(-1.0 * distortion * lamb)
def find_first(data):
    """Return (x, y) of the first cell equal to 1, scanning rows top-down.

    >>> find_first([[0, 0], [0, 1]])
    (1, 1)
    >>> find_first([[0, 0], [0, 0]])
    """
    return next(((x, y)
                 for y, row in enumerate(data)
                 for x, cell in enumerate(row)
                 if cell == 1), None)
import stat
def filetype(mode):
    """
    Classify a ``stat`` file mode as "link", "dir", "file" or "unknown".

    @param mode: file mode from "stat" command.
    """
    # Checked in the original's order; a mode matches at most one of these.
    checks = (
        (stat.S_ISLNK, "link"),
        (stat.S_ISDIR, "dir"),
        (stat.S_ISREG, "file"),
    )
    for predicate, label in checks:
        if predicate(mode):
            return label
    return "unknown"
def iterative_fibonacci(i):
    """Return the i-th Fibonacci number (fib(0)=0, fib(1)=1), iteratively.

    time: O(n)
    space: O(1)  (the original docstring's O(n) claim was wrong: only two
    scalars are kept)
    """
    if i == 0:
        return 0
    prev, curr = 0, 1
    for _ in range(i - 1):
        prev, curr = curr, prev + curr
    return curr
import importlib
def getModelDesc(model=None):
    """Returns a brief description of the selected model.

    Parameters
    ----------
    model : str
        Name of the model to get the description of

    Returns
    -------
    A string with the description of the model

    Raises
    ------
    ValueError if no model name is given, ImportError if the module cannot
    be located, RuntimeError if it lacks a getModelDesc() function.
    """
    if model is None:
        raise ValueError('Unknown model. No model name is given.')
    # First try the model name as a top-level module, then fall back to the
    # bundled radmc3dPy.models package.
    try:
        mdl = importlib.import_module(model)
    except ImportError:
        try:
            mdl = importlib.import_module('radmc3dPy.models.'+model)
        except ImportError:
            msg = model+'.py could not be imported. The model files should either be in the '\
                  + ' current working directory or in the radmc3d python module directory'
            raise ImportError(msg)
    # Delegate to the module's own getModelDesc() hook.
    if callable(getattr(mdl, 'getModelDesc')):
        return mdl.getModelDesc()
    else:
        raise RuntimeError(model+'.py does not contain a getModelDesc() function.') | c75946da8d0b10949d77b007cbc8a30d0491837a | 104,970
def getCorrespondingWindow(index, windows):
    """
    Find the anomaly window corresponding to a predicted anomaly point.

    :param index (type: int): index of the predicted anomaly point
    :param windows (type: list of (start, end) tuples): true anomaly windows,
        assumed ordered by start
    :return (type: tuple): the window containing the point, else the
        preceding window; implicitly None when the point precedes all windows
    """
    last = len(windows) - 1
    for pos, window in enumerate(windows):
        starts_before = window[0] <= index
        before_next = pos == last or index < windows[pos + 1][0]
        if starts_before and before_next:
            return window
def path(key, *path):
    """
    Build a colon-separated redis key path from *key* and extra segments.
    """
    return ':'.join([key, *path])
def make_coordinate(x_coord, y_coord):
    """Build an ``{"x": ..., "y": ...}`` coordinate dictionary."""
    return {"x": x_coord, "y": y_coord}
import sqlite3
import collections
import itertools
def get_menu(campuses, dates):
    """
    Retrieve the menu on the given dates for the given campuses from the
    'menu.db' sqlite database.

    Args:
        campuses: The campuses for which the menu is retrieved.
        dates: The dates for which the menu is retrieved.

    Returns:
        A nested dictionary keyed by (date, campus); each value maps the menu
        item type to a (item, price_student, price_staff) tuple.
    """
    menu = collections.defaultdict(dict)
    conn = sqlite3.connect('menu.db')
    try:
        c = conn.cursor()
        for date, campus in itertools.product(dates, campuses):
            c.execute('SELECT type, item, price_student, price_staff FROM menu WHERE date = ? AND campus = ?', (date, campus))
            for menu_type, menu_item, price_student, price_staff in c.fetchall():
                menu[(date, campus)][menu_type] = (menu_item, price_student, price_staff)
    finally:
        # The original leaked the connection; always close it.
        conn.close()
    return menu
def create_activity(conn, activity):
    """
    Insert a new activity row.
    :param conn: open sqlite3 Connection
    :param activity: tuple (activity, time_goal, time_done, user_id)
        matching the VALUES placeholders below
    :return: rowid of the newly inserted activity
    """
    sql = ''' INSERT INTO Activities (activity, time_goal, time_done, user_id)
              VALUES(?,?,?,?) '''
    cur = conn.cursor()
    cur.execute(sql, activity)
    # NOTE(review): no commit here — presumably the caller owns the
    # transaction; confirm before relying on persistence.
    return cur.lastrowid | 74ee141cbeab8d60d5b8ddbc76a425f9adb3daa8 | 104,983
def _correct_searchqa_score(x, dataset):
"""Method to correct for deleted datapoints in the sets.
Args:
x: number to correct
dataset: string that identifies the correction to make
Returns:
The rescaled score x.
Raises:
ValueError: if dataset is none of train, dev, test.
"""
if dataset == 'train':
return x * 90843 / (90843 + 8977)
elif dataset == 'dev':
return x * 12635 / (12635 + 1258)
elif dataset == 'test':
return x * 24660 / (24660 + 2588)
else:
raise ValueError('Unexepected value for dataset: {}'.format(dataset)) | d92b992f98116d069456d7a9f34b2a95d93b5880 | 104,986 |
def istuple(x):
    """Return True when *x* is a tuple (including tuple subclasses)."""
    return isinstance(x, tuple)
def tcpdump_capture(device,
                    interface,
                    port=None,
                    capture_file='pkt_capture.pcap',
                    filters=None):
    """Capture network traffic using tcpdump.
    Note: This function will keep capturing until you Kill tcpdump.
    The kill_process method can be used to kill the process.
    :param device: lan or wan
    :type device: Object
    :param interface: interface on which the packets to be captured (eg: eth0)
    :type interface: String
    :param port: Port number to capture. Can be a single port or range of ports (for https: 443 or 443-433)
    :type port: String
    :param capture_file: Filename to create in which packets shall be stored. Defaults to 'pkt_capture.pcap'
    :type capture_file: String, Optional
    :param filters: dictionary of additional filters and filter_values as key value pair (eg: {"-v":"","-c": "4"})
    :type filters: dict
    :return: Console ouput of tcpdump sendline command.
    :rtype: string
    """
    # Base command: capture on `interface`, no name resolution, write to file.
    base = "tcpdump -i %s -n -w %s " % (interface, capture_file)
    # Trailing '&' backgrounds tcpdump so the shell returns immediately.
    run_background = " &"
    # Flatten the filters dict into "flag value flag value ..." form.
    filter_str = ' '.join([' '.join(i)
                           for i in filters.items()]) if filters else ''
    if port:
        device.sudo_sendline(base + "\'portrange %s\' " % (port) + filter_str +
                             run_background)
    else:
        device.sudo_sendline(base + filter_str + run_background)
    # Block until tcpdump confirms it is listening before returning.
    device.expect_exact('tcpdump: listening on %s' % interface)
    return device.before | 0badc3c88ac9d6e7b2215b62e071ea34b1bd61e6 | 104,989
def get_standard_write(filename: str) -> str:
    """Return boilerplate code that json-dumps a `data` variable into *filename*.

    The previous version never used the *filename* parameter and emitted a
    literal placeholder string instead; the target filename is now embedded.
    """
    return f"""\n\nwith open("{filename}", "w") as dbfile:\n    json.dump(data, dbfile)\n\n"""
def get_page_list(paginator, page=1):
    """
    Generate a list of pages used choose from to jump quickly to a page
    This generates a list that shows:
    * if near the start/end, up to 10 pages from the start/end
    * if in the middle first two and last two pages in addition to the
      +/- 5 from the current page
    * if num_pages<10 - only one list of pages is shown
    * if num_pages==11 then the list is statically generated because this
      list size results in wierd results for the standard logic that generates
      the lists.

    Returns a (start, middle, end) tuple of page-number lists; `middle` is
    None unless the current page is far from both ends.
    """
    if paginator.num_pages < 11:
        # special case: only one list of values
        pages = (range(1,paginator.num_pages+1),)
    elif paginator.num_pages == 11:
        # special case: lists are static
        pages = ([1,2,3,4,5,6,7,8,9,10], None, [11])
    else:
        # normal case
        # start run: 10 pages when near the front, otherwise just [1, 2]
        start = [i for i in range(1, 11 if page < 8 and page < paginator.num_pages-6 else 3)]
        # middle window of +/- 5 around the current page, only when the page
        # is far enough from both ends that start/end don't already cover it
        middle = [i for i in range(page-5,page+5)] if page > 7 and page < paginator.num_pages-6 else None
        # end run: last 2 pages normally, last 10 when near the back
        end = [i for i in range(paginator.num_pages-(1 if page < paginator.num_pages-6 else 9), paginator.num_pages+1)]
        pages = (start, middle, end)
    return pages | 1637d9f7acd2140b45ad0633c306d883197e2aa2 | 104,991
def get_sum_rois_volume(list_results_row: list) -> float:
    """Sum the volume column (index 3) over every ROI results row.

    Args:
        list_results_row: list of ROI result rows from the CSV file;
            each row's fourth field is the volume.

    Returns:
        The total volume as a float (0.0 for an empty list).
    """
    return float(sum(float(row[3]) for row in list_results_row))
def _resolveDotSegments(path):
    """
    Normalise the URL path by resolving segments of '.' and '..'.
    @param path: list of path segments
    @see: RFC 3986 section 5.2.4, Remove Dot Segments
    @return: a new L{list} of path segments with the '.' and '..' elements
        removed and resolved.
    """
    segs = []
    for seg in path:
        if seg == u'.':
            # current directory: contributes nothing
            pass
        elif seg == u'..':
            # parent directory: drop the previous segment (if any)
            if segs:
                segs.pop()
        else:
            segs.append(seg)
    # A trailing '.' or '..' names a directory, so preserve the trailing
    # slash by appending an empty segment.
    if list(path[-1:]) in ([u'.'], [u'..']):
        segs.append(u'')
    return segs | 7aea457fbbc1485a3c0ed81b13a89e6c3d9fff8c | 105,000
def divup(a, b):
    """Divide a by b, rounding up to the next integer (intended for b > 0)."""
    return (a + b - 1) // b
def mjd(year, month, day, hour=0, minute=0, second=0):
    """
    Calculate the modified Julian date from an ordinary date and time.

    Notes
    -----
    The formulas are taken from Wikipedia (Julian day number from the
    Gregorian calendar, then MJD = JD - 2400000.5).

    Example
    -------
    >>> mjd(2000, 1, 1)
    51544.0
    """
    a = (14 - month) // 12
    y = year + 4800 - a
    m = month + 12 * a - 3
    jdn = day + (153 * m + 2) // 5 + 365 * y + \
        y // 4 - y // 100 + y // 400 - 32045
    # A day has 1440 minutes; the previous code divided by 1400 (bug).
    jd = jdn + (hour - 12) / 24. + minute / 1440. + second / 86400.
    mjd = jd - 2400000.5
    return mjd
def get_text_from_response(response):
    """requests Response's text property automatically uses the default encoding to convert it to unicode
    However, sometimes it falls back to ISO-8859-1, which is not appropriate. This method checks whether it
    could be interpreted as UTF-8. If it is, it uses it. Otherwise, it uses whatever was defined.

    Note: mutates ``response.encoding`` as a side effect before reading .text.
    """
    if response.encoding is None:
        # No charset detected at all: assume UTF-8.
        response.encoding = 'utf8'
    elif response.encoding == 'ISO-8859-1':
        # ISO-8859-1 is often only the HTTP-spec default, not the real
        # charset; prefer UTF-8 when the raw bytes decode cleanly as such.
        try:
            response.content.decode('utf8')
        except UnicodeDecodeError:
            pass
        else:
            response.encoding = 'utf8'
    return response.text | ee8cd4f9a2dd479020db8b830bc9ac80be79fbbd | 105,004
def add_single(arr, val):
    """Return a new list with the scalar *val* added to each element of *arr*."""
    return [element + val for element in arr]
import re
def load_eval_list_lines(path):
"""
Load and eval lines from given path, each line is a list that will be convert into a string.
Args:
path (str): Dataset file path
Returns:
list: List of lines
"""
lines = []
with open(path, encoding='utf-8') as f:
for line in f.readlines():
tokens = eval(line.strip())
string = ' '.join(tokens)
string = re.sub(r'\s+', ' ', string)
lines.append(string)
return lines | 29e72d2b39c7bfa8c6330038d49d659da3170cfd | 105,009 |
def conv(ip):
    """
    Convert a 32-bit integer into dotted-quad IPv4 notation.

    :param ip: IP address as an int
    :return: IP address as a string
    """
    octets = ((ip >> shift) & 255 for shift in (24, 16, 8, 0))
    return ".".join(str(octet) for octet in octets)
def get_env_name(model_dir):
    """
    Args:
        model_dir: str. Will be in the format model-name/env-name_run-date and might end in "/"
    Return:
        str with the env name as it appears in gym
    """
    # NOTE(review): len_date is computed (and bumped for a trailing "/") but
    # never used — the slice below hardcodes 20. With a trailing slash the
    # leftover underscore is later removed by the env[:s] slice with s == -1,
    # so the code happens to work for the suffixed cases; confirm the
    # expected run-date format before "fixing" this.
    len_date = 20
    if model_dir[-1] == "/":
        len_date += 1
    env = model_dir[:-20]
    s = env.find("/")
    env = env[s+1:]
    # Strip the Atari "NoFrameskip" suffix, else fall back to cutting at
    # the gym version tag ("-v..."); if neither is present, env[:s] with
    # s == -1 silently drops the final character.
    s = env.find("NoFrameskip")
    if s > 0:
        env = env[:s]
    else:
        s = env.find("-v")
        env = env[:s]
    return env | c6ec5ab541d4cdb1ce977e7db35e5bf1fb0f4940 | 105,014
def create_demag_params(atol, rtol, maxiter):
    """
    Build the demag solver parameter dictionary for the given tolerances and
    maximum iteration count; pass the result directly to the Demag class.

    Note: 'phi_1' and 'phi_2' deliberately reference the SAME dict object,
    matching the original behaviour.
    """
    shared = {
        'absolute_tolerance': atol,
        'relative_tolerance': rtol,
        'maximum_iterations': int(maxiter),
    }
    return dict(phi_1=shared, phi_2=shared)
import re
def strip_stars(doc_comment):
    """
    Version of jsdoc.strip_stars which always removes 1 space after * if
    one is available.

    Strips the leading '/**' and trailing '*/', removes each line's leading
    '*' decoration, and trims surrounding whitespace.
    """
    # Raw string: the old pattern's '\s' and '\*' are invalid str escapes
    # (SyntaxWarning on modern Python); the regex itself is unchanged.
    return re.sub(r'\n\s*?\*[\t ]?', '\n', doc_comment[3:-2]).strip()
def create_shot_coordinates(df_events):
    """
    Estimate shot end coordinates from the Wyscout position tags.

    Args:
        df_events (pd.DataFrame): Wyscout event dataframe with the boolean
            position_* columns, a boolean 'blocked' column and
            'start_x'/'start_y' columns.

    Returns:
        pd.DataFrame: the same dataframe with 'end_x'/'end_y' filled in
        for shots (blocked shots end at their start location).
    """
    # Each entry: (tag columns OR-ed together, end_y). end_x is always 100.0.
    # The groups are applied in the original order, so later groups overwrite
    # earlier ones for rows carrying multiple tags; 'blocked' wins last.
    location_specs = [
        (["position_goal_low_center", "position_goal_mid_center",
          "position_goal_high_center"], 50.0),
        (["position_goal_low_right", "position_goal_mid_right",
          "position_goal_high_right"], 55.0),
        (["position_goal_mid_left", "position_goal_low_left",
          "position_goal_high_left"], 45.0),
        (["position_out_high_center", "position_post_high_center"], 50.0),
        (["position_out_low_right", "position_out_mid_right",
          "position_out_high_right"], 60.0),
        (["position_out_mid_left", "position_out_low_left",
          "position_out_high_left"], 40.0),
        (["position_post_mid_left", "position_post_low_left",
          "position_post_high_left"], 55.38),
        (["position_post_low_right", "position_post_mid_right"], 44.62),
    ]
    for columns, end_y in location_specs:
        idx = df_events[columns[0]]
        for col in columns[1:]:
            idx = idx | df_events[col]
        df_events.loc[idx, "end_x"] = 100.0
        df_events.loc[idx, "end_y"] = end_y
    blocked_idx = df_events["blocked"]
    df_events.loc[blocked_idx, "end_x"] = df_events.loc[blocked_idx, "start_x"]
    df_events.loc[blocked_idx, "end_y"] = df_events.loc[blocked_idx, "start_y"]
    return df_events
def decimal_to_any(num: int, base: int) -> str:
    """
    Convert a non-negative integer to its string representation in *base*
    (2 <= base <= 36), using digits 0-9 then A-Z.

    >>> decimal_to_any(0, 2)
    '0'
    >>> decimal_to_any(5, 4)
    '11'
    >>> decimal_to_any(58, 16)
    '3A'
    >>> decimal_to_any(34923, 36)
    'QY3'
    >>> decimal_to_any(-45, 8)
    Traceback (most recent call last):
        ...
    ValueError: parameter must be positive int
    """
    # Validation order and messages preserved from the original.
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    if num == 0:
        return "0"
    # Standard repeated-division conversion: collect remainders, least
    # significant first, then reverse.
    out = []
    while num > 0:
        num, rem = divmod(num, base)
        out.append(digits[rem])
    return "".join(reversed(out))
def get_id(document):
    """
    Return the unique 'paper_id' of a covid article dict (None if absent).

    Parameters
    ----------
    document: dict
        A covid article

    Examples
    --------
    >>> get_id({'paper_id': 'thisismyid'})
    'thisismyid'
    """
    return document.get('paper_id')
import hashlib
def create_hash(inp, l=10, algo="sha256", to_int=False):
    """
    Takes an arbitrary input *inp* and creates a hexadecimal string hash based
    on an algorithm *algo*. For valid algorithms, see python's hashlib. *l*
    corresponds to the maximum length of the returned hash and is limited by
    the length of the hexadecimal representation produced by the hashing
    algorithm. When *to_int* is *True*, the decimal integer representation is
    returned.
    """
    # six.b(s) was just s.encode("latin-1"); inlined to drop the py2 shim.
    payload = str(inp).encode("latin-1")
    h = getattr(hashlib, algo)(payload).hexdigest()[:l]
    return int(h, 16) if to_int else h
def check_obj(obj_out):
    """
    Check that emmaptosurf wrote a reasonable amount of geometry; too little
    means the ISO level is wrong.

    (obj_out is a string containing the output of emmaptosurf.)  Returns
    True/False based on the first "Wrote ..." line (both reported counts
    must be >= 1000); implicitly None when no such line exists.
    """
    for line in obj_out.split("\n"):
        parts = line.split(" ")
        if parts[0] != "Wrote":
            continue
        return int(parts[1]) >= 1000 and int(parts[4]) >= 1000
def add(data,r=0.0,i=0.0,c=0.0):
    """
    Add constants to the real/imaginary channels of spectral data, in place.

    Parameters:
        * data  Array of (complex) spectral data; assumes the object exposes
          writable .real and .imag attributes (e.g. a numpy complex array —
          TODO confirm against callers).
        * r     Constant to add to the real channel.
        * i     Constant to add to the imaginary channel.
        * c     Constant to add to both channels.

    Returns the same (mutated) data object.
    """
    data.real = data.real + r + c
    data.imag = data.imag + i + c
    return data | af4934c72aec0dfe25742494fcec0a7543ec785c | 105,034
def other_types_on_host(host_state, instance_type_id):
    """Test for overlap between a host_state's instances and an
    instance_type_id.

    Returns True if any instance in the host_state has an instance_type_id
    different from the supplied instance_type_id value, else False.
    """
    for inst in host_state.instances.values():
        if inst.instance_type_id != instance_type_id:
            return True
    return False
def cost(dic, PhoneCost, EmailCost, OverbondCost):
    """Total the cost over every connection in the dictionary *dic*.

    Each dic[a][b] is a 3-sequence of connection counts ordered
    (overbond, phone, email); only one endpoint records a connection, so
    nothing is double-counted.
    """
    tariffs = [OverbondCost, PhoneCost, EmailCost]
    total = sum(count * rate
                for neighbours in dic.values()       # every vertex
                for counts in neighbours.values()    # every recorded edge
                for count, rate in zip(counts, tariffs))
    return float(total)
def traverse(dictionary, keys, default=None):
    """Look up a deep value in a nested dictionary.

    Args:
        dictionary (dict): The structure to descend into.
        keys (tuple|str): Hierarchical key path; a bare str is one level.
        default: Value returned when the path is missing (or keys is None).

    Returns:
        The value at the path, or `default` when any step raises KeyError.
    """
    if keys is None:
        return default
    if type(keys) == str:
        keys = (keys,)
    node = dictionary
    try:
        for key in keys:
            node = node[key]
    except KeyError:
        return default
    return node
def common_event_topics(num_topics=10, num_contracts=10, only_sigs=False):
    """Build an elasticsearch query aggregating the most common event
    signatures and, per signature, their most common contract addresses.

    Usage:
        es.search('ethereum', 'log', body=common_event_topics())

    Params:
        num_topics (int): maximum number of topics to collect
        num_contracts (int): maximum number of contract addresses per topic
        only_sigs (bool): if true, aggregate on `signature` (topic0) only

    Returns:
        query (dict): an elasticsearch query body dictionary.
    """
    topic_field = "signature" if only_sigs else "topics"
    contract_agg = {"terms": {"field": "address", "size": num_contracts}}
    topic_agg = {
        "terms": {"field": topic_field, "size": num_topics},
        "aggs": {"contract": contract_agg},
    }
    # size 0: we want only the aggregations, not the matching documents.
    return {
        "query": {"match_all": {}},
        "aggs": {"topic": topic_agg},
        "size": 0,
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.