content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import ast
def _check_vals(item):
"""
Check if item is evaluatable and returns the value of it.
Example 'False' should return False
'ABC' should return 'ABC'
PARAMETERS: String (the value from a key/value pair)
RETURNS : type of evaluated string
"""
try:
val = ast.literal_eval(item)
except:
val = item
return val | 2310fec4c6ba07112e9dc759f492caf66d5dca53 | 37,848 |
import hashlib
import six
import base64
def CalculateMd5Hash(file_path):
    """Calculate the base64-encoded md5 hash of a local file.

    Reads the file in fixed-size chunks so arbitrarily large files do
    not have to fit in memory (the original read the whole file at once).

    Args:
        file_path: the local file path
    Returns:
        str: base64 encoding of the file's md5 digest.
    """
    m = hashlib.md5()
    with open(file_path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            m.update(chunk)
    # Base64 output is pure ASCII; .decode() replaces the six.ensure_text
    # call so the third-party six dependency is no longer needed here.
    return base64.b64encode(m.digest()).decode('ascii')
def read_connection_data_from_external_file(filepath, separator="="):
    """Reads SQL server connection information from an external file.

    Keeping this information external is potentially important for security
    reasons. The format of this file should be::

        server = [server_name]
        database = [database_name]

    Arguments:
        filepath -- the location of the connection file (e.g. "C:/connection_data/server_connection_data.txt")
        separator -- the delimiter (default "=")

    Returns:
        The server, database as strings

    Raises:
        ValueError: if the file does not define both ``server`` and
            ``database``.
    """
    connection_data_dict = {}
    # Use a context manager and stream line by line instead of readlines().
    with open(filepath, 'r') as f:
        for entry in f:
            # Strip all whitespace so "server = x" and "server=x" both work.
            entry_cleaned = entry.replace(" ", "").strip("\n")
            if not entry_cleaned:
                continue  # skip blank lines (the original crashed on them)
            key, sep_found, value = entry_cleaned.partition(separator)
            if sep_found:
                connection_data_dict[key] = value
    if "server" not in connection_data_dict or "database" not in connection_data_dict:
        # The original had an unreachable exit(0) after this raise; removed.
        raise ValueError(
            """Connection data file must contain server and database_name, formated like:
            server = server_name
            database = database_name\n""")
    server = connection_data_dict["server"]
    database = connection_data_dict["database"]
    print("Server={}\nDatabase={}".format(server, database))
    return server, database
import json
def load_filename(loc: str) -> dict:
    """Read the file at *loc* and parse its contents as JSON."""
    with open(loc) as handle:
        parsed = json.load(handle)
    return parsed
def lin_poly_solve(L, M):
    """
    Return the point defined as the intersection of two lines
    (given as degree-one polynomials in some field).

    L and M are expected to be degree-one polynomials in the same
    three-variable polynomial ring (e.g. Sage multivariate polynomials);
    the result is the cross product of their coefficient vectors, i.e.
    the intersection point in homogeneous (projective) coordinates.
    """
    # The three generators of the ambient polynomial ring.
    x0,x1,x2 = M.parent().gens()
    # Coefficient vector of L: the line a0*x0 + a1*x1 + a2*x2 = 0.
    a0 = L.coefficient(x0)
    a1 = L.coefficient(x1)
    a2 = L.coefficient(x2)
    # Coefficient vector of M: the line b0*x0 + b1*x1 + b2*x2 = 0.
    b0 = M.coefficient(x0)
    b1 = M.coefficient(x1)
    b2 = M.coefficient(x2)
    # Cross product of the rows
    # a0 a1 a2
    # b0 b1 b2
    return [a1*b2-a2*b1, -a0*b2+a2*b0, a0*b1-a1*b0]
def toRGB(hex_color_str):
    """
    Transform a hex colour string into a tuple of floats in [0, 1].

    e.g. r, g, b = toRGB('0xFFFFFF')  # -> (1.0, 1.0, 1.0)

    Note: despite the historical wording ("integer tuple"), each channel
    is the integer channel value divided by 255.0 — a normalised float.
    The behaviour is unchanged; only the documentation is corrected.
    """
    return int(hex_color_str[2:4],16)/255., int(hex_color_str[4:6],16)/255., int(hex_color_str[6:8],16)/255.
def lookup_object(spec):
    """
    Look up a module or object from a ``some.module:func_name`` spec.

    To look up just a module, omit the colon and everything after it.
    """
    if ':' in spec:
        module_path, attr = spec.split(':')
    else:
        module_path, attr = spec, None
    obj = __import__(module_path)
    remainder = module_path.split('.')[1:]
    if attr:
        remainder.append(attr)
    for name in remainder:
        obj = getattr(obj, name)
    return obj
def test():
    """Interesting! Always returns the constant 6."""
    answer = 6
    return answer
def default(d, k, i, default=None):
    """Return ``d[k][i]``; fall back to *default* on KeyError or IndexError."""
    try:
        value = d[k][i]
    except (KeyError, IndexError):
        value = default
    return value
import struct
def exifdata_tostring(stored_bytes, datatype, decoder):
    """Convert Exif value to displayable string, based on datatype.

    1st parameter = Exif value as a byte string (as stored in Jpeg file)
    2nd parameter = Exif datatype, as an integer (1-10)
    3rd parameter = reference to an ExifDecoder object, for decoding
                    byte strings to integer values based on the byte
                    alignment setting stored in the Jpeg file
    Returns a human-readable string version of the value.

    Bugfixes vs. the original: datatype 6 indexed a bytes object (an int
    on Python 3, which struct rejects) and datatypes 6/7 returned
    non-string values; both now return strings like every other branch.
    """
    displaystring = ''
    if datatype == 1:
        # Exif datatype 1 = BYTE (8-bit unsigned integer)
        displaystring = str(struct.unpack('B', stored_bytes)[0])
    elif datatype == 2:
        # Exif datatype 2 = ASCII (7-bit values, null-terminated)
        displaystring = ''
        mask = 0b01111111
        for byte in stored_bytes:
            displaystring += chr(byte & mask)  # clear 8th bit
    elif datatype == 3:
        # Exif datatype 3 = SHORT (16-bit unsigned integer)
        displaystring = str(decoder.decode_bytes(stored_bytes[0:2]))
    elif datatype == 4:
        # Exif datatype 4 = LONG (32-bit unsigned integer)
        displaystring = str(decoder.decode_bytes(stored_bytes[0:4]))
    elif datatype == 5:
        # Exif datatype 5 = RATIONAL (fraction expressed as two LONGs,
        # first is numerator and second is denominator)
        numerator = decoder.decode_bytes(stored_bytes[0:4])
        denominator = decoder.decode_bytes(stored_bytes[4:8])
        displaystring = str(numerator) + '/' + str(denominator)
    elif datatype == 6:
        # Exif datatype 6 = SBYTE (8-bit signed integer, 2s complement)
        # Slice (not index) so struct receives bytes; wrap in str().
        displaystring = str(struct.unpack('b', stored_bytes[0:1])[0])
    elif datatype == 7:
        # Exif datatype 7 = UNDEFINED (one 8-bit byte of any value)
        displaystring = str(stored_bytes[0])
    elif datatype == 8:
        # Exif datatype 8 = SSHORT (signed SHORT, 16-bit signed
        # integer, 2s complement notation)
        displaystring = str(decoder.decode_bytes(stored_bytes[0:2]))
    elif datatype == 9:
        # Exif datatype 9 = SLONG (signed LONG, 32-bit signed
        # integer, 2s complement notation)
        displaystring = str(decoder.decode_bytes(stored_bytes[0:4]))
    elif datatype == 10:
        # Exif datatype 10 = SRATIONAL (signed rational, a fraction
        # expressed as two SLONGs; first is numerator and second is
        # denominator)
        numerator = decoder.decode_bytes(stored_bytes[0:4])
        denominator = decoder.decode_bytes(stored_bytes[4:8])
        displaystring = str(numerator) + '/' + str(denominator)
    else:
        # unknown data type, so just return the raw data
        displaystring = str(stored_bytes)
    return displaystring
def line_1d(x, slope, offset):
    """Evaluate a straight line ``slope * x + offset``.

    Parameters
    ----------
    x : float or iterable of floats
        Where to evaluate the line.
    slope : float
        Slope of the line; must be finite.
    offset : float
        The y-offset (intercept) of the line.

    Returns
    -------
    float or iterable of floats
        The line's value at ``x`` (elementwise for array-like input).
    """
    y = slope * x
    y = y + offset
    return y
def _rainbow(x):
    """ Eq. 3 of sron_colourschemes.pdf

    Maps a scalar (or array) x to an (r, g, b) triple via the rational /
    polynomial fits given in that reference. Intended input range is the
    colour-map parameter domain [0, 1]; values are not clamped here.
    """
    # Red channel: rational function fit.
    r = ((0.472 - 0.567 * x + 4.05 * x**2)
         / (1.0 + 8.72 * x - 19.17 * x**2 + 14.1 * x**3))
    # Green channel: degree-6 polynomial fit.
    g = (0.108932 - 1.22635 * x + 27.284 * x**2 - 98.577 * x**3
         + 163.3 * x**4 - 131.395 * x**5 + 40.634 * x**6)
    # Blue channel: reciprocal of a degree-5 polynomial fit.
    b = 1.0 / (1.97 + 3.54 * x - 68.5 * x**2 + 243. * x**3
               - 297. * x**4 + 125. * x**5)
    return r, g, b
import numpy
def simpson_2d(f):
    """
    Approximate the integral of f from a to b in two dimensions,
    using Composite Simpson's rule.
    @param f: 2D numpy array of function values; assumed square with an
        odd number of samples per axis (2n+1), as Simpson's rule requires
    @return: approximation of the definit integral (multiply by
        hx*hy, the grid spacings, for a physical value — the weights here
        already include the overall 1/9 factor)
    """
    # Number of Simpson panels per axis: grid has 2n+1 points.
    n = int((f.shape[0] - 1) / 2)
    # i: odd interior indices (weight 4), j: even interior indices (weight 2).
    i = 2 * numpy.arange(1, n + 1) - 1
    j = 2 * numpy.arange(1, n)
    # Corner points (weight 1).
    I = f[0, 0] + f[-1, -1] + f[0, -1] + f[-1, 0]
    # Edge points along the top/bottom rows: odd columns weight 4, even weight 2
    # (here folded into a single factor-4 term plus the j-terms below, matching
    # the tensor-product Simpson weights 1-4-2-...-4-1 on each axis).
    I += 4 * (f[0, i].sum() + f[-1, i].sum() + f[0, j].sum() + f[-1, j].sum())
    # Edge points along the left/right columns.
    I += 4 * (f[i, 0].sum() + f[i, -1].sum() + f[j, 0].sum() + f[j, -1].sum())
    # Interior points: odd-odd weight 16, odd-even / even-odd weight 8.
    I += 16 * f[i][:, i].sum() + 8 * (f[i][:, j].sum() + f[j][:, i].sum())
    # Interior even-even points: weight 4.
    I += 4 * f[j][:, j].sum()
    return I / 9.
from typing import Tuple
def dict_to_full_tree(tree: dict) -> Tuple[dict, str]:
    """Converts a dict of form
    {
    'id' : str,
    'title': str,
    'selftext' : str,
    'author' : str,
    'comments' : {'id' : {'id': str,
    'parent_id': str,
    'replies': List[str],
    'author': str,
    'body': str,},
    'id1' : ...
    ...}
    }
    to the form of:
    {
    'id': {'id' : str,
    'parent_id': str,
    'replies': List[str],
    'author': str,
    'body': str,},
    'id1': ...
    ...
    } by adding an entry for the OP's post in tree['comments'].

    Note: mutates ``tree['comments']`` in place and returns it.

    Returns:
    A dict of the specified and the key of the root element of the tree,
    i.e. the OP's post.
    """
    # Synthesise a comment-like node for the OP's post itself.
    tree["comments"][tree["id"]] = {}
    tree["comments"][tree["id"]]["id"] = tree["id"]
    # Root node: no parent.
    tree["comments"][tree["id"]]["parent_id"] = ""
    tree["comments"][tree["id"]]["author"] = tree["author"]
    # Body combines title (tagged) and selftext, mirroring comment bodies.
    tree["comments"][tree["id"]]["body"] = ("<title> " + tree["title"] +
                                            " </title> " + tree["selftext"])
    # Top-level comments (those whose parent is the post) become the
    # root's replies. The root entry itself has parent_id "" so it is
    # never listed as its own reply.
    tree["comments"][tree["id"]]["replies"] = [
        id for id, comment in tree["comments"].items()
        if comment["parent_id"] == tree["id"]
    ]
    return tree["comments"], tree["id"]
def public(endpoint_func):
    """Decorator marking *endpoint_func* as public.

    Auth steps can then be skipped for it — useful for public pages.
    """
    setattr(endpoint_func, 'is_public', True)
    return endpoint_func
from pathlib import Path
import os
def get_query_for(resource_name: str) -> str:
    """Get graphQL query for a resource.

    Looks for ``<resource_name>.graphql`` (case-insensitive) under
    ``app/queries/`` and returns its contents.

    Raises:
        AttributeError: if no matching query file exists.
    """
    basepath = Path("app/queries/")
    wanted = f"{resource_name}.graphql".lower()
    for candidate in basepath.iterdir():
        if candidate.name.lower() == wanted:
            with open(os.path.join(basepath, candidate.name)) as query:
                return query.read()
    raise AttributeError(
        f'Query for resource "{resource_name}" cannot be found'
    )
def pathToLoc(p):
    """Associate path with location.

    v1.0 should specify ``location`` but older YAML uses ``path`` --
    copying one into the other provides back-compatibility. Mutates and
    returns *p*.
    """
    if "path" in p:
        p["location"] = p["path"]
    return p
def read_annotations(filename, tagset, labeled):
    """ Read tsv data and return sentences and [word, tag, sentenceID, filename] list

    Each sentence is framed by synthetic "[CLS]" / "[SEP]" rows whose tag
    slots hold -100 (the usual "ignore" label for loss masking).

    NOTE(review): the sentinel rows contain the expression ``-100 -1``
    (which evaluates to -101) — this looks like a missing comma
    (``-100, -1``); also, the sentinel rows have one more element than
    the 8-element data rows built below. Confirm the intended layout
    before changing it; the code is left byte-identical here.
    """
    with open(filename, encoding="utf-8") as f:
        sentence = []
        # Sentence-start sentinel row.
        sentence.append(["[CLS]", -100, -100, -100, -100, -100, -100 -1, -1, None])
        sentences = []
        sentenceID=0
        for line in f:
            # NOTE(review): lines read from a file always include "\n",
            # so len(line) > 0 is always true here.
            if len(line) > 0:
                if line == '\n':
                    # Blank line = sentence boundary.
                    sentenceID+=1
                    sentence.append(["[SEP]", -100, -100, -100, -100, -100, -100 -1, -1, None])
                    # Only keep sentences that contain at least one real token.
                    if len(sentence) > 2:
                        sentences.append(sentence)
                    sentence = []
                    sentence.append(["[CLS]", -100, -100, -100, -100, -100, -100 -1, -1, None])
                else:
                    # Data row: token, five tag columns, sentence id, source file.
                    data=[]
                    split_line = line.rstrip().split('\t')
                    data.append(split_line[0])
                    data.append(tagset[split_line[1]] if labeled else 0)
                    data.append(tagset[split_line[2]] if labeled else 0)
                    data.append(tagset[split_line[3]] if labeled else 0)
                    data.append(tagset[split_line[4]] if labeled else 0)
                    data.append(tagset[split_line[5]] if labeled else 0)
                    data.append(sentenceID)
                    data.append(filename)
                    sentence.append(data)
    # Flush the trailing sentence (file may not end with a blank line).
    sentence.append(["[SEP]", -100, -100, -100, -100, -100, -100 -1, -1, None])
    if len(sentence) > 2:
        sentences.append(sentence)
    return sentences
import logging
def setup_logger(logger_name="capsule", extra_kwargs=None):
    """Simple wrapper to setup a logger and return it.

    Used by the LOG constant which is used through the project.

    Args:
        logger_name (str, optional): The name for the logger. Defaults to "capsule".
        extra_kwargs (dict, optional): Any extra options forwarded to
            ``logging.getLogger``. Defaults to None (no extras).
    """
    # Avoid the mutable-default-argument pitfall: default to a fresh dict.
    extra_kwargs = {} if extra_kwargs is None else extra_kwargs
    logger = logging.getLogger(logger_name, **extra_kwargs)
    logger.setLevel(logging.INFO)
    # NOTE(review): calling this twice with the same name attaches a
    # duplicate handler — presumably only called once per name; confirm.
    # Setup a StreamHandler to give output to the logs
    handler = logging.StreamHandler()
    # Establish a log format for messages
    handler.setFormatter(logging.Formatter('[capsule:%(module)s] %(message)s'))
    # Add handler to logger
    logger.addHandler(handler)
    return logger
def get_similar_artists(h5,songidx=0):
    """
    Get similar artists array. Takes care of the proper indexing if we are in aggregate
    file. By default, return the array for the first song in the h5 file.
    To get a regular numpy ndarray, cast the result to: numpy.array( )

    NOTE(review): the loop copies rows songidx .. songidx+nrows-1, i.e.
    it offsets by the full table length — presumably correct for the
    aggregate-file layout described above; confirm against the writer.
    """
    a=[]
    n=0
    # Walk every row of the similar_artists table, shifted by songidx.
    while (n != h5.root.metadata.similar_artists.nrows):
        a.append(h5.root.metadata.similar_artists[songidx+n])
        n=n+1
    return a
import pickle
def restore_model(model, train_set, model_name, directLong, log_dir_pass):
    """
    Function to restore model for training continuation
    Args:
        model (RGAN|RCGAN): uninstantiated model to restore
        train_set (numpy.ndarray|(numpy.ndarry,numpy.ndarray)):
        output of "loadData" in "/src/train.py"
        model_name (str): type of model to use, either "RGAN" or "RCGAN"
        directLong (str): log directory from where to start training
        log_dir_pass (str): new log directory to save restored model
    Returns:
        model (RGAN|RCGAN): model restored with last saved weights
    """
    # Restore the saved generator/discriminator weights, then mirror them
    # into the combined model's sub-layers (last two layers by convention).
    model.generator.load_weights(directLong + "/gen_weights.h5")
    model.discriminator.load_weights(directLong + "/dis_weights.h5")
    model.combined.layers[-2].set_weights(model.generator.get_weights())
    model.combined.layers[-1].set_weights(model.discriminator.get_weights())
    # hold back model information
    hold_epochs = model.epochs
    hold_batch_size = model.batch_size
    # Run one dummy training step on a single sample so the optimizers
    # allocate their slot variables before we overwrite them below.
    model.epochs = 1
    model.batch_size = 1
    # initialize dummy optimizer weights
    if model_name == "RGAN":
        model.train(train_set[:1], log_dir_pass)
    elif model_name == "RCGAN":
        # RCGAN takes (data, conditions) pairs.
        model.train((train_set[0][:1], train_set[1][:1]), log_dir_pass)
    # return model information
    model.epochs = hold_epochs
    model.batch_size = hold_batch_size
    # Load the pickled optimizer states saved alongside the weights.
    with open(directLong + "/dis_opt_weights.pickle", "rb") as f:
        dis_opt_weights = pkl_load_dis = pickle.load(f)
    with open(directLong + "/comb_opt_weights.pickle", "rb") as f:
        comb_opt_weights = pickle.load(f)
    # load previous optimizer weights
    model.discriminator.optimizer.set_weights(dis_opt_weights)
    model.combined.optimizer.set_weights(comb_opt_weights)
    # clear memory
    del dis_opt_weights, comb_opt_weights
    return model
def decimal_all_finite(x_dec_list):
    """Check that every Decimal in *x_dec_list* is finite.

    Parameters
    ----------
    x_dec_list : iterable of Decimal
        List of decimal objects.

    Returns
    -------
    y : bool
        True if all elements are finite.
    """
    for value in x_dec_list:
        if not value.is_finite():
            return False
    return True
def _sanitize_re(str):
"""
When doing searches that starts with *, ?, +, ie. special regexp chars,
MySQLdb and psycopg2 will throw "OperationalError: (1139, "Got error
'repetition-operator operand invalid' from regexp")".
This function returns a regexp string with the starting special chars
escaped.
"""
special_chars = ['*','+','?']
if str[0] in special_chars:
return '\\%s' % str
return str | c8c8af26de6baa27b06b3c81df1fb95a578cc42c | 37,880 |
def coord_shift180(lon):
    """Enforce coordinate longitude to range from -180 to 180.

    Sometimes longitudes are 0-360. This simply subtracts 360 from
    entries above 180 to get the more user-friendly [-180, 180] range,
    since slicing is a lot easier. Mutates *lon* in place and returns it.

    Parameters
    ----------
    lon: numpy.ndarray
        Array of longitudes with values in range [0, 360].

    Returns
    -------
    numpy.ndarray with longitudes in range [-180, 180].
    """
    for idx in range(len(lon)):
        if lon[idx] > 180:
            lon[idx] = lon[idx] - 360
    return lon
def temperature_over_total_temperature(
        mach,
        gamma=1.4
):
    """
    Give T/T_t, the ratio of static temperature to total temperature,
    for isentropic compressible flow.

    Args:
        mach: Mach number [-]
        gamma: The ratio of specific heats. 1.4 for air across most
            temperature ranges of interest.
    """
    stagnation_factor = 1 + (gamma - 1) / 2 * mach ** 2
    return stagnation_factor ** -1
def isPalindrome(s):
    """
    Case-insensitive palindrome test over alphanumeric characters only.

    :type s: str
    :rtype: bool
    """
    cleaned = ''.join(ch for ch in s if ch.isalnum()).lower()
    return cleaned == cleaned[::-1]
from functools import reduce
def words_in_chapters(chapters: list) -> list:
    """
    Return all words in the given chapters, concatenated in order
    (no punctuation or quotation characters — whatever each Chapter's
    ``words()`` yields).

    :param chapters: Chapter objects
    :return: all words in the given Chapters as a list of words
    """
    word_lists = (chapter.words() for chapter in chapters)
    return reduce(lambda left, right: left + right, word_lists)
import typing
def _exclusion_in(type_, name) -> typing.Tuple: # pylint: disable=unused-argument
""" do not cast Integer based primary keys """
return name, '=' | 6f24245800ce91c8fb947050a7f1d63f53ecba9e | 37,886 |
import json
def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
    """Works exactly like :func:`dumps` but is safe for use in ``<script>``
    tags. It accepts the same arguments and returns a JSON string. Note that
    this is available in templates through the ``|tojson`` filter which will
    also mark the result as safe. Due to how this function escapes certain
    characters this is safe even if used outside of ``<script>`` tags.

    The following characters are escaped in strings:
    -   ``<``
    -   ``>``
    -   ``&``
    -   ``'``

    This makes it safe to embed such strings in any place in HTML with the
    notable exception of double quoted attributes. In that case single
    quote your attributes or HTML escape it in addition.
    """
    dump = json.dumps if dumper is None else dumper
    rv = dump(obj, **kwargs)
    for char, escape in ((u'<', u'\\u003c'), (u'>', u'\\u003e'),
                         (u'&', u'\\u0026'), (u"'", u'\\u0027')):
        rv = rv.replace(char, escape)
    return rv
def get_host_format(node):
    """ Return host entries

    Builds one Ansible-inventory-style host line for *node*: the IP
    address followed by space-separated key=value variables describing
    the node's Spectrum Scale roles (quorum/manager/GUI/collector/NSD/
    admin), SSH credentials and node class. Expects *node* to be a
    mapping with all of the keys referenced below.
    """
    host_format = f"{node['ip_addr']} scale_cluster_quorum={node['is_quorum']} scale_cluster_manager={node['is_manager']} scale_cluster_gui={node['is_gui']} scale_zimon_collector={node['is_collector']} is_nsd_server={node['is_nsd']} is_admin_node={node['is_admin']} ansible_user={node['user']} ansible_ssh_private_key_file={node['key_file']} ansible_python_interpreter=/usr/bin/python3 scale_nodeclass={node['class']}"
    return host_format
def slices_overlap(slice_a, slice_b):
    """Test if the ranges covered by a pair of slices overlap."""
    # Only plain (step-less) slices are supported.
    assert slice_a.step is None
    assert slice_b.step is None
    lower = max(slice_a.start, slice_b.start)
    upper = min(slice_a.stop, slice_b.stop)
    return lower < upper
def get_requirements(fname):
    """
    Extracts requirements from requirements-file <fname>.

    Comment lines (starting with '#') and pip option lines (starting
    with '--') are skipped, as are empty lines.

    Returns:
        list[str]: the requirement specifiers.
    """
    # Context manager fixes the file-handle leak in the original.
    with open(fname, "rt") as handle:
        reqs = handle.read().strip("\r").split("\n")
    requirements = [
        req for req in reqs
        if req and not req.startswith("#") and not req.startswith("--")
    ]
    return requirements
import sqlite3
def get_current_db(table_name, db_name):
    """ connect to current database and return contents of specified table

    Args:
        table_name: name of the table to read.
        db_name: path of the sqlite database file.

    Returns:
        (rows, column_name): all rows of the table (rowid included,
        accessible by column name via sqlite3.Row) and the list of
        column names.
    """
    # Connect to db
    db_conn = sqlite3.connect(db_name)
    # Bugfix: row_factory must be sqlite3.Row (the original assigned the
    # sqlite3 *module*) and must be set BEFORE the cursor is created for
    # name-based row access to take effect.
    db_conn.row_factory = sqlite3.Row
    c = db_conn.cursor()
    rows = c.execute(
        f"""
        SELECT rowid, * FROM {table_name};
        """
    ).fetchall()
    # get list of column details
    column_details = c.execute(
        f"""
        PRAGMA table_info({table_name});
        """
    ).fetchall()
    # Column name is the second field of each PRAGMA table_info row.
    column_name = [item[1] for item in column_details]
    db_conn.commit()
    db_conn.close()
    return rows, column_name
import pickle as pkl
def loadVariable(filePath:str):
    """
    Use python module pickle to load a variable from *filePath*.

    Uses a context manager so the file handle is closed even if
    unpickling fails (the original leaked the open handle).
    """
    with open(filePath, "rb") as handle:
        return pkl.load(handle)
import re
def ros_name_for(s: str):
    """Obtain a ROS-allowed name: every character outside [a-zA-Z0-9_]
    is replaced with an underscore."""
    pattern = re.compile(r'[^a-zA-Z0-9_]')
    return pattern.sub('_', s)
import os
def path(*args):
    """
    Args:
        args: relative path from this directory
            ('stackrl/envs/data').
    Return:
        The absolute path to 'stackrl/envs/data/arg0/...' or,
        if no arg is given, to this directory.
    """
    base = os.path.dirname(__file__)
    return os.path.join(base, *args)
def output_contigs(g):
    """ Perform searching for Eulerian path in the graph to output genome assembly

    *g* is a (vertices, edges) pair: vertices maps label -> node objects
    with an ``indegree`` attribute, edges maps label -> list of successor
    nodes with a ``label`` attribute. Edges are consumed as they are walked.
    """
    vertices = g[0]
    edges = g[1]
    # Pick starting node (the vertex with minimal in degree).
    start = list(vertices.keys())[0]
    for key in list(vertices.keys()):
        if vertices[key].indegree < vertices[start].indegree:
            start = key
    contig = start
    current = start
    while edges[current]:
        # Pick the next node to be traversed (for now, the first edge)
        # and consume that edge.
        successor = edges[current].pop(0)
        # Each step extends the contig by the last character of the
        # successor's k-mer label.
        contig += successor.label[-1]
        current = successor.label
    return contig
def create_result(m, d, y):
    """ Creates result: a date formatted as 'MM/DD/YYYY'.

    Args:
        m: month (int)
        d: day (int)
        y: year (int)

    Returns:
        str: the zero-padded formatted date.
    """
    # A format spec replaces the original manual '0' + str(...) branches.
    return f"{m:02d}/{d:02d}/{y}"
def change(total, coins):
    """
    Minimum number of coins from *coins* needed to make *total*.

    Iterative dynamic-programming rewrite of the original recursive
    solution, which (per its own docstring) was time consuming and could
    exceed the maximum recursion depth. Semantics are unchanged: if
    *total* cannot be made exactly, the (unreachable) upper bound
    *total* is returned, matching the original's behaviour.
    """
    # dp[t] starts at the same per-amount upper bound (t) the recursion used.
    dp = list(range(total + 1))
    for amount in range(1, total + 1):
        for coin in coins:
            if amount - coin >= 0 and dp[amount - coin] + 1 < dp[amount]:
                dp[amount] = dp[amount - coin] + 1
    return dp[total]
def camelcase(name):
    """Converts a string to CamelCase.

    Args:
        name (str): String to convert.
    Returns:
        str: `name` in CamelCase.
    """
    pieces = name.split('_')
    return ''.join(map(str.capitalize, pieces))
import re
def change_ext(filename, new_ext):
    """Return a new filename with the final extension replaced by *new_ext*.

    Filenames without an extension are returned unchanged.
    """
    pattern = re.compile(r"\.\w+$")
    return pattern.sub(new_ext, filename)
from typing import Dict
from typing import Callable
def is_current_builder() -> Dict[str, Callable]:
    """Inject a function to evaluate whether or not a result is current.

    Returns a single-entry mapping {"is_current": callable} suitable for
    injection into a template/processing context.
    """
    def is_current(result: dict) -> bool:
        """Determine whether the result is the current version."""
        # No submission history recorded: fall back to the stored flag.
        if result["submitted_date_all"] is None:
            return bool(result["is_current"])
        try:
            # Current only if flagged AND its version number equals the
            # count of recorded submissions (i.e. it is the latest one).
            return bool(
                result["is_current"]
                and result["version"] == len(result["submitted_date_all"])
            )
        except Exception:
            # NOTE(review): broad catch — presumably guards against a
            # missing "version" key or an unsized submitted_date_all,
            # defaulting to "current"; confirm intent before narrowing.
            return True
        return False  # unreachable: both paths above return
    return {"is_current": is_current}
def completed(lines):
    """
    Check if the output file shows successful completion, i.e. whether
    its last line begins with 'TOTAL RUN TIME'.
    """
    return lines[-1].startswith('TOTAL RUN TIME')
import torch
import random
def create_sample_batch(speaker_data, batch_size, vector_dim):
    """
    Return torch tensors ((input1, input2), target) for siamese
    network training (for mutual information).
    Constructs each batch to have roughly equal amount of target
    and non-target samples to keep training balanced.

    Args:
        speaker_data: mapping of speaker id -> list of numpy vectors;
            every speaker needs >= 2 vectors for target sampling and
            there must be >= 2 speakers for non-target sampling.
        batch_size: number of pairs in the batch.
        vector_dim: dimensionality of each vector.

    NOTE(review): tensors are moved with .cuda(), so this requires a
    CUDA-capable torch build/device.
    """
    input1_tensor = torch.zeros(batch_size, vector_dim).float()
    input2_tensor = torch.zeros(batch_size, vector_dim).float()
    target_tensor = torch.zeros(batch_size, 1).float()
    speaker_ids = list(speaker_data.keys())
    for i in range(batch_size):
        # Throw a coin if we add target or non-target sample
        input1 = None
        input2 = None
        target = None
        if random.random() < 0.5:
            # Target sample:
            # Pick speaker, and then pick two separate vectors
            # from that speaker
            random_speaker_id = random.choice(speaker_ids)
            speaker_vectors = speaker_data[random_speaker_id]
            # random.sample takes two unique vectors
            input1, input2 = random.sample(speaker_vectors, 2)
            target = 1
        else:
            # Non-target sample
            # Pick two different speakers and one vector from
            # each
            speaker1, speaker2 = random.sample(speaker_ids, 2)
            input1 = random.choice(speaker_data[speaker1])
            input2 = random.choice(speaker_data[speaker2])
            target = 0
        # Put sampled vectors to batch
        input1_tensor[i] = torch.from_numpy(input1).float()
        input2_tensor[i] = torch.from_numpy(input2).float()
        target_tensor[i, 0] = target
    # Move the whole batch to the GPU before returning.
    input1_tensor = input1_tensor.cuda()
    input2_tensor = input2_tensor.cuda()
    target_tensor = target_tensor.cuda()
    return ((input1_tensor, input2_tensor), target_tensor)
import six
def safe_shadow(text):
    """
    Shadow string to first and last char.

    Uses ``isinstance(text, str)`` directly instead of the third-party
    ``six.string_types`` (identical on Python 3).

    :param text:
    :return:
    >>> safe_shadow(None)
    'None'
    >>> safe_shadow("s")
    '******'
    >>> safe_shadow("sssssss")
    's******s'
    >>> safe_shadow(1)
    '******'
    >>> safe_shadow([1, 2])
    '******'
    """
    if not text:
        # Covers None, "", 0, empty containers.
        return "None"
    elif not isinstance(text, str):
        # Non-string truthy values are fully masked.
        return "******"
    elif len(text) > 2:
        return "%s******%s" % (text[0], text[-1])
    else:
        return "******"
def _merge_notebooks_feedback(notebook_ids, checksums):
"""
Returns a list of dictionaries with 'notebook_id' and 'feedback_checksum'.
``notebook_ids`` - A list of notebook IDs.
``checksum`` - A dictionary mapping notebook IDs to checksums.
"""
merged = []
for nb_id in notebook_ids:
if nb_id not in checksums.keys():
checksum = None
else:
checksum = checksums[nb_id]
merged.append({'notebook_id': nb_id, 'feedback_checksum': checksum})
return merged | 6e45fa2f5889b94b9dd6e0164ab7554e284fa3b9 | 37,912 |
def decode_output(outputs):
    """Decode outputs into boxes and masks (stub: three empty lists)."""
    boxes, scores, masks = [], [], []
    return boxes, scores, masks
def linenumber(target):
    """ Given a python callable try and determine the source line number where it is defined

    Falls back through __code__, __func__ and __call__; returns '??'
    when none of them is present.
    """
    code = getattr(target, "__code__", None)
    if code is not None:
        return code.co_firstlineno
    func = getattr(target, "__func__", None)
    if func is not None:
        return linenumber(func)
    call = getattr(target, "__call__", None)
    if call is not None:
        return linenumber(call)
    return "??"
import os
import glob
def get_table_defs():
    """Return a dictionary containing the columns for each database
    table, as taken from the table_definition text files.

    Returns
    -------
    table_defs : dict
        A dictionary whose keys are detector/file_type/extension
        configurations (e.g. 'wfc_flt_0') and whose values are lists
        of column names for the corresponding table.
    """
    # Get table definition files
    table_def_directory = os.path.realpath(os.path.join(os.getcwd(),
                                                        os.path.dirname(__file__)))
    # NOTE(review): string-replacing 'utils' in the path assumes this
    # module lives in a directory literally named 'utils' (and that the
    # name appears nowhere else in the path); confirm against the layout.
    table_def_directory = table_def_directory.replace('utils', 'database/table_definitions/')
    table_def_files = glob.glob(os.path.join(table_def_directory, '*.txt'))
    table_defs = {}
    for table_def_file in table_def_files:
        # Configuration name = file basename without the .txt suffix.
        configuration = os.path.basename(table_def_file).split('.txt')[0]
        with open(table_def_file, 'r') as f:
            contents = f.readlines()
        contents = [item.strip() for item in contents]
        # Column name is the first comma-separated field of each line.
        columns = [item.split(',')[0] for item in contents]
        table_defs[configuration] = columns
    return table_defs
def size_format(size):
    """
    Format a byte count as a human-readable string using decimal
    (power-of-1000) multiples.

    :param size: byte
    :return: formatted string
    """
    if size < 1000:
        return '%i' % size + 'size'
    # (upper bound, divisor, suffix) for each decimal magnitude.
    for bound, divisor, suffix in ((1000000, 1000, 'KB'),
                                   (1000000000, 1000000, 'MB'),
                                   (1000000000000, 1000000000, 'GB')):
        if size < bound:
            return '%.1f' % float(size / divisor) + suffix
    return '%.1f' % float(size / 1000000000000) + 'TB'
import random
def generate_random(power: int) -> list:
    """
    Generate a list with 2 ** power random floats in [0, 1).
    """
    size = 2 ** power
    return [random.random() for _ in range(size)]
def findmax(L):
    """
    Return the largest element of *L*.

    >>> L1 = [ 1, 4, 5, 10 ]
    >>> findmax(L1)
    10
    >>> L2 = [ 1, 3, 9, 33, 81 ]
    >>> findmax(L2)
    81
    """
    largest = max(L)
    return largest
import re
def camel_case(s):
    """Convert a snake_case string (maybe with lowerCaseFirst) to CamelCase."""
    spaced = s.replace("_", " ")
    spaced = re.sub(r"([A-Z])", r" \1", spaced)
    return "".join(word.title() for word in spaced.split())
def dayOfWeek(julian):
    """Get day of week from a julian day.

    :param `julian`: the julian day
    :returns: the day of week as an integer and Monday = 1
    """
    shifted = julian + 1
    return int(shifted % 7)
import re
def is_valid_name(project_name):
    """
    Judge whether *project_name* is a valid project name: it must begin
    with a letter or underscore and contain only word characters.

    :param project_name:
    :return: bool
    """
    if re.search(r'^[_a-zA-Z]\w*$', project_name):
        return True
    print('Error: Project Name must begin with a letter and contain only letters, numbers and underscores')
    return False
def recall(tp_count, targets_count):
    """Calculates recall.

    :param tp_count: Number of true positives.
    :param targets_count: Number of targets.
    :return: The recall rate (0.0 when there are no targets).
    """
    return 0.0 if targets_count == 0 else tp_count / float(targets_count)
from typing import List
import csv
def cast_csv_2_list(csv_str: str, delimiter: str = ',', quote_char: str = '"',
                    csv_quoting: int = csv.QUOTE_MINIMAL, skipinitialspace: bool = True) -> List[str]:
    """
    Convert a csv string into a list of strings.

    NOTE(review): the original (German) docstring claimed a non-string
    csv_str is returned unchanged, but the doctests below show that
    non-string input raises instead — confirm the intended contract.

    >>> import unittest
    >>> cast_csv_2_list('a,b,c')
    ['a', 'b', 'c']
    >>> cast_csv_2_list('a,"b,c",d')
    ['a', 'b,c', 'd']
    >>> cast_csv_2_list('a,"b , c",d')
    ['a', 'b , c', 'd']
    >>> # UNEXPECTED behaviour with a blank before the quotechar unless skipinitialspace=True is set
    >>> cast_csv_2_list('a, "x, y" , b')
    ['a', 'x, y ', 'b']
    >>> # UNEXPECTED behaviour with a blank before the quotechar unless skipinitialspace=True is set
    >>> cast_csv_2_list('a, "b , c" , b')
    ['a', 'b , c ', 'b']
    >>> # UNEXPECTED behaviour with a blank before the quotechar unless skipinitialspace=True is set
    >>> cast_csv_2_list('a, "b , c" , b', skipinitialspace=False)
    ['a', ' "b ', ' c" ', ' b']
    >>> cast_csv_2_list('a,"b,c",b')
    ['a', 'b,c', 'b']
    >>> cast_csv_2_list('a')
    ['a']
    >>> # raise Error if csv_string is None
    >>> unittest.TestCase().assertRaises(Exception, cast_csv_2_list, csv_str=None)
    >>> # raise Error if csv_string is wrong type
    >>> unittest.TestCase().assertRaises(Exception, cast_csv_2_list, csv_str=1)
    """
    myreader = csv.reader([csv_str], delimiter=str(delimiter), quotechar=str(quote_char), quoting=csv_quoting, skipinitialspace=skipinitialspace)
    #
    # use csv.reader to parse the string; the str() casts keep
    # python2 compatibility
    ls_returnlist = []
    for ls_lines in myreader:  # there will only ever be one row
        ls_returnlist = ls_lines  # that first row is our new commands
    return ls_returnlist
def intersect_interval(interval1, interval2):
    """Computes the intersection of two intervals.

    Parameters
    ----------
    interval1: tuple[int]
        Should be `(x1_min, x1_max)`
    interval2: tuple[int]
        Should be `(x2_min, x2_max)`

    Returns
    -------
    x_intersect: tuple[int]
        The intersection. `(0, 0)` represents the empty set (the
        intervals are strictly disjoint); otherwise the result is
        `(max(x1_min, x2_min), min(x1_max, x2_max))`, which may be a
        degenerate point when the intervals merely touch.
    """
    low = max(interval1[0], interval2[0])
    high = min(interval1[1], interval2[1])
    if high < low:
        # Strictly disjoint intervals -> empty set.
        return (0, 0)
    return (low, high)
def read_requirements(filepath):
    """Parses a dependency file, dropping comment lines ('#')."""
    with open(filepath) as requirements:
        lines = requirements.read().splitlines()
    return [line for line in lines if not line.startswith('#')]
from typing import Any
import argparse
def parse_args() -> Any:
    """
    Define an argument parser and return the parsed arguments
    (reads from sys.argv; --file is the only required option).
    """
    parser = argparse.ArgumentParser(
        prog='generator.py',
        description='takes a pgn file and produces chess puzzles')
    # Input PGN file (mandatory).
    parser.add_argument("--file", "-f", help="input PGN file", required=True, metavar="FILE.pgn")
    # UCI engine binary used for analysis.
    parser.add_argument("--engine", "-e", help="analysis engine", default="stockfish")
    # NOTE(review): the default is the string "4", not the int 4 — callers
    # presumably convert; confirm before adding type=int.
    parser.add_argument("--threads", "-t", help="count of cpu threads for engine searches", default="4")
    # Repeatable -v flag: verbosity level is the count of occurrences.
    parser.add_argument("--verbose", "-v", help="increase verbosity", action="count")
    return parser.parse_args()
def stage_feature_module_list():
    """
    :return: mapping from feature name to the list of modules that must be
        included when that feature is selected AND stages are present
    """
    return {
        "markets": ["system.load_balance.fix_market_participation"],
    }
def get_order_by_from_request(request):
    """
    Extract the queryset ordering field from the request's query string.

    :param request: HTTP request carrying optional "dir" and "sort" params
    :return: the sort field name, prefixed with "-" for descending order;
        an empty string when no direction is given
    """
    direction = request.GET.get("dir")
    if not direction:
        return ""
    field = request.GET.get("sort") or ""
    prefix = "-" if direction == "desc" else ""
    return prefix + field
def turn_strat_into_label(stratum):
    """
    Convert an age-stratification string into a human-readable label.

    Args:
        stratum: Model stratum string, e.g. 'age_65up' or 'age_5to14'
    Returns:
        Plot-friendly label ('65 and up', '5 to 14', 'All ages' for the
        empty string, '' for anything unrecognised)
    """
    if 'up' in stratum:
        # e.g. 'age_65up' -> chars 4..-2 are the lower bound
        return '{} and up'.format(stratum[4:-2])
    if 'to' in stratum:
        split_at = stratum.find('to')
        return '{} to {}'.format(stratum[4:split_at], stratum[split_at + 2:])
    if stratum == '':
        return 'All ages'
    return ''
def diff(prev_snapshot, next_snapshot):
    """Return a dict describing the changes between two snapshots.

    Buckets: 'left_only' (only in prev), 'right_only' (only in next),
    'changed' (file whose mtime differs), 'common' (mtime unchanged).
    Non-file entries whose mtime changed land in no bucket.
    """
    result = {
        'left_only': [],
        'right_only': [],
        'changed': [],
        'common': [],
    }
    for path in set(prev_snapshot) | set(next_snapshot):
        in_prev = path in prev_snapshot
        in_next = path in next_snapshot
        if in_prev and not in_next:
            result['left_only'].append(path)
        elif in_next and not in_prev:
            result['right_only'].append(path)
        elif next_snapshot[path].mtime != prev_snapshot[path].mtime:
            # Only files count as changed; directories with a new mtime
            # are deliberately left out of every bucket.
            if next_snapshot[path].isfile:
                result['changed'].append(path)
        else:
            result['common'].append(path)
    return result
def make_table_row(contents, tag="td"):
    """Wrap each element of *contents* in *tag* and join them into an HTML
    table row.

    Args:
        contents: Iterable yielding cell contents (formatted with %s).
        tag: Cell tag, 'td' by default ('th' for a header row).

    Returns:
        A '<tr>...</tr>\\n' string with one '<td>...</td>' line per element.
    """
    row_parts = ["<tr>\n"]
    for cell in contents:
        row_parts.append("<%s>%s</%s>\n" % (tag, cell, tag))
    row_parts.append("</tr>\n")
    return "".join(row_parts)
def _gauge_pair():
""" Track gauge as a pair of (sum, count) """
return [0, 0] | df91d575a0e96560140e0060695e4a91794c99fc | 37,939 |
def typical_price(data, high_col='High', low_col='Low', close_col='Close'):
    """
    Typical Price: (high + low + close) / 3 per row.
    Source: https://en.wikipedia.org/wiki/Typical_price
    Params:
        data: pandas DataFrame (modified in place)
        high_col: name of the HIGH values column
        low_col: name of the LOW values column
        close_col: name of the CLOSE values column
    Returns:
        the same 'data' DataFrame with a 'typical_price' column added
    """
    price_sum = data[high_col] + data[low_col] + data[close_col]
    data['typical_price'] = price_sum / 3
    return data
from typing import Optional
def _get_description(experiment_config: dict) -> Optional[str]:
"""Returns the description of the experiment described by
|experiment_config| as a string."""
return experiment_config.get('description') | 2262b8bb5b17e3ecb408bd3f2d133753dd0a1bbe | 37,942 |
def get_family_name_from(seq_name_and_family):
    """Extract the family accession from a concatenated name/family string.

    Args:
        seq_name_and_family: string of the form `sequence_name`_`family_accession`,
            e.g. OLF1_CHICK/41-290_PF00001.20. The family accession itself must
            not contain an underscore.

    Returns:
        string: the PFam family accession (last underscore-separated segment).
    """
    return seq_name_and_family.rsplit('_', 1)[-1]
def __clean_indicator_info(indicators):
    """
    Clean the strings contained in the indicator / category dataframe.

    Normalises 'IndicatorName' (HTML tags, stray unicode, inconsistent
    thousand separators) and standardises 'CATEGORY' spellings, then fills
    missing categories from IndicatorCode prefixes and reorders the columns.

    Parameters
    ----------
    indicators : pd.DataFrame()
        A dataframe of indicator information; must have 'IndicatorName',
        'CATEGORY', 'IndicatorCode', 'display_sequence', 'url' and
        'DEFINITION_XML' columns.

    Returns
    -------
    indicators : pd.DataFrame()
        The dataframe with cleaned names/categories and reordered columns.
    """
    ### Cleaning 'IndicatorName'
    # Simple stuff - double spaces, etc.
    # NOTE(review): the two arguments below render identically here — the
    # first is presumably a double space being collapsed; verify.
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace(' ',' ')
    # Strip HTML sub/superscript tags (closing tags leave a space behind).
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace('<sub>','')
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace('</sub>',' ')
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace('<sup>','')
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace('</sup>',' ')
    # Drop stray unicode symbols.
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace('μ ',' ')
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace('≥ ',' ')
    # Standardise thousand separators; ordered longest-first so e.g.
    # '1 000 000' is rewritten before '1 000' can match inside it.
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace('1 000 000','1,000,000')
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("1'000'000",'1,000,000')
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("1000000",'1,000,000')
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("million population",'1,000,000 population')
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace('100 000','100,000')
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("100'000","100,000")
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("100000","100,000")
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("10'000","10,000")
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("10 000","10,000")
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("10000","10,000")
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("1'000","1,000")
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("1 000","1,000")
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("1000","1,000")
    # Remove padding just inside parentheses (patterns are regexes here,
    # hence the escaped brackets).
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace("\( ","(")
    indicators['IndicatorName'] = indicators['IndicatorName'].str.replace(" \)",")")
    indicators['IndicatorName'] = indicators['IndicatorName'].str.strip()
    ### Cleaning 'Category': Standardise spelling and map a couple variants
    # Build a title-cased rename map, then patch known irregular spellings.
    categories = [x for x in indicators['CATEGORY'].unique() if not x == None]
    categories = {category: category.title() for category in categories}
    categories['Negelected tropical diseases'] = 'Neglected Tropical Diseases'
    categories['Rsud: Governance, Policy And Financing : Prevention'.upper()] = 'RSUD: Governance, Policy And Financing: Prevention'
    categories['UHC'] = 'Universal Health Coverage'
    categories['HIV/AIDS and other STIs'] = 'HIV/AIDS And Other STIs'
    categories['ICD'] = 'ICD'
    categories['Noncommunicable Diseases Ccs'] = 'Noncommunicable Diseases'
    # Restore acronym casing that .title() mangled.
    categories = {key: val.replace('Amr','AMR').replace('Goe','GOe').replace('Rsud','RSUD').replace('And','and')
                  for (key, val) in categories.items()}
    # NOTE(review): .map() turns any CATEGORY value missing from the dict
    # into NaN; those are then back-filled below.
    indicators['CATEGORY'] = indicators['CATEGORY'].map(categories)
    # Add some categories based on desk research. 13 indicators still not mapped
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('GDO_'), 'CATEGORY'] = 'Global Dementia Observatory'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('EMF'), 'CATEGORY'] = 'Electromagnetic Fields'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('FAMILYPLANNINGUNPDUHC'), 'CATEGORY'] = 'Sexual and Reproductive Health'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('SG_DMK_SRCR_FN_ZS'), 'CATEGORY'] = 'Sexual and Reproductive Health'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('GHED_'), 'CATEGORY'] = 'Health Financing'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('_UHC'), 'CATEGORY'] = 'Universal Health Coverage'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('IHRSPAR_'), 'CATEGORY'] = 'International Health Regulations (2005) Monitoring Framework'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('NLIS_'), 'CATEGORY'] = 'Nutrition'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('PHE_'), 'CATEGORY'] = 'Public Health and Environment'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('RADON'), 'CATEGORY'] = 'Public Health and Environment'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('SA_'), 'CATEGORY'] = 'Global Information System on Alcohol and Health'
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('SE_'), 'CATEGORY'] = "Global Strategy For Women's, Children's and Adolescents' Health"
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('SG_'), 'CATEGORY'] = "Global Strategy For Women's, Children's and Adolescents' Health"
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('SH_'), 'CATEGORY'] = "Global Strategy For Women's, Children's and Adolescents' Health"
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('SI_'), 'CATEGORY'] = "Global Strategy For Women's, Children's and Adolescents' Health"
    indicators.loc[indicators['CATEGORY'].isna() & indicators['IndicatorCode'].str.contains('SP_'), 'CATEGORY'] = "Global Strategy For Women's, Children's and Adolescents' Health"
    # Reorder columns
    indicators = indicators[['IndicatorCode', 'IndicatorName', 'CATEGORY', 'display_sequence', 'url','DEFINITION_XML']]
    return indicators
def turn_list_to_int(list_):
    """Convert every element of *list_* to int, returning a new list."""
    return list(map(int, list_))
from bs4 import BeautifulSoup
def extract_html_links(text):
    """
    Grab GOV.UK domain-specific links from page text.

    :param text: Text within a details sub-section (HTML).
    :return: list of links, with the absolute GOV.UK prefix rewritten to
        a leading "/"; only relative or GOV.UK links are kept.
    """
    gov_prefix = "https://www.gov.uk/"
    hrefs = []
    try:
        soup = BeautifulSoup(text, "html5lib")
        hrefs = [anchor.get('href') for anchor in soup.findAll('a', href=True)]
    except Exception:
        # Best-effort parse: swallow parser failures and return nothing.
        print("error")
    return [href.replace(gov_prefix, "/") for href in hrefs
            if href.startswith("/") or href.startswith(gov_prefix)]
def assertContainsExportItems(item_tuples, export_group_schema):
    """
    Assert the schema's items are exactly the given (path, label) tuples.

    :param item_tuples: list of ("path", "label") tuples representing each export item:
        eg: [("form.group.question2", "question_label")]
    :raises AssertionError: when any item is missing or unexpected.
    """
    actual = {
        (item.readable_path, item.label)
        for item in export_group_schema.items
    }
    expected = set(item_tuples)
    missing = expected - actual
    extra = actual - expected
    if not (missing or extra):
        return
    def prettify(list_of_tuples):
        return '\n '.join(map(str, list_of_tuples))
    raise AssertionError("Contains items:\n {}\nMissing items:\n {}\nExtra items:\n {}"
                         .format(prettify(actual), prettify(missing), prettify(extra)))
def pow(a, b):
    """
    Find a^b using recursion.

    Params:
        a (float) - Base
        b (int)   - Exponent; may now be negative (a^-b == 1 / a^b)

    Returns:
        Value of a^b (float); 0 after printing an error if b is not an int.

    Bug fix: a negative exponent previously recursed without a base case
    and crashed with RecursionError.
    """
    if not isinstance(b, int):
        # Preserve the original contract: warn and return 0 for non-ints.
        print("ERROR: pow() not callable with doubles!")
        return 0
    if b == 0:
        return 1
    if b < 0:
        # Negative exponent: invert the positive power.
        return 1 / pow(a, -b)
    return a * pow(a, b - 1)
import os
def skip_optional(func):
    """Decorator for result-file handlers.

    Skips the call (returns None) when an optional result file is missing,
    raises when a required one is missing, and otherwise delegates to the
    wrapped function.  `local_result_file` must provide 'dir' and 'name'
    and may provide a truthy 'optional' flag.

    Improvement: uses functools.wraps so the wrapper no longer hides the
    wrapped function's name and docstring.
    """
    from functools import wraps  # local import keeps the decorator self-contained

    @wraps(func)
    def wrapper(connector_access, local_result_file, metadata):
        local_file_path = os.path.join(local_result_file['dir'], local_result_file['name'])
        if not os.path.isfile(local_file_path):
            if local_result_file.get('optional'):
                # Optional file absent: succeed silently without calling func.
                return
            else:
                raise Exception('Result file does not exist and is not optional: {}'.format(local_file_path))
        return func(connector_access, local_result_file, metadata)
    return wrapper
def twr(rors):
    """Time-Weighted Return (geometric average return).

    Compounds the per-period rates of return, excluding the effect of
    deposits and withdrawals, so the underlying investment performance
    can be judged on its own.

    Args:
        rors: List of rates of return, one per time period.
    Returns:
        The time-weighted return over all periods (0 for an empty list).

    Example:
        twr([0.10, -0.05, 0.12]) is approximately 0.1704
        (1.10 * 0.95 * 1.12 - 1).
    """
    growth = 1
    for rate in rors:
        growth = growth * (1 + rate)
    return growth - 1
def NoEmbedding(X):
    """Identity embedding: hand back the input unchanged."""
    return X
def find_sequence_with_x(consensus, single_sequences):
    """
    Resolve a consensus sequence's 'X' positions against the original
    sequences: an 'X' stays 'X' only where the originals actually disagree;
    where they all agree, the shared base is restored.
    """
    resolved = []
    for position, base in enumerate(consensus):
        if base != 'X':
            resolved.append(base)
            continue
        reference = single_sequences[0][position]
        if any(seq[position] != reference for seq in single_sequences):
            resolved.append('X')
        else:
            resolved.append(reference)
    return ''.join(resolved)
def bytes_split(bytes_, length):
    """Split *bytes_* into consecutive pieces of exactly *length* bytes.

    Raises ValueError when the input length is not a multiple of *length*.
    """
    remainder = len(bytes_) % length
    if remainder:
        raise ValueError('Bytes of length {} cannot be equally divided into '
                         'pieces of length {}'.format(len(bytes_), length))
    return [bytes_[start:start + length]
            for start in range(0, len(bytes_), length)]
def diamag_correction(H, H0, Mp, Mpp, m_sample, M_sample, Xd_sample, constant_terms=[], paired_terms=[]):
    """
    Apply a diamagnetic correction to Mp/Mpp and derive Xp/Xpp.

    Input
    H: amplitude of AC field (unit: Oe)
    H0: strength of applied DC field (unit: Oe)
        NOTE(review): H0 is currently unused by the calculation.
    Mp: in-phase magnetization (unit: emu)
    Mpp: out-of-phase magnetization (unit: emu)
    m_sample: sample mass (unit: mg)
    M_sample: sample molar mass (unit: g/mol)
    Xd_sample: sample diamagnetic susceptibility in emu/(Oe*mol) from DOI: 10.1021/ed085p532
    constant_terms: terms subtracted directly from magnetization (unit: emu/Oe)
    paired_terms: list of 2-tuples subtracted pairwise; each pair's product
        must have unit emu/Oe

    Output
    Mp_molar, Mpp_molar: molar magnetizations, diamagnetically corrected (emu/mol)
    Xp_molar, Xpp_molar: molar susceptibilities, diamagnetically corrected (emu/(Oe*mol))
    """
    # Convert sample mass from mg to g, then to moles.
    mass_g = m_sample * 10**-3
    n_sample = mass_g / M_sample
    # Total field-proportional correction from constant and pairwise terms.
    sum_of_constants = sum(constant_terms)
    sum_of_pairwise = sum(pair[0] * pair[1] for pair in paired_terms)
    Mp_molar = (Mp - (sum_of_constants + sum_of_pairwise)*H - Xd_sample*H*n_sample)/n_sample
    Mpp_molar = Mpp/n_sample
    Xp_molar = Mp_molar/H
    Xpp_molar = Mpp_molar/H
    return Mp_molar, Mpp_molar, Xp_molar, Xpp_molar
def median(a, i, j, k):
    """
    Return the index (i, j, or k) whose element is the median of the three.

    :param a: Indexable collection of numbers
    :param i: first element index
    :param j: second element index
    :param k: third element index
    :return: whichever of i, j, k holds the median value (ties prefer i, then j)

    Bug fix: the previous sum-max-min trick computed the median value with
    floating-point rounding error (e.g. a = [0.1, 0.2, 0.3] gave
    0.20000000000000004), so the equality tests could all miss and the
    wrong index was returned.  Sorting the three values avoids that.
    """
    med_val = sorted((a[i], a[j], a[k]))[1]
    if a[i] == med_val:
        return i
    elif a[j] == med_val:
        return j
    return k
from typing import Union
from datetime import datetime
import os
import locale
import re
def to_datetime(date_string: str, language: str) -> Union[datetime, str]:
    """Parse *date_string* into a datetime using locale-aware month names.

    Tries "dd/mm/YYYY" first, then "Mon/YYYY" (abbreviated month name in the
    requested language), then a bare 4-digit year (mapped to 31 December of
    that year).  If nothing matches, the original string is returned.

    Args:
        date_string: date text to parse.
        language: "pt" or "en"; selects the locale used for month names.

    Returns:
        A datetime on success, otherwise the unmodified input string.

    NOTE(review): this mutates the process-wide LC_TIME locale and never
    restores it; the named locales must be installed on the host or
    locale.setlocale raises locale.Error.
    """
    locales = {"pt": "pt_BR.utf-8", "en": "en_US.utf-8"}
    """ correct problem with locale in Windows platform """
    # The POSIX locale names above do not exist on Windows; both languages
    # fall back to the Brazilian-Portuguese code page name there.
    # NOTE(review): this means English month names may not parse on Windows.
    if os.name == 'nt':
        locales = {"pt": "Portuguese_Brazil.1252", "en": "Portuguese_Brazil.1252"}
    locale.setlocale(locale.LC_TIME, locales[language])
    dd_mm_aaaa = "%d/%m/%Y"
    mmm_aaaa = "%b/%Y"
    formats = [dd_mm_aaaa, mmm_aaaa]
    # First format that parses wins; the for/else falls through to the
    # year-only fallback when none do.
    for fmt in formats:
        try:
            date = datetime.strptime(date_string, fmt)
            break
        except ValueError:
            continue
    else:
        # NOTE(review): pattern is not end-anchored, so any string that
        # *starts* with 4 digits reaches int(); longer digit runs raise.
        yyyy = "[0-9]{4}"
        if re.match(yyyy, date_string):
            year = int(date_string)
            month = 12
            day = 31
            date = datetime(year, month, day)
        else:
            return date_string  # returns original value if cant parse
    return date
def is_permutation_dict(s1, s2):
    """Check whether s2 is a permutation of s1 using a character-count dict."""
    if len(s1) != len(s2):
        return False
    if s1 == s2:
        return True
    counts = {}
    for ch in s1:
        counts[ch] = counts.get(ch, 0) + 1
    for ch in s2:
        remaining = counts.get(ch, 0)
        if remaining == 0:
            # ch appears more often in s2 than in s1 (or not at all).
            return False
        counts[ch] = remaining - 1
    return True
def rearange_base_link_list(table, base_link_index):
    """Move the base link to the front of *table* (mutates and returns it)."""
    table.insert(0, table.pop(base_link_index))
    return table
import html
def decode_html_entities(v):
    """Decode HTML entities in *v* to their Unicode characters.

    Integers are first converted to their decimal string form.
    """
    text = str(v) if isinstance(v, int) else v
    return html.unescape(text)
import os
def get_relname(path, relative_to):
    """Return *path* relative to *relative_to*, with '.' mapped to ''."""
    rel = os.path.relpath(path, relative_to)
    return '' if rel == '.' else rel
def exe_success(return_code: int) -> bool:
    """Tell whether a process exit status signals success.

    Args:
        return_code (int): Return code of a process.

    Returns:
        bool: True exactly when the return code is 0.
    """
    succeeded = (return_code == 0)
    return succeeded
def extract_pairs_from_lines(lines):
    """Collect [line, next_line] pairs for consecutive non-blank lines.

    Lines are stripped of surrounding whitespace; a pair is kept only when
    both members are non-empty after stripping.
    """
    pairs = []
    for current, following in zip(lines, lines[1:]):
        current = current.strip()
        following = following.strip()
        if current and following:
            pairs.append([current, following])
    return pairs
def _replace_nan_with_none(
plot_data,
plot_keys):
"""Replaces all instances of nan with None in plot data.
This is necessary for Colab integration where we serializes the data into json
string as NaN is not supported by json standard. Turning nan into None will
make the value null once parsed. The visualization already handles falsy
values by setting them to zero.
Args:
plot_data: The original plot data
plot_keys: A dictionary containing field names of plot data.
Returns:
Transformed plot data where all nan has been replaced with None.
"""
output_metrics = {}
for plot_type in plot_keys:
metric_name = plot_keys[plot_type]['metricName']
if metric_name in plot_data:
data_series_name = plot_keys[plot_type]['dataSeries']
if data_series_name in plot_data[metric_name]:
data_series = plot_data[metric_name][data_series_name]
outputs = []
for entry in data_series:
output = {}
for key in entry:
value = entry[key]
# When converting protocol buffer into dict, float value nan is
# automatically converted into the string 'NaN'.
output[key] = None if value == 'NaN' else value
outputs.append(output)
output_metrics[metric_name] = {data_series_name: outputs}
return output_metrics | 887fff7f110945f8444f5ffec205828edf63f1f6 | 37,968 |
def build_confirmar_cohortes_query(filters, page, request, tipo_unidad_educativa, unidad_educativa_id):
    """
    Build the search query from the filters, narrowing it to the given
    educational unit (establecimiento, anexo or extension_aulica) when the
    unit type is recognised; otherwise return the unfiltered query.
    """
    unit_field_by_type = {
        'establecimiento': 'establecimiento__id',
        'anexo': 'anexo__id',
        'extension_aulica': 'extension_aulica__id',
    }
    query = filters.buildQuery()
    field = unit_field_by_type.get(tipo_unidad_educativa)
    if field is not None:
        return query.filter(**{field: unidad_educativa_id})
    return query
def filterKeys(schema):
    """
    Reduce a request schema to its status plus input/output dataset info.

    For StepChain/TaskChain requests, the InputDataset of the last inner
    Step/Task dict that defines one overrides the top-level value.
    """
    newSchema = {
        'RequestStatus': schema.get('RequestStatus', ""),
        'InputDataset': schema.get('InputDataset', ""),
        'OutputDatasets': schema.get('OutputDatasets', ""),
    }
    request_type = schema['RequestType']
    if request_type in ('StepChain', 'TaskChain'):
        prefix = request_type.split('Chain')[0]  # 'Step' or 'Task'
        num_inner = schema[request_type]
        for idx in range(1, num_inner + 1):
            inner = schema["%s%s" % (prefix, idx)]
            if 'InputDataset' in inner:
                newSchema['InputDataset'] = inner['InputDataset']
    return newSchema
def time2str(t):
    """Extract ("YYYYMMDD", "HH") strings from a datetime object.

    Args:
        t (datetime.datetime)
    """
    text = str(t)
    date_part = text[:10].replace('-', '')
    hour_part = text[11:13]
    return date_part, hour_part
def reverse(text):
    """<string> -- Reverses <string>."""
    return "".join(reversed(text))
def vcorrcoef(x, y):
    """
    Row-wise Pearson correlation coefficient.

    :param x,y: 2D tensors of shape (batch, d)
    :return: 1D tensor of shape (batch,) with the correlation per row pair
    """
    x_centered = x - x.mean(dim=1, keepdim=True)
    y_centered = y - y.mean(dim=1, keepdim=True)
    numerator = (x_centered * y_centered).sum(dim=1)
    denominator = (x_centered.pow(2).sum(dim=1) * y_centered.pow(2).sum(dim=1)).sqrt()
    return numerator / denominator
def bytearray_to_long(byte_array):
    """
    Interpret a byte array as a big-endian unsigned integer.

    :param byte_array:
        Iterable of byte values, most significant first.
    :returns:
        The combined integer value (0 for an empty input).
    """
    # Horner's scheme: each step shifts the running total one byte left
    # and folds in the next value.
    total = 0
    for byte_val in byte_array:
        total = total * 256 + byte_val
    return total
def identity(*args):
    """
    Return whatever is passed in: multiple arguments come back as a tuple,
    a single argument comes back as itself.
    """
    if len(args) > 1:
        return args
    return args[0]
def WordStartWithUppercase(word):
    """Return whether a word starts with an uppercase letter.

    Bug fix: the previous `c == c.upper()` test reported True for caseless
    leading characters such as digits or punctuation; str.isupper() only
    accepts genuine uppercase letters.  Empty strings return False as before.
    """
    return word[:1].isupper()
def greatest_sum_of_subarrays(nums):
    """
    Kadane's algorithm: maximum sum over all contiguous subarrays.

    :param nums: sequence of numbers
    :return: max subarray sum (-inf for an empty sequence)
    """
    best = float('-inf')
    running = float('-inf')
    for value in nums:
        # Restart the window whenever the carried sum can only hurt.
        running = value if running <= 0 else running + value
        if running > best:
            best = running
    return best
import os
def get_files(dirname, reverse=False):
    """Return the sorted list of file paths directly inside *dirname*.

    Directories and other non-file entries are skipped.

    Args:
        dirname: directory to list.
        reverse: sort descending when True.  Bug fix: this parameter was
            previously accepted but silently ignored.

    Note: a leftover per-entry debug print was also removed.
    """
    filepaths = []
    for basename in sorted(os.listdir(dirname), reverse=reverse):
        filename = os.path.join(dirname, basename)
        if os.path.isfile(filename):
            filepaths.append(filename)
    return filepaths
def heading_level(line):
    """Return the markdown heading level of *line* (1, 2, 3; 0 for normal).

    The level is capped at 3, so '####...' still reports 3, matching the
    original behaviour.

    Bug fix: lines shorter than the scan window (e.g. '', '#', '###')
    previously raised IndexError when indexing past the end of the string.
    """
    for i in range(4):
        if i >= len(line) or line[i] != '#':
            return i
    return 3
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.