| content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
import torch
def compute_jacobian(x, fx):
"""Function to compute jacobian
Args:
x:
fx:
Returns:
Jacobian
"""
b = x.size(0)
m = fx.size(-1)
J = []
for i in range(m):
grad = torch.zeros(b, m)
grad[:,i] = 1.
grad = grad.to(x.device)
g = torch.autograd.grad(outputs=fx, inputs = x, grad_outputs = grad, create_graph=True, only_inputs=True)[0]
J.append(g.view(x.size(0),-1).unsqueeze(-1))
J = torch.cat(J,2)
return J
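# Usage sketch (added; not part of the original snippet). For a linear map
# fx = x @ W.T, each batch element's Jacobian should equal W.T.
if __name__ == "__main__":
    x = torch.randn(4, 3, requires_grad=True)
    W = torch.randn(2, 3)
    fx = x.matmul(W.t())             # shape (4, 2)
    J = compute_jacobian(x, fx)      # shape (4, 3, 2)
    assert torch.allclose(J[0], W.t(), atol=1e-6)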
|
d304ad3c2d26779a02a4f35a1e7f4e9e5844788d
| 15,956
|
import math
def calc_rec_cycle(number):
"""calculate recurring cycle of 1/n"""
result = 0
i = 10 ** (int(math.log10(number)) + 1)
s = set()
while True:
if i == number or i == 0:
result = 0
break
if i < number:
result += 1
i *= 10
continue
        # i > number
r = i % number
#print('r',r)
if r not in s:
result += 1
s.add(r)
else:
break
i = r * 10
return result
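# Usage sketch (added; not part of the original snippet): lengths of the
# recurring decimal cycle of 1/n.
if __name__ == "__main__":
    assert calc_rec_cycle(7) == 6   # 1/7 = 0.(142857)
    assert calc_rec_cycle(3) == 1   # 1/3 = 0.(3)
    assert calc_rec_cycle(2) == 0   # 1/2 = 0.5 terminates, no cycle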
|
8d49c5d4511c33e98b596c23d669192a8f1be91d
| 15,957
|
from datetime import datetime
def _get_date(element):
""" This function extracts the date the image was taken from the image element
and converts it to a datetime format.
Args:
        element: A dict containing all the image attributes.
Returns:
        Date the image was taken, as a datetime object.
"""
date_taken = element.get('datetaken')
datetime_date_taken = datetime.strptime(date_taken, '%Y-%m-%d %H:%M:%S')
return datetime_date_taken
|
b42f6b24ce3545571bf1025b5b984386fde20208
| 15,958
|
import os
def _global_env_var_is(key, value):
"""
    Check whether environment variable `key` is set and equals `value`.
"""
if key not in os.environ:
return False
else:
return os.environ[key] == value
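# Usage sketch (added; MY_FLAG is a hypothetical variable for the demo).
if __name__ == "__main__":
    os.environ["MY_FLAG"] = "on"
    assert _global_env_var_is("MY_FLAG", "on")
    assert not _global_env_var_is("MY_FLAG", "off")
    assert not _global_env_var_is("NO_SUCH_VAR", "x")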
|
0c606b2e15b142cd00abe98022748479f10cff99
| 15,961
|
import os
def get_config_filename():
"""Returns the configuration filename"""
return os.path.expanduser("~/.irods/irods_environment.json")
|
d6b58898e7ec891e560e1d194e3f354df090fa92
| 15,962
|
import copy
def __kalman_backward_smooth__(xhat_fp, xhat_fm, P_fp, P_fm, A):
"""
:param xhat_fp:
:param xhat_fm:
:param P_fp:
:param P_fm:
:param A:
:return:
"""
N = xhat_fp.shape[1]
xhat_smooth = copy.copy(xhat_fp)
P_smooth = copy.copy(P_fp)
for t in range(N-2, -1, -1):
L = P_fp[t]*A.T*P_fm[t].I
xhat_smooth[:, t] = xhat_fp[:, t] + L*(xhat_smooth[:, t+1] - xhat_fm[:, t+1])
P_smooth[t] = P_fp[t] - L*(P_smooth[t+1] - P_fm[t+1])
return xhat_smooth, P_smooth
|
ec2ea4494f4711d41eaa6378406108d9423c14e7
| 15,963
|
def generate_checksum(data: bytes) -> int:
    """
    Generate the ICMP checksum (ones' complement sum, byte-swapped).
    """
n = len(data)
count = sum(data[i] + ((data[i + 1]) << 8) for i in range(0, n - n % 2, 2))
count += n % 2 and data[-1]
count = (count >> 16) + (count & 0xFFFF)
count += count >> 16
answer = ~count & 0xFFFF
return (answer >> 8) | (answer << 8 & 0xFF00)
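# Usage sketch (added; not part of the original snippet). The checksum of an
# all-zero payload is 0xFFFF, the ones' complement of zero.
if __name__ == "__main__":
    assert generate_checksum(b"\x00" * 8) == 0xFFFF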
|
bc1d576dca880aee51ffed7f75542b30f1bb7834
| 15,964
|
def _transform_dataframe_to_dict(raw_fact, gbkey):
"""[summary]
Parameters
----------
raw_fact : [type]
[description]
gbkey : [type]
[description]
Returns
-------
[type]
[description]
"""
data_param_list = list()
if gbkey is None:
data_param_list.append({'ts_id':1, 'df':raw_fact})
else:
for k,v in raw_fact.groupby(gbkey):
data_param_list.append({'ts_id':k, 'df':v})
return data_param_list
|
0e67b06284f1e8a59f40dbfa181c02e52ef87bc1
| 15,965
|
def _get_adjust_options(options, version, setuptools_url, setuptools_version):
"""Return a string containing the definition of the adjust_options function
that will be included in the generated virtualenv bootstrapping script.
"""
anaconda_error = None
if options.dev:
code = """
for arg in args:
if not arg.startswith('-'):
print 'ERROR: no args allowed that start without a dash (-)'
sys.exit(-1)
args.append(join(os.path.dirname(__file__), 'devenv')) # force the virtualenv to be in <top>/devenv
"""
anaconda_error = "if sys.platform == 'win32':\n print 'ERROR: OpenMDAO go scripts cannot be used with Anaconda distributions.\\nUse the command below to install the dev version of OpenMDAO:\\n\\n\\tcmd /c conda-openmdao-dev.bat\\n'\n\n else:\n print 'ERROR: OpenMDAO go scripts cannot be used with Anaconda distributions.\\nUse the command below to install the dev version of OpenMDAO:\\n\\n\\tbash conda-openmdao-dev.sh\\n'\n"
else:
code = """
# name of virtualenv defaults to openmdao-<version>
if len(args) == 0:
args.append('openmdao-%%s' %% '%s')
""" % version
anaconda_error = "print 'ERROR: OpenMDAO go scripts cannot be used with Anaconda distributions.\\nUse the command below to install the latest version of OpenMDAO:\\n\\n\\tconda create --name <environment name> openmdao'"
adjuster = """
def adjust_options(options, args):
version = sys.version
if "Analytics" in version or "Anaconda" in version:
%s
sys.exit(-1)
major_version = sys.version_info[:2]
if major_version != (2,7):
print 'ERROR: python major version must be 2.7, yours is %%s' %% str(major_version)
sys.exit(-1)
%s
# Check if we're running in an activated environment.
virtual_env = os.environ.get('VIRTUAL_ENV')
if options.relocatable:
import distutils.util
import zipfile
if not virtual_env:
print 'ERROR: --relocatable requires an activated environment'
sys.exit(-1)
# Make current environment relocatable.
make_environment_relocatable(virtual_env)
# Copy files to archive.
base = os.path.basename(virtual_env)
zipname = '%%s-%%s.zip' %% (base, distutils.util.get_platform())
print 'Packing the relocatable environment into', zipname
count = 0
with zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED) as zipped:
for dirpath, dirname, filenames in os.walk(virtual_env):
arcpath = os.path.join(base, dirpath[len(virtual_env)+1:])
for filename in filenames:
count += 1
if (count %% 100) == 0:
sys.stdout.write('.')
sys.stdout.flush()
zipped.write(os.path.join(dirpath, filename),
os.path.join(arcpath, filename))
zipped.writestr(os.path.join(base, 'script-fixer.py'),
_SCRIPT_FIXER)
print "\\nRemember to run 'python script-fixer.py' after unpacking."
sys.exit(0)
if virtual_env:
# Install in current environment.
after_install(options, virtual_env, activated=True)
try:
if not is_win:
download('%s')
import ez_setup
ez_setup.use_setuptools(version='%s', download_base='https://openmdao.org/dists')
os.remove('ez_setup.py')
except Exception as err:
logger.warn(str(err))
""" % (anaconda_error, code, setuptools_url, setuptools_version)
fixer = '''
_SCRIPT_FIXER = """\\
def main():
# Move to script directory of the unzipped environment.
root = os.path.dirname(os.path.abspath(__file__))
scripts = 'Scripts' if sys.platform == 'win32' else 'bin'
scripts = os.path.join(root, scripts)
os.chdir(scripts)
tmpname = 'script-to-fix'
# Fix activate scripts.
for filename in sorted(glob.glob('activate*')):
if filename == 'activate': # Bourne/bash.
pattern = 'VIRTUAL_ENV="'
elif filename == 'activate.csh': # C shell.
pattern = 'setenv VIRTUAL_ENV "'
elif filename == 'activate.fish': # ?
pattern = 'set -gx VIRTUAL_ENV "'
elif filename == 'activate.bat': # Windows.
pattern = 'set "VIRTUAL_ENV='
else:
continue
print 'Fixing', filename
if os.path.exists(tmpname):
os.remove(tmpname)
os.rename(filename, tmpname)
with open(tmpname, 'rU') as inp:
with open(filename, 'w') as out:
for line in inp:
if line.startswith(pattern):
line = '%s%s"\\\\n' % (pattern, root)
out.write(line)
os.remove(tmpname)
# Fix Windows 'shadow' scripts.
if sys.platform == 'win32':
replacement = '#!%s\\\\\\\\python.exe\\\\n' % scripts
for filename in sorted(glob.glob('*-script.py')):
print 'Fixing', filename
if os.path.exists(tmpname):
os.remove(tmpname)
os.rename(filename, tmpname)
with open(tmpname, 'rU') as inp:
with open(filename, 'w') as out:
for line in inp:
if line.startswith('#!'):
line = replacement
out.write(line)
os.remove(tmpname)
if __name__ == '__main__':
main()
"""'''
return adjuster + fixer
|
02f471756b3ed347ef9352ec3b3aae952318ceb3
| 15,966
|
def find_motif(motif, visit, chain, nsteps, current, previous, motifset, allover, natom, bond, atom, eqv):
"""
    This recursive function finds a specific motif in the structure.
    nsteps: the number of steps made
    current: atom selected for testing; a candidate for the nsteps-th element of the motif
    previous: atom before current
    motif: a given order of given atom types connected to each other covalently
    visit: 1 if an atom was visited
    chain: atoms in order of search; when retracting, elements are overwritten
    motifset: 2D array of the found motifs
    allover < 0: all-over path-finding mode
    allover >= 0: only atom 'allover' is used for the search
    eqv: array of equivalent atoms
"""
if nsteps == -1:
nsteps = 0
previous = -1
if allover < 0:
for i in range(natom):
current = i
visit = [0 for i in range(natom)]
chain = [-999 for i in range(natom)]
find_motif (motif, visit, chain, nsteps, current, previous, motifset, allover, natom, bond, atom, eqv)
else:
current = allover
visit = [0 for i in range(natom)]
find_motif (motif, visit, chain, nsteps, current, previous, motifset, allover, natom, bond, atom, eqv)
if nsteps > -1:
if nsteps > natom: return 0
# check if one of the motifs already had an equivalent atom in the same position (that is not the same atom)
eqv_list = []
for list in eqv:
if current in list:
eqv_list = list[:]
eqv_list.remove(current)
for m in motifset:
if m[nsteps] in eqv_list:
return 0
if visit[current] == 1: return 0
if nsteps == len(motif): return 0
if current in chain[:nsteps-1]: return 0
if motif[nsteps] != 'X' and atom[current] != motif[nsteps]: return 0
if bond[current][previous] == 0 and previous > -1: return 0
if nsteps == len(motif) - 1:
chain[nsteps] = current
if -999 in chain:
motifset.append(chain[:chain.index(-999)])
else:
motifset.append(chain[:])
return 0
visit[current] = 1
chain[nsteps] = current
previous = current
nsteps += 1
for i in range(natom):
        current = i
find_motif (motif, visit, chain, nsteps, current, previous, motifset, allover, natom, bond, atom, eqv)
visit[current] = 0
if nsteps > 0:
nsteps = nsteps - 1
previous = chain[nsteps - 1]
else: previous = -1
#visit[current] = 0
return 0
|
a8ac8d425a856fc97642ecac663dfc54d94234d4
| 15,967
|
def make_predictions(data, model, weights):
"""Predict the labels of all points in a data set for a given model.
Args:
data (array[float]): Input data. A list with shape N x 2
representing points on a 2D plane.
model (qml.QNode): A QNode whose output expectation value will be
used to make predictions of the labels of data.
weights (array[float]): The trainable model parameters for the QNode.
Returns:
array[int]: The array of predictions for each data point made by
the model QNode.
"""
preds = []
for idx in range(len(data)):
estimated_expval = model(data[idx], weights)
if estimated_expval > 0:
preds.append(1)
else:
preds.append(-1)
return preds
|
4ac2ba85a12d56f0128ba518e8a7d030c0eb5734
| 15,968
|
def finds(itemlist, vec):
"""return the index of the first occurence of item in vec"""
idlist = []
for x in itemlist:
ix = -1
for i in range(len(vec)):
if x == vec[i]:
idlist.append(i)
ix = i
if ix == -1:
idlist.append(-1)
if not idlist:
return -1
else:
return idlist
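# Usage sketch (added; not part of the original snippet): indices of every
# match per item, with -1 for items that never occur.
if __name__ == "__main__":
    assert finds([2, 9], [1, 2, 3, 2]) == [1, 3, -1]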
|
a59fb3512510e8e0996779e429a588af5c82546c
| 15,969
|
def get_major_minor(stat_inst):
"""get major/minor from a stat instance
:return: major,minor tuple of ints
"""
return ( stat_inst.st_rdev >> 8 ) & 0xff, stat_inst.st_rdev & 0xff
|
ec623deb66d1e95f5ec9744ffbefc03c52ebf6a9
| 15,970
|
def _resources_json_version_required() -> str:
"""
Specifies the version of resources.json to obtain.
"""
return "develop"
|
7f9eaff50b3a03ec50501e7ae125f4daab462325
| 15,971
|
def _can_show(view, location=-1):
"""
Check if popup can be shown.
I have seen Sublime can sometimes crash if trying
to do a popup off screen. Normally it should just not show,
but sometimes it can crash. We will check if popup
can/should be attempted.
"""
can_show = True
sel = view.sel()
if location >= 0:
region = view.visible_region()
if region.begin() > location or region.end() < location:
can_show = False
elif len(sel) >= 1:
region = view.visible_region()
if region.begin() > sel[0].b or region.end() < sel[0].b:
can_show = False
else:
can_show = False
return can_show
|
adef07025a4732b7f18898f63884b4fec1d8f7e0
| 15,973
|
def driver_cookies_list_2_str(cookies_list:list) -> str:
"""
    Convert a WebDriver cookies list into a cookie header string.
    :param cookies_list: e.g. [{"domain":".jianshu.com", "expirationDate":1552467568.95627, ..., "name":"_m7e_session_core", ..., "value":"cc5871cc6fd05e742b83fbf476676450",}, ...]
    :return: cookie string of the form "name1=value1;name2=value2;"
"""
res = ''
for item in cookies_list:
name = item.get('name', '')
value = item.get('value', '')
if name != '':
res += '{}={};'.format(name, value)
return res
|
d3498666af2a0a01308afb2975459f893c91bb1b
| 15,974
|
import hashlib
def get_metadata_hash_for_attachments(attachments):
"""
Calculate a metadata hash from a collection of attachments.
The hash will change if any of the attachments changes.
"""
hashes = [attachment.metadata_hash for attachment in attachments]
# Sort the hashes to make the hash deterministic regardless of order
hashes.sort()
data = b"".join([hash_.encode("utf-8") for hash_ in hashes])
return hashlib.sha256(data).hexdigest()
|
fb56306c611a1aa1d87e897650142375a69f26e3
| 15,976
|
import torch
def rmspe(pred, true):
"""Computes RMSPE"""
return torch.mean(((true - pred) / true)**2)**0.5
|
2a83c9c10fb0547b4d90c805d94db871eb1b9e11
| 15,977
|
import os
import shlex
import subprocess
def pkgconfig(package, variable):
"""pkg-config"""
pkgconfig_env = os.environ.get("PKG_CONFIG", "pkg-config")
cmd = f"{pkgconfig_env} --variable={variable} {package}"
cmd = shlex.split(cmd)
return subprocess.check_output(cmd).decode().strip()
|
157108473c030a37caf2845e696f3585d1b8dd66
| 15,978
|
def _as_phi_args(
kappa=None,
tau=None,
# _default={},
**kwargs):
"""
utility function to convert model arguments to kernel arguments
"""
kwargs = dict(**kwargs)
# kwargs.setdefault(**_default)
if kappa is not None:
kwargs['kappa'] = kappa
if tau is not None:
kwargs['tau'] = tau
return kwargs
|
70e8b648bf0f18fe9bf8e670b99b0fdd4dbdc79b
| 15,979
|
import unicodedata
def soundex(s):
"""
https://stackoverflow.com/a/67197882/2771733
"""
if not s:
return ""
s = unicodedata.normalize("NFKD", s)
s = s.upper()
replacements = (
("BFPV", "1"),
("CGJKQSXZ", "2"),
("DT", "3"),
("L", "4"),
("MN", "5"),
("R", "6"),
)
result = [s[0]]
count = 1
    # find would-be replacement for first character
for lset, sub in replacements:
if s[0] in lset:
last = sub
break
else:
last = None
for letter in s[1:]:
for lset, sub in replacements:
if letter in lset:
if sub != last:
result.append(sub)
count += 1
last = sub
break
else:
if letter != "H" and letter != "W":
last = None
if count == 4:
break
result += "0" * (4 - count)
return "".join(result)
|
94ed0c26a441cbbe56beaf8e042d346f2b195509
| 15,980
|
def load_requirements():
"""
Loads requirements.txt
    :return: list of requirements
"""
with open('requirements.txt') as f:
return [req for req in f.read().split() if req]
|
2e4dc452ef9f89e546e0ce49a637090056b88ced
| 15,983
|
def media_anual(temperaturas):
"""Receba uma lista com as temperaturas médias de cada mês
e devolva uma lista com os números correspondentes aos meses
que possuem temperatura superior á média anual."""
media = sum(temperaturas) / len(temperaturas)
meses_acima_da_media = []
for mes, temperatura in enumerate(temperaturas):
if temperatura > media:
meses_acima_da_media.append(mes)
return meses_acima_da_media
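# Usage sketch (added; not part of the original snippet). Note the returned
# month numbers are zero-based indices into the input list.
if __name__ == "__main__":
    assert media_anual([10, 20, 30, 40]) == [2, 3]   # mean is 25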
|
10c6da72824f8021b451ed710b6a915240a5082e
| 15,984
|
def get_xr_resolution(ds):
"""
    Read dataset and get pixel resolution from attributes. If
    attributes don't exist, fall back to the rough approach of
    subtracting one pixel coordinate from the next.
Parameters
----------
ds: xarray dataset, dataarray
A single xarray dataset with variables and x and y dims.
Returns
----------
res : float
A float containing the cell resolution of xarray dataset.
"""
# check if x and y dims exist
    if 'x' not in list(ds.dims) or 'y' not in list(ds.dims):
raise ValueError('No x, y dimensions in dataset.')
# try getting max res option 1
try:
res = abs(max(ds.res))
except:
res = None
# try getting max res option 2
try:
if not res:
res = max(ds.geobox.resolution)
except:
res = None
# try getting max res the dirty way
try:
if not res:
x_res = abs(float(ds['x'].isel(x=0))) - abs(float(ds['x'].isel(x=1)))
y_res = abs(float(ds['y'].isel(y=0))) - abs(float(ds['y'].isel(y=1)))
res = abs(float(max(x_res, y_res)))
except:
res = None
# check if something exists
if not res:
raise ValueError('Could not extract cell resolution from dataset.')
# return
return res
|
3d87ff33190078753a496fd5f14854fa98eb1017
| 15,985
|
from datetime import datetime
def extract_date():
"""
Extract the date when the query is made in the format YYYYMMDD.
"""
return datetime.today().strftime('%Y%m%d')
|
099df827a72b2fa002159f30a6f345918ad897f5
| 15,986
|
import sys
def isLinux():
"""
isLinux
"""
return sys.platform.startswith("linux")
|
62e9b728864608a1e32e5d241b28fc208fdcfa8a
| 15,988
|
from typing import List
from typing import Dict
def match_relationships(relationships: List):
"""Creates a dict that connects object_id to all objects_ids it has a relationship with.
Args:
relationships (List): A list of relationship objects.
Returns:
Dict. Connects object_id to all objects_ids it has a relationship with. In the form of `id: [related_ids]`
"""
matches: Dict[str, set] = {}
for relationship in relationships:
source = relationship.get('source_ref')
target = relationship.get('target_ref')
if not source or not target:
continue
if source in matches:
matches[source].add(target)
else:
matches[source] = {target}
if target in matches:
matches[target].add(source)
else:
matches[target] = {source}
return matches
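# Usage sketch (added; not part of the original snippet) with two relationship
# objects sharing a source.
if __name__ == "__main__":
    rels = [{'source_ref': 'a', 'target_ref': 'b'},
            {'source_ref': 'a', 'target_ref': 'c'}]
    assert match_relationships(rels) == {'a': {'b', 'c'}, 'b': {'a'}, 'c': {'a'}}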
|
870db3b324f340a7f632251ebe22bfae6e693076
| 15,989
|
def makeChromTiles(db):
"""
Make a region for each chromosome
"""
out = []
for (k, v) in db.chromsTuple:
out.append([k, 0, v])
return out
|
ca887035f05047bf7172c4e120fc7623a0fcb3e5
| 15,990
|
def _construct_GDS_url(accession):
"""Example URL:
ftp://ftp.ncbi.nlm.nih.gov/geo/datasets/GDS4nnn/GDS4999/soft/GDS4999.soft.gz
"""
number_digits = len(accession) - 3 # 'GDS' is of length 3.
if number_digits > 3:
folder = accession[:4] + "nnn"
else:
folder = accession[:3] + "n" * number_digits
url = '/'.join(["ftp://ftp.ncbi.nlm.nih.gov/geo/datasets",
folder,
accession,
"soft",
accession + ".soft.gz"])
return url
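# Usage sketch (added; not part of the original snippet), reproducing the
# docstring's example URL.
if __name__ == "__main__":
    assert _construct_GDS_url("GDS4999") == (
        "ftp://ftp.ncbi.nlm.nih.gov/geo/datasets/GDS4nnn/GDS4999/soft/GDS4999.soft.gz")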
|
0b6968a1d47beb3be1e521f4b84ff2174de8baf3
| 15,991
|
import re
def getMovieName(downloadLink):
"""输入ftp链接,使用正则表达式分析文本,返回电影名字"""
name_patten = re.compile(r'](?!/)\.?(.+?)\.[a-z]{3,4}$') # 结果是xxx.rmvb 或xxx.mkv
name_match = re.findall(name_patten, downloadLink)
if bool(name_match):
return name_match[0].split('.')[0]
else:
return 'None'
|
fee6e95bc0ada0ea983e78db62e829789e4cc248
| 15,993
|
def data2mesh(data):
"""
Extracts from a given torch_geometric Data object the mesh elements
Parameters
----------
data : Data
a torch_geometric Data object
Returns
-------
(Tensor,LongTensor,Tensor)
the points set, the topology and the vertex normals tensor
"""
return data.pos, data.face, data.norm
|
781489d95db76d106c910efda9bbc5f348e9d7ed
| 15,994
|
def uglify(text: str):
"""
    A filter that upper-cases characters at even indices and lower-cases those at odd indices.
"""
new = ''
for index in range(len(text)):
if index % 2 == 0:
new += text[index].upper()
else:
new += text[index].lower()
return new
|
61228973664061adf8da616404f74a8e311b4e26
| 15,997
|
def prime_generator(maxi):
"""
Generate all the prime numbers below maxi. maxi is not included.
    The method uses the sieve of Eratosthenes.
>>> prime_generator(10)
[2, 3, 5, 7]
"""
li = []
for _ in range(maxi):
li.append(1)
li[0] = li[1] = 0
for pos, val in enumerate(li):
if val:
for index in range(pos+pos, maxi, pos):
li[index] = 0
primes = []
for pos, val in enumerate(li):
if val:
primes.append(pos)
return primes
|
f2829ed995f0f289b22960fad706cf3ec0371446
| 15,998
|
def filter_content(contents, rules, mode=any):
"""
    Filter contents by the given rules.
    Args:
        contents `list`
            List of illusts; the content may vary from source to source.
            Be sure you know the data hierarchy of the object.
        rules `list`
            A list of functions, each taking one content item and returning a
            boolean indicating whether the content is selected.
        mode `any` or `all`
            Choose whether to satisfy all rules or any of them.
Returns:
list of filtered contents.
Raises:
None
"""
if not (mode in (any, all)):
raise ValueError("Accept only one of 'any' or 'all'.")
res = []
for i in contents:
if mode(r(i) for r in rules):
res.append(i)
return res
|
a875ee3d8523a29043b576c9d19ef8a09589ed91
| 15,999
|
def get_dict_key_by_value(source_dict: dict, dict_value):
"""Return the first key of the ``source_dict`` that has the ``dict_value`` as value."""
for k, v in source_dict.items():
if v == dict_value:
return k
return
|
0ae198c7d07fe57898779f0b75b75bdb590f5a3d
| 16,000
|
from pathlib import Path
def get_cases(f, sep="---"):
"""
Extracts inputs and outputs for each test/verification case within f, where f is a folder.
Params
======
f: str
The folder containing the cases to be extracted.
sep: str
The substring separating comments from the input from the output in each case file.
Returns
=======
cases: []
Array of dictionaries containing each case
Each case is a dictionary with the following fields:
- "filename": The name of the file
- "comments": Any comments in the folder
- "inputs": The inputs
- "outputs": The expected outputs
Raises
======
AssertionError:
If the given path is not a folder.
"""
# Initialise path
p = Path(f)
# Assert that target folder is a folder
assert p.is_dir()
# List of cases in the folder
cases = []
# Loop through all cases within the folder
for f in p.iterdir():
# Open each case file
with open(f) as fr:
# Obtain the contents of the case file
contents = fr.read()
            # The case files are structured as the INPUTS, followed by the separator substring, followed by the OUTPUTS, optionally followed by the separator substring and the COMMENTS
# Instantiate case dictionary
c = {}
# Separate the contents by the separator, and then clean each individual element of newline/whitespace
contents = contents.split(sep)
contents = [c.strip() for c in contents]
# Populate dictionary
c["filename"] = f.with_suffix("").name
c["inputs"] = contents[0]
c["outputs"] = contents[1]
if len(contents) == 3:
c["comments"] = contents[2]
# Add dictionary to list of cases
cases.append(c)
# After all cases have been looped through, return cases
return cases
|
bedcc7cedd791505dbed886a539df92aa0fa3f87
| 16,001
|
import requests
def create_empty_zenodo_upload(access_token):
"""
Create an empty upload using Zenodo API.
:param access_token: Zenodo access token.
:return: requests.models.Response from Zenodo API
"""
headers = {"Content-Type": "application/json"}
r = requests.post('https://zenodo.org/api/deposit/depositions',
params={'access_token': access_token}, json={},
headers=headers)
return r
|
14a26a07a08b2dab1ccf55ddea8c3d4cb3d9a2d6
| 16,002
|
def slurp(name):
"""
Read the file
:param name: read the named file
:return: the content
"""
with open(name, "r") as f:
return f.read()
|
2edb241b5cbb0c9298dbdc9dd5f1f89786fff036
| 16,004
|
def vec_is_void(a):
"""
Check whether a given vector is empty
A vector is considered "void" if it is None or has no
elements.
Parameters
----------
a: list[]
The vector to be checked
Returns
-------
bool
True if the vector is empty, False otherwise
"""
return a is None or len(a) == 0
|
9a9b6f78ec2ddb81990fe54a5b429413c0472742
| 16,006
|
def MergeTwoListsAsDic(keys, values):
"""
"""
dic = {}
for i in range(len(keys)):
dic[keys[i]] = values[i]
return dic
|
85e7fa6fcf93c51bcdee93ff3c355f8d369fa043
| 16,007
|
import sys
def __library_name() -> str:
"""Get lattice_symmetries C library file name with correct extension."""
if sys.platform == "linux":
extension = ".so"
elif sys.platform == "darwin":
extension = ".dylib"
else:
raise ImportError("Unsupported platform: {}".format(sys.platform))
return "liblattice_symmetries{}".format(extension)
|
651ece84a37221c67d6a511bb455991ebd0e88e5
| 16,008
|
import gzip
def read_gzipped_file(filepath):
"""
    Open a gzip-compressed file in text mode.
    :param str filepath: path to gzip file.
    :return: a text-mode file handle over the decompressed content.
"""
handle = gzip.open(filepath, "rt")
return handle
|
4f627f8ee5be5e77e427158216be8d254586a72b
| 16,009
|
def chargeandprop(aa_seq):
""" Calculates protein net charge and charged AA proportion
"""
protseq = aa_seq.upper()
charge = -0.002
cp = 0
aa_charge = {'C':-.045,'D':-.999,'E':-.998,'H':.091,
'K':1,'R':1,'Y':-.001}
for aa in protseq:
charge += aa_charge.get(aa, 0)
if aa in aa_charge:
cp += 1
prop = float(cp)/len(aa_seq)*100
return (charge, prop)
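# Usage sketch (added; not part of the original snippet). For "KDE" every
# residue is charged: K is +1, D is -0.999, E is -0.998, baseline is -0.002.
if __name__ == "__main__":
    charge, prop = chargeandprop("KDE")
    assert abs(charge - (-0.999)) < 1e-9
    assert prop == 100.0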
|
1d967642235188090875cb35e21a9367cca9343e
| 16,010
|
import os
import yaml
def save_dict_as_yaml(dictionary: dict, path: str) -> str:
"""Save a cfg dict to path as yaml
Parameters
----------
dictionary
Dictionary to be saved
path
Filesystem location where the yaml file will be saved
Returns
-------
path
Location of the yaml file
"""
dir_name = os.path.dirname(path)
    # Prevent makedirs('') errors for bare filenames relative to the current
    # workdir, e.g. `save_dict_as_yaml(cfg, "just_here.yml")`
if dir_name:
os.makedirs(dir_name, exist_ok=True)
with open(path, "w") as yml_file:
yaml.dump(dictionary, yml_file, default_flow_style=False, allow_unicode=True)
return path
|
6c7fc19327e993ca4597a46e0a92efdd93f38bd5
| 16,011
|
import calendar
def _sort_order(count_items):
"""Key for sorting day counts in days of the week order."""
return list(calendar.day_name).index(count_items[0])
|
9fa5a3f37ee034a99c2c6ed428655261661210aa
| 16,012
|
def load_db_class(dbtype):
"""
    Load the DB class for the given dbtype from the datafaker.dbs package.
    :return: the DB class
"""
pkgname = 'datafaker.dbs.' + dbtype + 'db'
classname = dbtype.capitalize() + 'DB'
    module = __import__(pkgname, fromlist=[classname])
db_class = getattr(module, classname)
return db_class
|
3dbbfeab6505cf2aa72929b16f65a6f9fa8751f5
| 16,013
|
def make_alternating_color_pattern(ctr, rgblst):
"""
Return a pattern of alternating colors from rgblst.
"""
n = len(rgblst)
return ctr.make_func_pattern(lambda i: rgblst[i % n])
|
3ff2396c390c813169e3399dd04d353c98a9f9f8
| 16,014
|
def download_input(storage_provider, parsed_event, input_dir_path):
"""Receives the event where the file information is and
the tmp_dir_path where to store the downloaded file.
Returns the file path where the file is downloaded."""
return storage_provider.download_file(parsed_event, input_dir_path)
|
887ca61a40a658d172b4b77833132b73933a14ce
| 16,015
|
def get_bit(val: int, bitNo: int) -> int:
"""
Get bit from int
"""
return (val >> bitNo) & 1
|
19d49512387da66e5889fc1bacc014be240be4a9
| 16,016
|
def GetChildNodesWhereTrue(node_id, tree, stop_function):
"""walk down in tree and stop where stop_function is true
The walk finishes at the leaves.
returns a list of tuples of nodes and distance.
"""
result = []
def __getChildNodes(node_id, distance):
node = tree.node(node_id)
distance += node.data.branchlength
if not node.succ:
result.append((node_id, distance))
elif stop_function(node_id):
result.append((node_id, distance))
else:
for x in node.succ:
__getChildNodes(x, distance)
node = tree.node(node_id)
__getChildNodes(node_id, -node.data.branchlength)
return result
|
9837a8ac294b8202a6e17deecd213d25599f575b
| 16,017
|
def truncate(vec, max_length, truncate_tail=True):
"""truncate vec to make its length no more than max length.
Args:
vec (list): source list.
max_length (int)
truncate_tail (bool, optional): Defaults to True.
Returns:
list: truncated vec.
"""
if max_length is None:
return vec
if len(vec) <= max_length:
return vec
if truncate_tail:
return vec[:max_length]
else:
return vec[-max_length:]
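# Usage sketch (added; not part of the original snippet).
if __name__ == "__main__":
    assert truncate([1, 2, 3, 4], 2) == [1, 2]                       # keep head
    assert truncate([1, 2, 3, 4], 2, truncate_tail=False) == [3, 4]  # keep tail
    assert truncate([1, 2], 5) == [1, 2]                             # short enough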
|
31921b3c15c8b77b74d65965b997c374bd1cafb2
| 16,019
|
def get_full_version(package_data):
"""
    Given a mapping of package_data that contains a version and may have an
    epoch and a release, return a complete version.
For example::
>>> get_full_version(dict(version='1.2.3'))
'1.2.3'
>>> get_full_version(dict(version='1.2.3', epoch='2'))
'2~1.2.3'
>>> get_full_version(dict(version='1.2.3', epoch='2', release='23'))
'2~1.2.3-23'
"""
version = package_data['version']
release = package_data.get('release', '')
if release:
release = f'-{release}'
epoch = package_data.get('epoch', '')
if epoch:
epoch = f'{epoch}~'
version = f'{epoch}{version}{release}'
return version
|
3a8cc3731da2ef3f99e3e9f203e084c9478f48c8
| 16,020
|
def knight_tour(n, path, u, limit):
"""
Conduct a knight's tour using DFS.
Args:
n: current depth of the search tree.
path: a list of vertices visited up to this point.
u: the vertex we wish to explore.
limit: the number of nodes in the path.
Returns:
done (bool)
"""
    path.append(u)
    if n < limit:
        nbrList = list(u.get_connections())
        i = 0
        done = False
        while i < len(nbrList) and not done:
            # only explore neighbors not already on the current path
            if nbrList[i] not in path:
                done = knight_tour(n + 1, path, nbrList[i], limit)
            i = i + 1
        if not done:  # prepare to backtrack
            path.pop()
    else:
        done = True
    return done
|
f11a2da4e740183a85dbd2f281c65dda77237dad
| 16,021
|
def read_tags_and_datablocks(text):
""" read a file consisting of blocks of numbers which are
separated by tag lines. separate tag lines from data lines
return two lists
e.g. for pp.data file:
Atomic number and pseudo-charge
14 4.00
Energy units (rydberg/hartree/ev):
rydberg
Angular momentum of local component (0=s,1=p,2=d..)
2
NLRULE override (1) VMC/DMC (2) config gen (0 ==> input/default value)
0 0
Number of grid points
1603
R(i) in atomic units
0.000000000000000E+00
0.719068853804059E-09
0.144778949458300E-08
will be parsed into:
['Atomic number and pseudo-charge', ...
['14 4.00', ...
Args:
text (str): file content
Return:
tuple: (tags, blocks), both are a list of strings
"""
    lines = text.split('\n')
    tags = []
    blocks = []
    block = []
    for line in lines:
        try:
            # force evaluation: a lazy map() here would never raise
            [float(tok) for tok in line.split()]
            block.append(line)
        except ValueError:
            tags.append(line)
            blocks.append('\n'.join(block))
            block = []
    blocks.append('\n'.join(block))
    return tags, blocks[1:]
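# Usage sketch (added; not part of the original snippet), mirroring the
# pp.data example in the docstring.
if __name__ == "__main__":
    text = "Number of grid points\n1603\nR(i) in atomic units\n0.1\n0.2"
    tags, blocks = read_tags_and_datablocks(text)
    assert tags == ["Number of grid points", "R(i) in atomic units"]
    assert blocks == ["1603", "0.1\n0.2"]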
|
372512010198aa401552302d45f0a0745477bec1
| 16,022
|
def convert_sensor_type(v):
"""
converts the sensor type value into something
more meaningful.
"""
    if v == 0:
        return "None"
    elif v == 1:
        return "RealPower"
    elif v == 2:
        return "ApparentPower"
    elif v == 3:
        return "Voltage"
    elif v == 4:
        return "Current"
    else:
        return "Unknown"
|
7f5bd77db7a240d21728b526daae7729b0871143
| 16,023
|
def annotate_route_announce(announce, ro_rad_tree):
"""
Add a list of IRR containing valid route objects for this `announce' in "valid".
:param announce: dictionary to annotate containing "asn" and "prefix" fields
:param ro_rad_tree: radix tree containing route objects, AS nb in data["asn"]
:return: `announce'
"""
prefix = announce["prefix"]
asn = announce["asn"]
ro_declared = ro_rad_tree.search_covering(prefix)
valid = set(announce.get("valid", set()))
for node in ro_declared:
bases = node.data.get(asn, None)
if bases is not None:
valid.update(bases)
if len(valid) > 0:
announce["valid"] = list(valid)
return announce
|
79ebdc487433a6b94ab098bb457263953a449a88
| 16,024
|
import argparse
def get_args():
"""! Command line parser """
parser = argparse.ArgumentParser(
description='Audio Reader, reading and manipulating Audio')
parser.add_argument("--input_path", type=str,
help="""Path for an audio file to be parsed""",
default='./sample_audio/stereo/bsi_test_audio.avi')
parser.add_argument("--suffix", type=str,
help="""The suffix of your audio file. If no suffix is provided
then it would be utomatically infered by the remaining right
part of a character '.' split.""",
default=None, choices=['wav', 'mp3', 'mp4', 'ogg', 'avi', 'flac'])
args = parser.parse_args()
return args
|
fc6372e812a1af8ee1cfae8c3eb5e911da2fcf9c
| 16,025
|
def create_y_range(motile_count,
non_motile_count,
auto_motile_count,
auto_non_motile_count):
"""
Generate the y range on the motility bar
:param motile_count: the amount of motile life at this frame
:param non_motile_count: the amount of non motile life at this frame
    :param auto_motile_count: the amount of auto-labeled motile tracks at this frame
    :param auto_non_motile_count: the amount of auto-labeled non motile tracks at this frame
    :return: the [min, max] range of life counts at this frame
"""
y = []
if motile_count is not None:
y += motile_count
if non_motile_count is not None:
y += non_motile_count
if auto_motile_count is not None:
y += auto_motile_count
if auto_non_motile_count is not None:
y += auto_non_motile_count
_min = None
_max = None
if y:
_min = min(y)
_max = max(y) + 1
return [_min, _max]
|
037c3020e82bda853410e15e7c0b0f9b8d0d9be1
| 16,026
|
def area_trapezio(a, b, c):
"""a área do trapézio que tem A e B por bases e C por altura."""
area_trape = (a + b)/2 * c
return print(f'TRAPEZIO: {area_trape:.3f}')
|
64fadfa67e2ef64722a016b4171e318a68765c55
| 16,029
|
def signed_up_user():
"""
pytest fixture can be a good way to share data across
your test suites using dependency injection
see link here: https://docs.pytest.org/en/latest/fixture.html#fixture
but you can decide to use pytest-datadir or pytest-datafiles
:return: a dictionary as signed user
"""
return dict(name='aaron', email='abiliyok@gmail.com')
|
44964dc8a9c784e3a2fcce77a0d459c3ccbd02bc
| 16,030
|
def karatsuba_multiply(a, b, precision=50, radix=10):
"""
Karatsuba multiplication for integers
Generalized for integers 0 <= a, b <= radix ** precision
Complexity: O(n ^ log_2(3)) where n is the precision of
multiplication desired.
"""
    precision_on_2 = precision >> 1
    if precision_on_2 == 0:
        # degenerate split; multiply directly so the recursion terminates
        return a * b
    a_1 = a // (radix ** precision_on_2)
    a_0 = a - (a_1 * (radix ** precision_on_2))
    b_1 = b // (radix ** precision_on_2)
    b_0 = b - (b_1 * (radix ** precision_on_2))
    if a_0 == a or b_0 == b:
        return a * b
    c_2 = karatsuba_multiply(a_1, b_1, precision_on_2, radix)
    c_1 = karatsuba_multiply(a_1 + a_0, b_1 + b_0, precision_on_2, radix)
    c_0 = karatsuba_multiply(a_0, b_0, precision_on_2, radix)
    return (c_2 * (radix ** (2 * precision_on_2))) \
        + ((c_1 - c_2 - c_0) * (radix ** precision_on_2)) \
        + c_0
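# Verification sketch (added; not part of the original snippet): compare
# against Python's built-in big-integer multiplication.
if __name__ == "__main__":
    import random
    for _ in range(1000):
        a = random.randrange(10 ** 50)
        b = random.randrange(10 ** 50)
        assert karatsuba_multiply(a, b) == a * b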
|
58fdf93d4fad4d31bc60c679dad69f91a63e17a0
| 16,031
|
import subprocess
import re
def get_ck_frames(kernel):
"""
Get all of the reference frames defined in a kernel.
Parameters
----------
kernel : str
The path to the kernel
Returns
-------
ids : list
The set of reference frames IDs defined in the kernel
"""
ckbrief = subprocess.run(["ckbrief", "-t {}".format(kernel)],
capture_output=True,
check=True,
text=True)
ids = set()
for id in re.findall(r'^(-?[0-9]+)', ckbrief.stdout, flags=re.MULTILINE):
ids.add(int(id))
# Sort the output list for testability
return sorted(list(ids))
|
f50b3ac3534767e8071f07c4fffc26057eb1f8db
| 16,033
|
import bz2
def bunzip2(data):
    """ Decompress the given bz2-compressed bytes. """
    return bz2.decompress(data)
|
7da3f9b64cd0f6765860678b18be661fd42b813e
| 16,034
|
def observatory_from_string(string):
"""If "jwst" or "hst" is in `string`, return it, otherwise return None."""
if "jwst" in string:
return "jwst"
elif "hst" in string:
return "hst"
else:
return None
|
217cc3cf3c5b802799c0db73563f6d11b7ab4c4d
| 16,035
|
def get_type(data):
"""
@param data: rosdoc manifest data
    @return 'stack' or 'package'
"""
return data.get('package_type', 'package')
|
ee3f881ff2479dfec0ce17a8c19161407e246a1c
| 16,036
|
def get_letters_pep(peptides_lst):
"""
Get letters list and letter-index dictionaries
"""
    letters = ['<PAD>'] + list(set(x for l in peptides_lst for x in l))
    word_to_ix_ = {letter: ix for ix, letter in enumerate(letters)}
    ix_to_word_ = {ix: letter for ix, letter in enumerate(letters)}
    return word_to_ix_, ix_to_word_
|
5c341a9cdd99a874a34c1ed967a5e95ad1c7ed6f
| 16,037
|
import copy
def addAction(state, timeDifference, newAction) :
"""Adds an newAction to a state atfter timeDifference s after the current
state timestamp
"""
newState = copy.deepcopy(state)
# Sorts the nextActions array with the newAction added by their respective
# Timestamps; adds the newAction at its relevant position
newTs = newState['timeline']['timestamp'] + timeDifference
nas = newState['timeline']['nextActions']
newState['timeline']['nextActions'] = [ na for na in nas if na[0] <= newTs ] + [( newTs, newAction )] + [ na for na in nas if na[0] > newTs ]
return newState
|
beacec84a43efa59f5f51e488b9e92d8acf1ee97
| 16,038
|
def seq_info(names, id2names, insertions, sequences):
"""
get insertion information from header
"""
seqs = {} # seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]]
for name in names:
id = id2names[name]
gene = name.split('fromHMM::', 1)[0].rsplit(' ', 1)[1]
model = name.split('fromHMM::', 1)[1].split('=', 1)[1].split()[0]
i_gene_pos = insertions[id] # coordinates of each insertion wrt gene
i_model_pos = name.split('fromHMM::', 1)[1].split('model-pos(ins-len)=')[1].split()[0].split(';') # model overlap
i_info = []
for i, ins in enumerate(i_gene_pos):
model_pos = i_model_pos[i].split('-')[1].split('(')[0]
length = i_model_pos[i].split('(')[1].split(')')[0]
iheader = '>%s_%s insertion::seq=%s type=insertion strand=n/a gene-pos=%s-%s model-pos=%s'\
% (id, (i + 1), (i + 1), ins[0], ins[1], model_pos)
iseq = sequences[id][1][ins[0]:(ins[1] + 1)]
iseq = [iheader, iseq]
info = [ins, model_pos, length, iseq, [], []]
i_info.append(info)
seqs[id] = [gene, model, i_info]
return seqs
|
a3bfd66d77034bb96970cea7892be4694908047e
| 16,039
|
def _get_lat_lon(df):
"""Get latitude and longitude from geometries."""
col = df._geometry_column_name
df["latitude"] = [latlon.y for latlon in df[col]]
df["longitude"] = [latlon.x for latlon in df[col]]
return df
|
9ab4b1cb469ae88444de97ec0cb0cec008643c4a
| 16,040
|
def get_list_params_with_serialized_objs(page):
"""
Search for parameters that contain a java serialized object
:param page: The page source code for search in it
:return: List of parameters found
"""
page = str(page).replace("\\n", "\n")
list_params = []
for i in page.split('\n'):
tokens = i.strip().split(" ")
for ind in range(0, len(tokens)):
t = tokens[ind]
if 'value=\"H4sI' in t or 'value=\"rO0' in t:
if 'name=\"' in tokens[ind-1] or 'id=\"' in tokens[ind-1]:
param = tokens[ind-1].split("\"")[1]
if param not in list_params:
list_params.append(param)
return list_params
|
502d32a920a4844d78a9a4aabe81f1d018d3f4ef
| 16,041
|
def fix_fp(sequence, parallel):
"""
Fix footprints
Parameters
--------------
sequence
Sequence
parallel
Parallel
Returns
-------------
sequence
Sequence
parallel
Parallel
"""
sequence = sequence.difference(parallel)
return sequence, parallel
|
721021af7d9f4b07ee25861788cde878a31b6135
| 16,044
|
def apply_ratio(o_w, o_h, t_w, t_h):
"""Calculate width or height to keep aspect ratio.
o_w, o_h -- origin width and height
t_w, t_h -- target width or height, the dimension
to be calculated must be set to 0.
Returns: (w, h) -- the new dimensions
"""
new_w = t_h * o_w / o_h
new_h = t_w * o_h / o_w
return new_w+t_w, new_h+t_h
|
f3143e5a5ad8aeafbb913e73aab40e8e8990ddd6
| 16,045
|
def knapsack_rep(weights, values, W):
""" knapsack with repetition """
    k = [0] * (W + 1)
    for w in range(1, W + 1):
        k[w] = max([k[w - weights[i]] + values[i] if weights[i] <= w else 0
                    for i in range(len(weights))])
    return k[-1]
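# Usage sketch (added; not part of the original snippet): items of
# (weight, value) = (6, 30), (3, 14), (4, 16), (2, 9) with capacity 10;
# the best repeated packing is one 6 and two 2s, worth 48.
if __name__ == "__main__":
    assert knapsack_rep([6, 3, 4, 2], [30, 14, 16, 9], 10) == 48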
|
5ec8a544d6869130970d1b7194c3d7ea77b59b4b
| 16,047
|
def maybe_append(usa_facts, jhu):
"""
Append dataframes if available, otherwise return USAFacts.
If both data frames are available, append them and return.
If only USAFacts is available, return it.
If USAFacts is not available, return None.
"""
if usa_facts is None:
return None
if jhu is None:
return usa_facts
return usa_facts.append(jhu)
|
4f0831a09ac36caaec6f825036e69d0f5b62b19f
| 16,050
|
def stepcalc(time, duration):
"""Calculates frequency and resulting windowlength
"""
tstep = (time[1] - time[0])
freq = 1. / tstep
arr_len = duration * freq
return int(arr_len)
|
5b42e281d10cb888d1dbdc8cd01422b8c6ec6979
| 16,051
|
import re
def get_platform(properties_file):
"""Finds and returns the platform version in the properties file.
Returns:
String form of the platform version if found, else "unknown".
"""
android_regex = re.compile(r'(android-\w+)')
vendor_regex = re.compile(r':(\d+)\s*$')
for line in properties_file:
match = android_regex.search(line)
if match is not None:
return match.group(1)
match = vendor_regex.search(line)
if match is not None:
return 'android-{}'.format(match.group(1))
return 'unknown'
|
a1f765f383973251adc21725a19fe96a3bcaa2bc
| 16,052
|
def evaluate(clauses, sol):
"""
evaluate the clauses with the solution
"""
sol_vars = {} # variable number -> bool
for i in sol:
sol_vars[abs(i)] = bool(i > 0)
return all(any(sol_vars[abs(i)] ^ bool(i < 0) for i in clause)
for clause in clauses)
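# Usage sketch (added; not part of the original snippet): clauses are lists of
# signed variable numbers, the solution is a list of signed assignments.
if __name__ == "__main__":
    # (x1 OR NOT x2) AND (x2 OR x3) with x1=True, x2=False, x3=True
    assert evaluate([[1, -2], [2, 3]], [1, -2, 3])
    assert not evaluate([[2]], [1, -2])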
|
be50aa2c8f04b6d1ac76a17aea86beedc7abff4c
| 16,053
|
import base64
import sys
def xor_decode(encoded_text):
"""
Decode xor encoded text
"""
#remove initial {xor} if it exists
if encoded_text[0:5].lower() == '{xor}':
encoded_text = encoded_text[5:]
#Convert to bytes, and then pass to base64.decodebytes
try:
decoded_bytes = base64.decodebytes(bytes(encoded_text, 'ascii'))
except:
sys.exit("Unable to decode the input string. Is it valid base64?")
## Now do the xor with the underscore on each byte
decoded_text_byte_array=bytearray([])
decimalOfUnderScore=ord('_')
for byte in decoded_bytes:
xored_byte = byte ^ decimalOfUnderScore
decoded_text_byte_array.append(xored_byte)
## convert the bytes to string using ascii
return decoded_text_byte_array.decode('ascii').rstrip()
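# Usage sketch (added; not part of the original snippet): 'LDo8LTor' is
# "secret" XORed byte-wise with '_' and then base64-encoded.
if __name__ == "__main__":
    assert xor_decode("{xor}LDo8LTor") == "secret"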
|
95979e077bfbfcc43757164d89b5b9a824873214
| 16,054
|
import json
def _load_iam_data(path):
"""Builds a dictionary containing the information about all the
AWS IAM resources and the actions they relate to (for more information look
at the README.md in this directory). The keys of the dictionary are all the
possible IAM policy actions and the values are sets containing the
resources they allow access to. For instance:
    {'ec2:allocateaddress': {'elastic-ip', 'ipv4pool-ec2'}}"""
data = None
with open(path, "r") as file:
data = json.load(file)
actions = {}
for service in data:
prefix = service["prefix"]
for privilege in service["privileges"]:
action = privilege["privilege"].lower()
action = f"{prefix}:{action}"
resources = set()
for resource_type in privilege["resource_types"]:
if "resource_type" not in resource_type:
continue
resource = resource_type["resource_type"].replace("*", "")
if resource == "":
continue
# The actions related to S3 can give access to objects, buckets
# or both (an object is a file in a bucket). Altimeter scans
# buckets, but not objects. So,for us, if an action give access
# to a object, it gives access to whole the bucket.
if prefix == "s3" and resource == "object":
resource = "bucket"
resources.add(resource)
actions[action] = resources
return actions
|
7b394285f088ade8042207fdccbb9e6dfec78314
| 16,058
|
def is_field(x):
"""
Return whether or not ``x`` is a field.
Alternatively, one can use ``x in Fields()``.
EXAMPLES::
sage: R = PolynomialRing(QQ, 'x')
sage: F = FractionField(R)
sage: is_field(F)
True
"""
return x.is_field()
|
87efa719721d72df5c751d734f2f26d6641190c1
| 16,059
|
def sentence_spans(text, sentence_detector):
""" Разбиваем на предложения и генерируем разметку - для красоты """
sentences = sentence_detector(text)
spans = []
sent_start= 0
idx = 1
for sent in sentences:
sent_end = sent_start + len(sent)
spans.append((sent_start,sent_end, 's{}'.format(idx)))
sent_start = 1+sent_end
idx += 1
return spans
|
174c8633bfeb03e67cdba4701b23324c736423ad
| 16,060
|
from typing import Optional
from typing import Dict
def create_exclusive_start_key(player_id: str, start_key: Optional[str]) -> Optional[Dict[str, str]]:
"""
Create the 'ExclusiveStartKey' parameter for the DynamoDB query, based on the user-provided 'start_key' parameter to
this Lambda function.
"""
if start_key:
return {
'player_id': player_id,
'slot_name': start_key,
}
else:
return None
|
7a03434e2d52908eb4f4d68483058183913ac9bb
| 16,061
|
def label_id_to_cluster_id(label_id, C, unused_labels):
"""Map the label id to the cluster id according to clustering matrix.
Args:
label_id: the label id.
C: the cluster matrix of shape L x C.
unused_labels: used to adjust the label id.
Returns:
the cluster id.
"""
# count how many unused labels that are smaller than label_id
offset = sum([l < label_id for l in unused_labels])
row_id = label_id - offset
assert C.indptr[row_id] + 1 == C.indptr[row_id + 1]
cluster_id = C.indices[C.indptr[row_id]]
return cluster_id
|
61593eb822dbaf88f101b2948c02de3fc07794d1
| 16,062
|
import tarfile
import os
def unpack_tar_to_rocket(tar_path: str, rocket_folder_name: str, folder_path: str, remove_after_unpack: bool = True):
"""Unpack a tar archive to a Rocket folder
Unpack a tar archive in a specific folder, rename it and then remove the tar file (or not if the user doesn't want to)
Args:
tar_path (str): path to the tar file containing the Rocket which should be unpacked
rocket_folder_name (str): folder name for the Rocket (to change the one from the tar file)
folder_path (str): folder where the Rocket should be moved once unpacked.
remove_after_unpack (bool, optional): choose to remove the tar file once the Rocket is unpacked. Defaults to True.
Returns:
rocket_folder_path(str): path to the Rocket folder once unpacked.
"""
with tarfile.open(tar_path, 'r') as t:
tar_folder_name = os.path.commonprefix(t.getnames())
        t.extractall(folder_path)  # unpacked under the tar's own top-level folder name for now
# Should rename the folder once it is unpacked
rocket_folder_path = os.path.join(folder_path, rocket_folder_name)
os.rename(os.path.join(folder_path, tar_folder_name), rocket_folder_path)
if remove_after_unpack:
os.remove(tar_path)
return rocket_folder_path
|
c06c29d6d1f4f56fefe99afaa9d8f1f9a5e8cf9c
| 16,063
|
def expand_locations_and_make_variables(ctx, attr, values, targets = []):
"""Expands the `$(location)` placeholders and Make variables in each of the given values.
Args:
ctx: The rule context.
values: A list of strings, which may contain `$(location)`
placeholders, and predefined Make variables.
targets: A list of additional targets (other than the calling rule's
`deps`) that should be searched for substitutable labels.
Returns:
A list of strings with any `$(location)` placeholders and Make
variables expanded.
"""
return_values = []
for value in values:
expanded_value = ctx.expand_location(
value,
targets = targets,
)
expanded_value = ctx.expand_make_variables(
attr,
expanded_value,
{},
)
return_values.append(expanded_value)
return return_values
|
cb426117582161c5f32034df2cc1db29ebe37205
| 16,065
|
def swaggerFilterByOperationId(pathTypes):
"""take pathTypes and return a dictionary with operationId as key
and PathType object as value
Keyword arguments:
pathTypes -- list of types that build the model, list of yacg.model.openapi.PathType instances
"""
ret = {}
for pathType in pathTypes:
for command in pathType.commands:
ret[command.operationId] = pathType
return ret
|
57a2322088602f9cbde626d4ea0bb65a602cfd21
| 16,066
|
from pathlib import Path
import os
def path_resolver(source_root):
"""Construct a function to calculate paths inside source root folder.
Args:
source_root (String): Path to the root folder in which all source video files are located.
Returns:
function: Function to relativize paths to the dataset root folder. If the argument is outside
the content root folder, the returned function will raise ValueError.
"""
# Get canonical path of the content root folder
source_root = Path(os.path.abspath(source_root))
def storepath(path):
"""Get path relative to content root."""
absolute_path = os.path.abspath(path)
if source_root not in Path(absolute_path).parents:
raise ValueError(f"Path '{path}' is outside of content root folder '{source_root}'")
return os.path.relpath(absolute_path, source_root)
return storepath
|
0478371a426d2d8deb2024c556dd01c96db268bf
| 16,067
|
def get_result_or_raise(future):
"""Returns the ``result`` of *future* if it is available, otherwise
raise.
"""
return future.result
|
8f6b2b6b6def964d48829f2b63467a6e39e3b853
| 16,071
|
def string_not(str1):
"""Apply logical 'not' to every symbol of string"""
return "".join([chr(256 + ~ord(x)) for x in str1])
|
aca64e1fb4c50528ba27dceb2557291404215c42
| 16,072
|
def missing_respondents(reported, observed, identified):
"""Fill in missing respondents for the f1_respondent_id table.
Args:
reported (iterable): Respondent IDs appearing in f1_respondent_id.
observed (iterable): Respondent IDs appearing anywhere in the ferc1 DB.
identified (dict): A {respondent_id: respondent_name} mapping for those
observed but not reported respondent IDs which we have been able to
identify based on circumstantial evidence. See also:
`pudl.extract.ferc1.PUDL_RIDS`
Returns:
list: A list of dictionaries representing minimal f1_respondent_id table
records, of the form {"respondent_id": ID, "respondent_name": NAME}. These
records are generated only for unreported respondents. Identified respondents
get the values passed in through ``identified`` and the other observed but
unidentified respondents are named "Missing Respondent ID"
"""
records = []
for rid in observed:
if rid in reported:
continue
elif rid in identified:
records.append(
{
"respondent_id": rid,
"respondent_name": f"{identified[rid]} (PUDL determined)",
},
)
else:
records.append(
{
"respondent_id": rid,
"respondent_name": f"Missing Respondent {rid}",
},
)
return records
|
f919a9d398898b06d4442c75cc314a8cb52e1c5f
| 16,073
|
def sass_change_vars(string_sass: str, context: dict={}) -> str:
"""This function is used to change the values of variables in a sass file
:param string_sass: String with the sass code
:param context: The dictionary keys in context represent the variables that will be searched for in the sass code
with their respective new values.
Example:
>>> from phanterpwa.tools import sass_change_vars
>>> sass_str = '''
... $BG: red
... $FG: green
... .my_class
... background-color: $BG
... color: $FG
... '''
    >>> print(sass_change_vars(sass_str, {"BG": "blue", "FG": "black"}))
$BG: blue
$FG: black
.my_class
background-color: $BG
color: $FG
"""
ns = ""
if all([isinstance(context, dict), isinstance(string_sass, str), context, string_sass]):
lines = string_sass.split('\n')
for x in lines:
find = False
for y in context:
v = "".join(["$", y, ":"])
if v in x:
ini = x.index(v)
ns = "".join([ns, x[:ini], v, " ", context[y], "\n"])
find = True
break
if not find:
ns = "".join([ns, x, "\n"])
return ns[:-1]
elif not isinstance(context, dict):
raise ValueError("The context must be dict type. Given: {0}".format(type(context)))
elif not isinstance(string_sass, str):
raise ValueError("The string_sass must be str type. Given: {0}".format(type(string_sass)))
elif not string_sass:
raise ValueError("The string_sass is invalid. Given: {0}".format(string_sass))
return ""
|
16d2b80b46ce66368b9ef0f539e7e21a6f2a83cd
| 16,074
|
def translate_humanity(request):
"""
translates request.session['humanity'] dictionary {0: True, 1: False, ..}
into 'One, Two, Three' according to numbers that are True
@param request: Http Request
@return string
"""
numbers = []
translation = {0: 'one', 1: 'two', 2: 'three', 3: 'four'}
for i in request.session['humanity']:
if request.session['humanity'][i]:
numbers.append(translation[i])
check_string = ', '.join(numbers)
return check_string
|
915056438673e8581cf1f762dcfefcbabb820d19
| 16,076
|
def distance(x_0, y_0, x_1, y_1):
"""Return distance between 2 points (x_0, y_0) and (x_1, y_1)
"""
x_dist = x_0 - x_1
y_dist = y_0 - y_1
    return (x_dist ** 2 + y_dist ** 2) ** 0.5
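# Usage sketch (added; not part of the original snippet): a 3-4-5 triangle.
if __name__ == "__main__":
    assert distance(0, 0, 3, 4) == 5.0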
|
06c250b09e2a386f1814fe9c748cad574869a741
| 16,077
|
def get_cache_encrypt_key(key):
"""Prepare key for use with crypto libs.
:param key: Passphrase used for encryption.
"""
key += (16 - (len(key) % 16)) * '-'
return key.encode('utf-8')
|
86beda15822e593315fa39a19eca2dccd8968e7e
| 16,078
|
from typing import Callable
from typing import Sequence
def randline(filename: str, randchoice: Callable[[Sequence[str]], str]) -> str:
"""
return a randomly-selected line from the given file
"""
with open(filename, "rt", encoding="utf-8") as fh:
return randchoice(fh.readlines()).rstrip()
|
6978158b25a8702e99ee6e7f9461cd391873eee4
| 16,081
|
async def latency(ctx):
"""Returns my gateway latency."""
return f'{ctx.client.gateway.latency*1000.:.0f} ms'
|
f2d088adfa485bfff8da5154ce672232e4d57e1d
| 16,082
|
import os
def pdir():
"""Get absolute path to parent directory.
Returns
-------
str
abs path to parent directory
"""
return os.path.realpath(os.path.pardir)
|
244081bb26b8e2371c1f96b0938cef20e60a485c
| 16,083
|
def str2intlist(s, delim=","):
""" create a list of ints from a delimited string
Parameters
----------
s: string
delim: string
Returns
-------
int_list: list of ints
Examples
--------
>>> str2intlist("1,2,3")
[1, 2, 3]
>>> str2intlist("1-3")
[1, 2, 3]
>>> str2intlist("2,3-4,6")
[2, 3, 4, 6]
>>> str2intlist("a")
Traceback (most recent call last):
...
TypeError: not a valid list of ints: "a"
"""
def get_int(n):
try:
return int(n)
except:
raise TypeError('not a valid list of ints: "{}"'.format(s))
return sum(((list(range(*[get_int(j) + k for k, j in enumerate(i.split('-'))]))
if '-' in i else [get_int(i)]) for i in s.split(delim)), [])
|
ae7a568a9e8b7c55e146515fad4dd810bee4ae46
| 16,084
|
def generate_timestamp_format(date_mapper: dict) -> str:
"""
Description
-----------
    Generates the time format for day, month, and year dates based on each
    component's specified time_format.
Parameters
----------
date_mapper: dict
a dictionary for the schema mapping (JSON) for the dataframe filtered
for "date_type" equal to Day, Month, or Year.
Output
------
e.g. "%m/%d/%Y"
"""
day = "%d"
month = "%m"
year = "%y"
for kk, vv in date_mapper.items():
if vv["date_type"] == "day":
day = vv["time_format"]
elif vv["date_type"] == "month":
month = vv["time_format"]
elif vv["date_type"] == "year":
year = vv["time_format"]
return str.format("{}/{}/{}", month, day, year)
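# Usage sketch (added; the column names are hypothetical), showing how the
# mapper's per-component formats are assembled.
if __name__ == "__main__":
    mapper = {"col_a": {"date_type": "day", "time_format": "%d"},
              "col_b": {"date_type": "month", "time_format": "%m"},
              "col_c": {"date_type": "year", "time_format": "%Y"}}
    assert generate_timestamp_format(mapper) == "%m/%d/%Y"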
|
cd535a4fb35917517711cf149430c128e2c46b6d
| 16,085
|
def get_command(line, fmt_space):
"""
Given a header line, get the possible command
Parameters
-----------
line : string
Line of the header
fmt_space : boolean
        True if the Novonix format has spaces in the commands
Returns
--------
command : string
Instruction in the header line
Examples
---------
>>> import preparenovonix.novonix_io as prep
>>> command = prep.get_command('[Open circuit storage]',fmt_space=True)
>>> print(command)
Open circuit storage
"""
command = " "
fw = line.strip()
# Find commands ignoring left spaces
if fmt_space:
command = fw[1:-1]
else:
if ":" in fw:
command = fw.split(":")[1].strip()
else:
command = fw[1:-1]
return command
|
78642fd6e98817b85ce8431774a34723ed649473
| 16,086
|