content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_tracer(request):
    """Return the tracer that the datadog middleware attached to ``request``.

    It is meant to be used only for testing purposes.
    """
    span = request['__datadog_request_span']
    return span._tracer
import asyncio
import os
import sys
async def stdio(loop=None):
    """Set up stdin/stdout stream handlers.

    Returns a ``(reader, writer)`` pair: an ``asyncio.StreamReader`` wired
    to stdin and a ``StreamWriter`` wired to stdout.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    reader = asyncio.StreamReader()
    reader_protocol = asyncio.StreamReaderProtocol(reader)
    # BUG FIX: the writer must wrap stdout (fd 1); the original opened
    # fd 0 (stdin) in 'wb' mode, which fails or writes to the wrong stream.
    writer_transport, writer_protocol = await loop.connect_write_pipe(
        FlowControlMixin, os.fdopen(sys.stdout.fileno(), 'wb'))
    writer = StreamWriter(writer_transport, writer_protocol, None, loop)
    await loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)
    return reader, writer
def local_self_attention_layer(hparams, prefix):
    """Build a ``LocalSelfAttention`` layer from hyperparameters.

    Head-related settings are looked up under ``prefix``; the remaining
    settings come from the shared hyperparameters.
    """
    heads = hparams.get(prefix + "num_heads")
    memory_heads = hparams.get(prefix + "num_memory_heads")
    shared_kv = hparams.get(prefix + "shared_kv", False)
    return transformer_layers.LocalSelfAttention(
        num_heads=heads,
        num_memory_heads=memory_heads,
        radius=hparams.local_attention_radius,
        key_value_size=hparams.d_kv,
        shared_kv=shared_kv,
        attention_kwargs=attention_kwargs_from_hparams(hparams))
import pickle
def hwtrain(X_csv: str, y_csv: str, model: str = 'lm') -> str:
    """Fit a machine learning model on CSV training data and pickle it.

    Parameters
    ----------
    X_csv
        The path to the feature matrix in CSV format
    y_csv
        The path to the label vector in CSV format
    model
        The type of machine learning model (only 'lm' is supported)

    Returns
    -------
    pickled_model_path
        Path to the pickled model
    """
    # Load features and labels.
    features = pd.read_csv(X_csv)
    labels = pd.read_csv(y_csv)
    # Choose the estimator.
    if model == 'lm':
        estimator = LinearRegression()
    else:
        raise ValueError('The only available model is "lm"')
    estimator.fit(features, labels)
    # Serialize the fitted estimator.
    pickled_model_path = model + '_model.pkl'
    with open(pickled_model_path, 'wb') as model_file:
        pickle.dump(estimator, model_file)
    return pickled_model_path
def convert_ban_to_quan(str_ban):
    """Convert half-width (hankaku) characters to full-width (zenkaku).

    A half-width space (U+0020) maps to the ideographic space (U+3000);
    the other printable ASCII characters (U+0021..U+007E) are shifted by
    0xFEE0 into the full-width forms block. All other characters pass
    through unchanged.
    """
    out_chars = []
    for ch in str_ban:
        code = ord(ch)
        if code == 32:
            # Half-width space converts directly to ideographic space.
            code = 12288
        elif 32 <= code <= 126:
            # Printable ASCII shifts into the full-width block.
            code += 65248
        out_chars.append(chr(code))
    return "".join(out_chars)
import requests
def update_user_permissions(userid, profile="grafana", **kwargs):
    """
    Update a user's permissions (e.g. the Grafana admin flag).

    userid
        Id of the user.
    isGrafanaAdmin
        Whether user is a Grafana admin.
    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    CLI Example:
    .. code-block:: bash
        salt '*' grafana4.update_user_permissions <user_id> isGrafanaAdmin=<true|false>
    """
    # A profile given by name is resolved through Salt's config system.
    if isinstance(profile, string_types):
        profile = __salt__["config.option"](profile)
    response = requests.put(
        "{0}/api/admin/users/{1}/permissions".format(profile["grafana_url"], userid),
        json=kwargs,
        auth=_get_auth(profile),
        headers=_get_headers(profile),
        timeout=profile.get("grafana_timeout", 3),
    )
    # Surface HTTP 4xx/5xx responses as exceptions rather than returning them.
    if response.status_code >= 400:
        response.raise_for_status()
    return response.json()
def to_xepsilon(_):
    """Return the result of ``xepsilon()``, ignoring the argument.

    :param _: ignored; present so this can be used where a one-argument
        callable is expected.
    :return: the value produced by ``xepsilon()``
    """
    return xepsilon()
def build_permissions_response():
    """Build a speech-only response asking for address permissions."""
    output = (
        "I'm sorry, I was not able to lookup your home town. "
        "With your permission, I can provide you with this information. "
        "Please check your companion app for details"
    )
    speech = {'type': 'PlainText', 'text': output}
    card = {
        'type': 'AskForPermissionsConsent',
        'permissions': ['read::alexa:device:all:address'],
    }
    return {'outputSpeech': speech, 'card': card, 'shouldEndSession': True}
def simple_tokenizer(text):
    """
    Example for returning a list of terms from text.

    1. Normalizes text by casting to lowercase.
    2. Removes punctuation from tokens (via the module-level ``REMOVE_TABLE``).
    3. Returns tokens as indicated by whitespace.
    """
    text = text.lower()
    # Remove punctuation. The old Python 2 ``basestring`` fallback branch
    # was removed: under Python 3 it was unreachable for str inputs and
    # raised NameError (``basestring`` no longer exists) for anything else.
    if isinstance(text, str):
        text = text.translate(REMOVE_TABLE)
    # Split the tokens
    return text.split()
def progress(*args, **kwargs):
    """Render an HTML ``<progress>`` element.

    The element shows the completion progress of a task; browsers typically
    draw it as a progress bar, and JavaScript can manipulate its value.
    All arguments are forwarded to the generic ``el`` factory.
    """
    tag = 'progress'
    return el(tag, *args, **kwargs)
def openssl_error():
    """Return the OpenSSL error type for use in exception clauses"""
    # _OpenSSLError is bound elsewhere in this module (resolved at import
    # time); this indirection lets callers write ``except openssl_error():``.
    return _OpenSSLError
def load_path(path, visitor=TokenVisitor):
    """
    Deserialize all documents contained in the file at ``path``.

    Args:
        path (str): Path to file to deserialize
        visitor (type(TokenVisitor)): Visitor to use
    Returns:
        (list): Deserialized documents
    """
    # The file handle only needs to live for the duration of the scan.
    with open(path) as fh:
        return deserialized(Scanner(fh), visitor)
def get_card_names(cards):
    """Extract the ``"name"`` field from each card.

    :param cards: List of card JSONs
    :return: List of card names (str); ``None`` for cards without a name
    """
    return [card.get("name") for card in cards]
import platform
def pyversion(ref=None):
    """Determine the Python version and optionally compare to a reference.

    With no ``ref``, return the interpreter version string. With ``ref``,
    return True when the running (major, minor) version is at least the
    reference's (major, minor).
    """
    version = platform.python_version()
    if not ref:
        return version

    def _major_minor(v):
        # Compare only the first two numeric components.
        return [int(part) for part in v.split(".")[:2]]

    return _major_minor(version) >= _major_minor(ref)
import tqdm
def opt_tqdm(iterable):
    """Wrap ``iterable`` in a tqdm progress bar when tqdm is available.

    Falls back to returning ``iterable`` unchanged if tqdm cannot be
    imported, so callers never need to care whether it is installed.
    """
    # BUG FIX: the original try/except had an empty ``try:`` body, which is
    # a SyntaxError. Import tqdm lazily so its absence degrades gracefully.
    try:
        import tqdm
    except ImportError:
        return iterable
    return tqdm.tqdm(iterable)
def get_dataset_config():
    """Gets the config for dataset.

    Returns a ``config_dict.ConfigDict`` holding every dataset-related
    option initialized to its default; callers override fields as needed.
    """
    config = config_dict.ConfigDict()
    # The path to the specification of grid evaluator.
    # If not specified, normal evaluator will be used.
    config.grid_evaluator_spec = ''
    # The directory of saved mgcdb84 dataset.
    config.dataset_directory = ''
    # The data types of MGCDB84 dataset to use. If specified, the training and
    # validation set will be obtained by partition data with specified type.
    # If not specified, training and validation set will be those of MCGDB84.
    config.mgcdb84_types = ''
    # The fraction of training, validation and set sets.
    # Only used if mgcdb84_types is not None. Comma separated string of 3 floats.
    config.train_validation_test_split = '0.6,0.2,0.2'
    # The targets used for training. Defaults to mgcdb84_ref, which uses target
    # values from reference values given by MCGDB84. targets can also be set to
    # the exchange-correlation energies of a certain functional, which can be
    # specified by an existing functional name in xc_functionals or the path to
    # a json file specifying the functional form and parameters.
    config.targets = 'mgcdb84_ref'
    # The number of targets used for training. Default to 0 (use all targets).
    config.num_targets = 0
    # If True, only spin unpolarized molecules are used.
    config.spin_singlet = False
    # The evaluation mode for training, validation and test sets. Possible values
    # are jit, onp and jnp. Comma separated string.
    config.eval_modes = 'jit,onp,onp'
    return config
import pathlib
import tests
import json
import yaml
def stub_multiservo_yaml(tmp_path: pathlib.Path) -> pathlib.Path:
    """Return the path to a servo config file set up for multi-servo execution.

    Writes a two-document YAML file to ``tmp_path / "servo.yaml"``, one
    document per optimizer, each wiring up the measure/adjust connectors.
    """
    config_path: pathlib.Path = tmp_path / "servo.yaml"
    settings = tests.helpers.BaseConfiguration()
    # Round-trip through JSON to get plain (YAML-serializable) types.
    measure_config_json = json.loads(
        json.dumps(
            settings.dict(
                by_alias=True,
            )
        )
    )

    def _servo_config(optimizer_id: str, token: str) -> dict:
        """Build one servo config document for the given optimizer."""
        optimizer = servo.Optimizer(id=optimizer_id, token=token)
        optimizer_config_json = json.loads(
            optimizer.json(
                by_alias=True,
            )
        )
        return {
            "optimizer": optimizer_config_json,
            "connectors": ["measure", "adjust"],
            "measure": measure_config_json,
            "adjust": {}
        }

    config1 = _servo_config("dev.opsani.com/multi-servox-1", "123456789")
    config2 = _servo_config("dev.opsani.com/multi-servox-2", "987654321")
    config_yaml = yaml.dump_all([config1, config2])
    config_path.write_text(config_yaml)
    return config_path
import os
def _escape_space(program):
"""escape spaces in for windows"""
if os.name == "nt" and ' ' in program:
return '"' + program + '"'
else:
return program | 67a8fa1544f524e9a2591c3221f48c2c130ef86b | 30,417 |
import collections
def rename_internal_nodes(tree, pg_dict):
    """Rename internal nodes (add phylogroups to the name).

    Each internal node is renamed to ``PG-<groups>_<counter>`` where
    <groups> is the dash-joined, sorted set of phylogroups under the node
    (via ``node_to_pg``) and <counter> disambiguates nodes sharing the same
    phylogroup set. Leaves are left untouched. The tree is modified in
    place and also returned.

    NOTE(review): assumes an ete3-style tree exposing ``traverse()`` and
    ``is_leaf()`` — confirm against callers.
    """
    # Per-label counter so duplicate phylogroup sets get suffixes _0, _1, ...
    numbers = collections.defaultdict(lambda: 0)
    for node in tree.traverse("postorder"):
        if node.is_leaf():
            continue
        pgs = node_to_pg(node, pg_dict)
        pgs_s = "-".join(sorted(list(pgs), key=_sorting_key))
        nname = "PG-{}_{}".format(pgs_s, numbers[pgs_s])
        node.name = nname
        numbers[pgs_s] += 1
    return tree
import math
import torch
import tqdm
def assign_by_euclidian_at_k(X, T, k):
    """
    Assign each sample the target labels of its k nearest neighbours.

    X : [nb_samples x nb_features], e.g. 100 x 64 (embeddings)
    T : labels, one per sample
    k : for each sample, assign target labels of k nearest points
    """
    # Build the full pairwise-distance matrix in column chunks to bound peak
    # memory; equivalent to sklearn.metrics.pairwise.pairwise_distances(X).
    chunk_size = 1000
    num_chunks = math.ceil(len(X) / chunk_size)
    distances = torch.tensor([])
    # BUG FIX: ``tqdm`` here is the module, so the progress bar is
    # ``tqdm.tqdm(...)``; calling the module itself raises TypeError.
    for i in tqdm.tqdm(range(0, num_chunks)):
        chunk_indices = [chunk_size * i, min(len(X), chunk_size * (i + 1))]
        chunk_X = X[chunk_indices[0]:chunk_indices[1], :]
        distance_mat = torch.from_numpy(sklearn.metrics.pairwise.pairwise_distances(X, chunk_X))
        distances = torch.cat((distances, distance_mat), dim=-1)
    assert distances.shape[0] == len(X)
    assert distances.shape[1] == len(X)
    distances = distances.numpy()
    # Skip column 0 (distance to self) and take the next k nearest indices.
    indices = np.argsort(distances, axis=1)[:, 1:k + 1]
    return np.array([[T[i] for i in ii] for ii in indices])
def _svd_classification(dataset='mnist_small'):
    """
    SVD-based least-squares classifier on a classification dataset.
    Inputs:
        dataset: (str) name of dataset
    Outputs:
        accuracy on predicted values
    """
    # Load the requested dataset (rosenbrock needs extra size arguments).
    if dataset=='rosenbrock':
        x_train, x_valid, x_test, y_train, y_valid, y_test = load_dataset('rosenbrock', n_train=5000, d=2)
    else:
        x_train, x_valid, x_test, y_train, y_valid, y_test = load_dataset(dataset)
    # Train on the union of training and validation sets.
    x_total = np.vstack([x_train, x_valid])
    y_total = np.vstack([y_train, y_valid])
    # Design matrix with a leading column of ones for the bias term.
    X = np.ones((len(x_total), len(x_total[0]) + 1))
    X[:, 1:] = x_total
    U, S, Vh = np.linalg.svd(X)
    # Invert Sigma (padded to full shape before pseudo-inversion).
    sig = np.diag(S)
    filler = np.zeros([len(x_total)-len(S), len(S)])
    sig_inv = np.linalg.pinv(np.vstack([sig, filler]))
    # Compute weights: least-squares solution w = V Sigma^+ U^T y.
    w = Vh.T @ (sig_inv @ (U.T @ y_total))
    # Make test predictions
    X_test = np.ones((len(x_test), len(x_test[0]) + 1))
    X_test[:, 1:] = x_test
    # One-hot labels: pick the argmax class and compare to ground truth.
    predictions = np.argmax(X_test @ w, axis=1)
    y_test = np.argmax(1 * y_test, axis=1)
    return (predictions == y_test).sum() / len(y_test)
def set_answer(set_number):
    """
    get result answer: the largest prime factor of ``set_number``.

    Repeatedly divides out the factor returned by ``prime_factorization``
    (presumably the smallest prime factor — TODO confirm) until the
    remaining value is prime, then returns it.

    >>> set_answer(600851475143)
    6857
    >>> set_answer(3000)
    5
    """
    while True:
        prime_fac = prime_factorization(set_number)
        # When the "factor" equals the number itself, the number is prime.
        if prime_fac < set_number:
            set_number //= prime_fac
        else:
            return set_number
import torch
def model_infer(model, test_images, test_affinities, test_beliefs, args):
    """
    Run one inference pass (no gradient tracking) and compute the loss.

    Parameters:
        model: object with the trained model
        test_images: batch of images (float32), size: (test_batch_size,3,x,y)
        test_affinities: batch of affinity maps (float32), size: (test_batch_size,16,x/8,y/8)
        test_beliefs: batch of belief maps (float32), size: (test_batch_size,9,x/8,y/8)
        args: namespace providing ``gpu_device`` for CUDA placement
    Returns (in this order):
        belief: output belief maps, size: (test_batch_size,9,x/8,y/8)
        affinity: output affinity maps, size: (test_batch_size,16,x/8,y/8)
        loss: scalar
    """
    # NOTE(review): ``Variable`` is the legacy torch.autograd wrapper —
    # presumably imported at module level; confirm.
    if torch.cuda.is_available():
        test_images_v = Variable(test_images.cuda(device=args.gpu_device))
        test_beliefs_v = Variable(test_beliefs.cuda(device=args.gpu_device))
        test_affinities_v = Variable(test_affinities.cuda(device=args.gpu_device))
    else:
        test_images_v = Variable(test_images)
        test_beliefs_v = Variable(test_beliefs)
        test_affinities_v = Variable(test_affinities)
    # This shall be adjusted according to the specific model
    with torch.no_grad():
        output_belief, output_affinity = model.forward(test_images_v)
        J = compute_loss(output_belief, output_affinity, test_beliefs_v, test_affinities_v)
    # Index 5 selects the final stage of the network's output list.
    belief = output_belief[5].data.cpu().numpy()
    affinity = output_affinity[5].data.cpu().numpy()
    loss = J.data.cpu().numpy()
    return belief, affinity, loss
import re
def clean_value(value, suffix):
    """
    Strip out copy suffix from a string value.
    :param value: Current value e.g "Test Copy" or "test-copy" for slug fields.
    :type value: `str`
    :param suffix: The suffix value to be replaced with an empty string.
    :type suffix: `str`
    :return: Stripped string without the suffix.
    """
    # type: (str, str) -> str
    # BUG FIX: escape the suffix so regex metacharacters in it (e.g. '.',
    # '+') are matched literally instead of being interpreted as a pattern.
    pattern = r"([\s-]?){}[\s-][\d]$".format(re.escape(suffix))
    return re.sub(pattern, "", value, flags=re.I)
def list_devices_to_string(list_item):
    """Join cfg devices into a comma-separated string.

    Args:
        list_item (list): list of devices, e.g. [], [1], ["1"], [1,2], ...
    Returns:
        devices (string): comma split devices
    """
    return ",".join(map(str, list_item))
def trace_module(no_print=True):
    """ Trace plot series module exceptions """
    # Module/function identifying the code whose exceptions are traced.
    mname = 'series'
    fname = 'plot'
    module_prefix = 'putil.plot.{0}.Series.'.format(mname)
    # Members of the Series class whose exception behaviour is recorded.
    callable_names = (
        '__init__',
        'data_source',
        'label',
        'color',
        'marker',
        'interp',
        'line_style',
        'secondary_axis'
    )
    return docs.support.trace_support.run_trace(
        mname, fname, module_prefix, callable_names, no_print, ['putil.eng']
    )
def get_ids(values):
    """Transform numeric identifiers, corpora shortcodes (slugs),
    and two-letter ISO language codes, into their corresponding numeric
    identifier as per the order in CORPORA_SOURCES.
    :return: List of indices in CORPORA_SOURCES
    :rtype: list
    """
    if "all" in values:
        return list(range(len(CORPORA_SOURCES)))
    ids = []
    for index, corpus_info in enumerate(CORPORA_SOURCES):
        props = corpus_info["properties"]
        # A corpus matches on its 1-based id, its slug, or its language code.
        keys = (str(index + 1), props["slug"], props["language"])
        if any(key in values for key in keys):
            ids.append(index)
    return ids
import click
def quiet_option(func):
    """Add a quiet option.

    Decorator that attaches a counted ``-q/--quiet`` flag to a click
    command; each repetition lowers the verbosity by one step.
    """
    def _callback(ctx, unused_param, value):
        # The count is negated: more -q flags means lower verbosity.
        _set_verbosity(ctx, -value)
        return value
    return click.option('-q', '--quiet', count=True,
                        expose_value=False, help='Decreases verbosity.',
                        callback=_callback)(func)
def _compute_gaussian_fwhm(spectrum, regions=None):
    """
    This is a helper function for the above `gaussian_fwhm()` method.

    Converts the Gaussian sigma width of ``spectrum`` (optionally limited
    to ``regions``) to a full width at half maximum using the standard
    ``gaussian_sigma_to_fwhm`` factor (2*sqrt(2*ln 2)).
    """
    fwhm = _compute_gaussian_sigma_width(spectrum, regions) * gaussian_sigma_to_fwhm
    return fwhm
from re import T
from typing import Optional
from typing import Callable
from typing import Any
from typing import List
async def sorted(
    iterable: AnyIterable[T],
    *,
    key: Optional[Callable[[T], Any]] = None,
    reverse: bool = False,
) -> List[T]:
    """
    Sort items from an (async) iterable into a new list
    The optional ``key`` argument specifies a one-argument (async) callable, which
    provides a substitute for determining the sort order of each item.
    The special value and default :py:data:`None` represents the identity functions,
    i.e. compares items directly.
    The default sort order is ascending, that is items with ``a < b``
    imply ``result.index(a) < result.index(b)``. Use ``reverse=True``
    for descending sort order.
    .. note::
        The actual sorting is synchronous,
        so a very large ``iterable`` or very slow comparison
        may block the event loop notably.
    It is guaranteed to be worst-case O(n log n) runtime.
    """
    if key is None:
        try:
            # Fast path: a plain synchronous iterable with no key can go
            # straight to the builtin sorted().
            return _sync_builtins.sorted(iterable, reverse=reverse)  # type: ignore
        except TypeError:
            # Not a sync iterable (e.g. an async iterator): fall through to
            # the decorate-sort-undecorate path below.
            pass
    # key is None here only when the fast path failed, hence _identity.
    key = _awaitify(key) if key is not None else _identity
    # Decorate-sort-undecorate: await the (possibly async) key once per item,
    # then sort synchronously on the precomputed keys.
    keyed_items = [(await key(item), item) async for item in aiter(iterable)]
    keyed_items.sort(key=lambda ki: ki[0], reverse=reverse)
    return [item for key, item in keyed_items]
def config_split(config):
    """
    Split a config dict into smaller chunks — one per actuator entry plus
    the remainder — to avoid sending big config files in one message.

    Note: ``config`` is modified in place (its "actuator" key is removed).
    """
    chunks = []
    actuators = config.pop("actuator", None)
    if actuators is not None:
        for name, actuator_cfg in actuators.items():
            chunks.append({"actuator": {name: actuator_cfg}})
    chunks.append(config)
    return chunks
def transl(x, y=None, z=None):
    """
    Create or decompose translational homogeneous transformations.
    Create a homogeneous transformation
    ===================================
    - T = transl(v)
    - T = transl(vx, vy, vz)
    The transformation is created with a unit rotation submatrix.
    The translational elements are set from elements of v which is
    a list, array or matrix, or from separate passed elements.
    Decompose a homogeneous transformation
    ======================================
    - v = transl(T)
    Return the translation vector
    """
    # One-argument form: x is a homogeneous transform (decompose), a
    # 3-vector (create), or a list of transforms (batch-decompose).
    # NOTE(review): if exactly one of y/z is supplied, no branch matches
    # and the function implicitly returns None — probably unintended.
    if y==None and z==None:
        x=mat(x)
        try:
            if ishomog(x):
                # Decompose: return the 3x1 translation column.
                return x[0:3,3].reshape(3,1)
            else:
                # Create: unit rotation with x as the translation column.
                return concatenate((concatenate((eye(3),x.reshape(3,1)),1),mat([0,0,0,1])))
        except AttributeError:
            # x is a sequence of transforms: gather their translation
            # columns side by side.
            n=len(x)
            r = [[],[],[]]
            for i in range(n):
                r = concatenate((r,x[i][0:3,3]),1)
            return r
    elif y!=None and z!=None:
        # Three-scalar form: build the transform from vx, vy, vz.
        return concatenate((concatenate((eye(3),mat([x,y,z]).T),1),mat([0,0,0,1])))
def find_next_square2(sq: int) -> int:
    """Return the next perfect square after ``sq``, or -1 when ``sq`` is
    not itself a perfect square.

    Uses integer arithmetic (``math.isqrt``) so large inputs are handled
    exactly; the previous float ``** (1/2)`` approach loses precision for
    squares beyond 2**53.
    """
    import math  # local import keeps this a drop-in for the original module
    root = math.isqrt(sq)
    return (root + 1) ** 2 if root * root == sq else -1
import colorsys
def hex_2_hsv(hex_col):
    """
    Convert a hex colour code to colorsys-style HSV.
    >>> hex_2_hsv('#f77f00')
    (0.08569500674763834, 1.0, 0.9686274509803922)
    """
    digits = hex_col.lstrip('#')
    # Parse the RR/GG/BB byte pairs and normalize each channel to [0, 1].
    channels = [int(digits[i:i + 2], 16) / 255.0 for i in (0, 2, 4)]
    return colorsys.rgb_to_hsv(*channels)
from pathlib import Path
def _drivers_dir() -> str:
"""
ドライバ格納ディレクトリのパスを返します
:return: ドライバ格納ディレクトリのパス
"""
return str(Path(__file__).absolute().parent.parent.joinpath('drivers')) | 45b173099f6df24398791ec33332072a7651fa4f | 30,434 |
import types
def create_list_response_value(
    *,
    authorization: types.TAuthorization,
    uri: types.TUri,
    auth_info: types.CredentialsAuthInfo,
) -> types.TResponseValue:
    """
    Calculate the response for a list type response.
    Raises NotFoundError when the uri is not linked to a known spec.
    Args:
        authorization: Credentials embedded into each package install link.
        uri: The requested uri.
        auth_info: Information about the user.
    Returns:
        The html to return to the user for the request.
    """
    # The uri is expected to be of the form "/<spec_id>/".
    assert uri.startswith("/")
    assert uri.endswith("/")
    spec_id = uri[1:-1]
    try:
        version_infos = package_database.get().list_spec_versions(
            sub=auth_info.sub, name=spec_id
        )
    except package_database.exceptions.NotFoundError as exc:
        raise exceptions.NotFoundError(
            f"could not find package with {spec_id=}"
        ) from exc
    host = "index.package.openalchemy.io"
    def package_name(version: str) -> str:
        """Calculate the name of the package."""
        return f"{spec_id.replace('-', '_')}-{version}.tar.gz"
    # One anchor tag per available version, with basic-auth credentials
    # baked into the download URL so pip can fetch it directly.
    install_links = list(
        map(
            lambda version_info: (
                f'<a href="https://'
                f"{authorization.public_key}:{authorization.secret_key}"
                f"@{host}/{spec_id}/"
                f'{package_name(version_info["version"])}">'
                f'{package_name(version_info["version"])}</a><br>'
            ),
            version_infos,
        )
    )
    joined_install_links = "\n".join(install_links)
    return f"""
    <body>
    {joined_install_links}
    </body>
    """
def get_variables(expr):
    """
    Get variables of an expression.

    Recursively collects every decision variable appearing in ``expr``,
    which may be a variable, an Expression, or a (nested) list of these.
    Duplicates are preserved; constants contribute nothing.
    """
    if isinstance(expr, NegBoolView):
        # this is just a view, return the actual variable
        return [expr._bv]
    if isinstance(expr, _NumVarImpl):
        # a real var, do our thing
        return [expr]
    vars_ = []
    # if list or Expr: recurse
    if is_any_list(expr):
        for subexpr in expr:
            vars_ += get_variables(subexpr)
    elif isinstance(expr, Expression):
        for subexpr in expr.args:
            vars_ += get_variables(subexpr)
    # else: every non-list, non-expression
    return vars_
def tousLesIndices(stat):
    """
    Returns the indices of all the elements of the graph.

    Note: this returns a ``dict_keys`` view over ``stat.node2com``, not a
    list; wrap with ``list()`` if a list is required.
    """
    # Everything that previously followed this return (global declarations
    # and an alternative index lookup) was unreachable dead code and has
    # been removed.
    return stat.node2com.keys()
def edit_tx_sheet(request, sheet_id):
    """Allows the user to edit treatment sheet fields and updates the date of the sheet"""
    tx_sheet = get_object_or_404(TxSheet, id=sheet_id)
    form = TxSheetForm(instance=tx_sheet)
    # Only the sheet's owner may edit; anyone else gets a 403.
    if request.user == tx_sheet.owner:
        if request.method == 'POST':
            form = TxSheetForm(data=request.POST)
            if form.is_valid():
                # Stamp the sheet with today's date on every successful edit.
                defaults = {'owner': request.user,
                            'name': request.POST['name'],
                            'comment': request.POST['comment'],
                            'date': date.today()}
                tx_sheet = form.update(sheet_id=sheet_id, defaults=defaults)
                return redirect(tx_sheet)
        # GET, or POST with an invalid form: re-render the edit page.
        return render(request, 'tx_sheet/tx_sheet_edit.html', {'navbar': 'tx_sheet', 'form': form})
    else:
        raise PermissionDenied
import warnings
def system(
    W, L_x, L_sc_up, L_sc_down, z_x, z_y, a, shape, transverse_soi,
    mu_from_bottom_of_spin_orbit_bands, k_x_in_sc, wraparound, infinite,
    sc_leads=False, no_phs=False, rough_edge=None,
    phs_breaking_potential=False):
    """Create zigzag system
    Parameters
    ----------
    W : float
        Width of the semiconductor (or contact seperation of the junction.)
    L_x : float
        Length of the system (x-dimension).
    L_sc_up : float
        Minimum width of the top superconductor.
    L_sc_down : float
        Minimum width of the bottom superconductor.
    z_x : float
        Period of zigzag.
    z_y : float
        Amplitude of zigzag.
    a : float
        Lattice spacing.
    shape : string
        Can be either 'sawtooth' for zigzag shape, or 'parallel_curve'
        for a shape formed by curve parallel to a sine curve.
    transverse_soi : bool
        Toggle Rashba spin-orbit in the y-direction.
    mu_from_bottom_of_spin_orbit_bands : bool
        Toggle counting chemical potential from bottom of spin orbit band.
    k_x_in_sc : bool
        Toggle whether superconductor have hopping in the x-direction.
    wraparound : bool
        Toggle a wraparound system, such that the translational invariance
        is transformed into the momentum parameter k_x.
    infinite : bool
        Toggle whether the system contains a z_x periodic
        translational invariance.
    sc_leads : bool, optional
        Toggle superconducting leads in y-direction.
    no_phs : bool, optional
        Remove particle-hole symmetry by removing the electron-hole orbital.
    rough_edge : bool, optional
        Toggle roughened edges to shape.
    phs_breaking_potential : bool, optional
        Add particle-hole symmetry breaking potential to allow for a
        computationally cheaper way to calculate the Majorana decay length.
    Returns
    -------
    kwant.builder.FiniteSystem or kwant.builder.InfiniteSystem
    """
    if wraparound and not infinite:
        raise ValueError('If you want to use wraparound, infinite must be True.')
    if sc_leads and not infinite or sc_leads and not wraparound:
        raise ValueError('If you want to use sc_leads, infinite and wraparound must be True.')
    template_strings = get_template_strings(
        transverse_soi, mu_from_bottom_of_spin_orbit_bands,
        k_x_in_sc, False, no_phs, phs_breaking_potential)
    template = {k: discretize(v, coords=('x', 'y'), grid_spacing=a)
                for k, v in template_strings.items()}
    shapes = get_shapes(shape, a, z_x, z_y, W, L_x, L_sc_down, L_sc_up, rough_edge)
    syst = kwant.Builder(kwant.TranslationalSymmetry([L_x, 0]) if infinite else None)
    for y in np.arange(-W - L_sc_down, W + L_sc_up, a):
        # We're unsure about the location of the barrier
        # so we loop over all possible sites.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            sites = syst.fill(template['barrier'], shapes['edge'], (0, y))
    syst.fill(template['normal'], *shapes['normal'])
    if L_sc_up > 0:
        syst.fill(template['sc_top'], *shapes['sc_top'])
    if L_sc_down > 0:
        syst.fill(template['sc_bot'], *shapes['sc_bot'])
    if infinite and wraparound:
        syst = kwant.wraparound.wraparound(syst)
    if sc_leads:
        lead_up = kwant.Builder(kwant.TranslationalSymmetry([L_x, 0], [0, a]))
        lead_down = kwant.Builder(kwant.TranslationalSymmetry([L_x, 0], [0, -a]))
        lead_up = kwant.wraparound.wraparound(lead_up, keep=1)
        lead_down = kwant.wraparound.wraparound(lead_down, keep=1)
        # BUG FIX: the originals referenced the undefined names
        # ``template_sc_top``/``template_sc_bot`` (NameError at runtime);
        # use the discretized templates from the ``template`` dict instead.
        lead_up.fill(template['sc_top'], lambda s: 0 <= s.pos[0] < L_x, (0, 0))
        lead_down.fill(template['sc_bot'], lambda s: 0 <= s.pos[0] < L_x, (0, 0))
        syst.attach_lead(lead_up)
        syst.attach_lead(lead_down)
    return syst.finalized()
import os
def list_files(root_dir, mindepth = 1, maxdepth = float('inf'), filter_ext=[], return_relative_path=False):
    """
    Usage:
        d = list_files(rootdir, mindepth = 1, maxdepth = 2)
    This returns a list of all files of a directory, including all files in
    subdirectories. Full paths are returned unless return_relative_path is set.
    WARNING: this may create a very large list if many files exists in the
    directory and subdirectories. Make sure you set the maxdepth appropriately.
    rootdir = existing directory to start
    mindepth = int: the level to start, 1 is start at root dir, 2 is start
        at the sub directories of the root dir, and-so-on-so-forth.
    maxdepth = int: the level which to report to. Example, if you only want
        in the files of the sub directories of the root dir,
        set mindepth = 2 and maxdepth = 2. If you only want the files
        of the root dir itself, set mindepth = 1 and maxdepth = 1
    filter_ext(list, optional) : filter files ex. [.jpg, .png]
    return_relative_path(bool): Default false. If true return relative path else return absolute path
    """
    root_dir = os.path.normcase(root_dir)
    file_paths = []
    root_depth = root_dir.rstrip(os.path.sep).count(os.path.sep) - 1
    lowered_filter_ext = tuple(ext.lower() for ext in filter_ext)
    # PERF FIX: the original wrapped os.walk in sorted(), which exhausts the
    # whole walk up front and made directory pruning impossible. Iterating
    # directly and sorting dirs/files in place keeps the traversal
    # deterministic (lexicographic depth-first) while allowing pruning.
    for abs_dir, dirs, files in os.walk(root_dir):
        dirs.sort()
        files.sort()
        depth = abs_dir.count(os.path.sep) - root_depth
        if depth >= maxdepth:
            # Prune: everything below this level would exceed maxdepth.
            del dirs[:]
        if mindepth <= depth <= maxdepth:
            for filename in files:
                if lowered_filter_ext and not filename.lower().endswith(lowered_filter_ext):
                    continue
                if return_relative_path:
                    rel_dir = os.path.relpath(abs_dir, root_dir)
                    if rel_dir == ".":
                        file_paths.append(filename)
                    else:
                        file_paths.append(os.path.join(rel_dir, filename))
                else:
                    # append full absolute path
                    file_paths.append(os.path.join(abs_dir, filename))
    return file_paths
def get_state_x1_pure_state_vector() -> np.ndarray:
    """Returns the pure state vector for :math:`|-\\rangle`.
    :math:`|-\\rangle := \\frac{1}{\\sqrt{2}} (|0\\rangle - |1\\rangle)`
    Returns
    -------
    np.ndarray
        the pure state vector.
    """
    amplitude = 1 / np.sqrt(2)
    return np.array([amplitude, -amplitude], dtype=np.complex128)
def user_create(user_data):
    """Create a user in the database and return the created object.

    ``user_data`` is unpacked as keyword arguments to ``create_user``
    (e.g. username, email, password).
    """
    user_model = get_user_model()
    user = user_model.objects.create_user(**user_data)
    return user
from typing import Counter
def shannon_entropy(text: str) -> float:
    """
    Calculate the Shannon entropy (in bits) of ``text``.

    Same definition as in the feature processor for feature extraction.
    The empty string has entropy 0.0.
    """
    # BUG FIX: ``log`` was never imported here (NameError on any non-empty
    # input). Use math.log2 via a local import so the module stays drop-in.
    from math import log2
    if not text:
        return 0.0
    counts = Counter(text)
    total = len(text)
    entropy = 0.0
    for count in counts.values():
        ratio = count / total
        entropy -= ratio * log2(ratio)
    return entropy
def solve(inputmatrix):
    """
    Solution to the data in 4be741c5.json posed by the Abstraction and
    Reasoning Corpus (ARC).

    The grid contains colour bands running either along rows or columns.
    If the first row holds more than one distinct colour, the bands are
    vertical: return a single row listing each colour once, in order of
    first appearance. Otherwise the bands are horizontal: return one
    single-element row per distinct colour in the first column.
    """
    grid = np.array([np.array(row) for row in inputmatrix])
    first_row = grid[:1][0]
    if len(np.unique(first_row)) > 1:
        # Colours vary along the first row: report them left-to-right.
        first_seen = sorted(np.unique(first_row, return_index=True)[1])
        return [[first_row[i] for i in first_seen]]
    # Colours vary along the first column: report them top-to-bottom.
    first_col = grid[:, 0]
    first_seen = sorted(np.unique(first_col, return_index=True)[1])
    return [[first_col[i]] for i in first_seen]
def compute_rewards(s1, s2):
    """
    Compute per-goal rewards for a state transition.

    input: s1 - state before action
           s2 - state after action
    The reward for each goal in TASKS is how much closer (in Euclidean
    distance) the action moved the state toward that goal.
    """
    return [
        np.linalg.norm(s1 - goal) - np.linalg.norm(s2 - goal)
        for goal in TASKS
    ]
import ipaddress
def decode(i_dunno):
    """
    Decode an I-DUNNO representation into an ipaddress.IPv6Address or an ipaddress.IPv4Address object.
    A ValueError is raised if decoding fails due to invalid notation or resulting IP address is invalid.
    The output of this function SHOULD NOT be presented to humans, as recommended by RFC8771.
    """
    bits = []
    for char in i_dunno.decode('utf-8'):
        num = ord(char)
        # Recover the bit payload from the deliberate UTF-8 length choice:
        # a codepoint contributes `length` bits iff it falls in that
        # UTF-8 class's codepoint range.
        for minimum, length in utf8_lengths:
            if num < (1 << length) and (minimum == 0 or num >= (1 << minimum)):
                bits += int_to_bits(num, length)
                break
        else:
            # No UTF-8 class matched: not a valid I-DUNNO codepoint.
            raise ValueError('invalid I-DUNNO')
    addr = bits_to_bytes(bits)
    # 16 decoded bytes mean IPv6, 4 mean IPv4; anything else is malformed.
    if len(addr) == 16:
        cls = ipaddress.IPv6Address
    elif len(addr) == 4:
        cls = ipaddress.IPv4Address
    else:
        raise ValueError('invalid I-DUNNO')
    try:
        return cls(addr)
    except ipaddress.AddressValueError:
        raise ValueError('invalid IP address')
def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops)

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    loss = 0.0
    dW = np.zeros_like(W)
    num_train = X.shape[0]
    num_class = W.shape[1]
    for i in range(num_train):
        scores = np.dot(X[i], W)
        # Shift scores for numerical stability (softmax is shift-invariant).
        scores -= np.max(scores)
        sum_exponential = np.sum(np.exp(scores))
        # Cross-entropy term: log(sum_j exp(s_j)) - s_{y_i}
        loss = loss + np.log(sum_exponential) - scores[y[i]]
        dW[:, y[i]] -= X[i]
        # Gradient of the log-sum-exp term. Reuses sum_exponential instead
        # of recomputing the identical sum (the original's Total_scores_exp);
        # the stray `pass` statement was also removed.
        for j in range(num_class):
            dW[:, j] += np.exp(scores[j]) / sum_exponential * X[i]
    # Average over the batch and add L2 regularization.
    loss = (loss / num_train) + 0.5 * reg * np.sum(W**2)
    dW /= num_train
    dW += reg * W
    return loss, dW
def location_descriptors():
    """Provide possible templated_sequence input.

    Returns a list of VRS-style ``LocationDescriptor`` fixtures covering
    single-residue and ranged ``SequenceLocation``s on NC_000001.11,
    single-band and ranged chromosome locations on chr12, and two
    protein ``SequenceLocation``s keyed by ga4gh sequence digests.
    """
    return [
        # Single-residue sequence location (interbase 15455..15456).
        {
            "id": "NC_000001.11:15455",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ncbi:NC_000001.11",
                "interval": {
                    "start": {
                        "type": "Number",
                        "value": 15455
                    },
                    "end": {
                        "type": "Number",
                        "value": 15456
                    }
                },
                "type": "SequenceLocation"
            },
            "label": "NC_000001.11:15455",
        },
        # Single-residue sequence location (interbase 15565..15566).
        {
            "id": "NC_000001.11:15566",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ncbi:NC_000001.11",
                "interval": {
                    "start": {
                        "type": "Number",
                        "value": 15565
                    },
                    "end": {
                        "type": "Number",
                        "value": 15566
                    }
                },
                "type": "SequenceLocation"
            },
            "label": "NC_000001.11:15566",
        },
        # Single cytoband on chromosome 12 (human, taxonomy:9606).
        {
            "id": "chr12:p12.1",
            "type": "LocationDescriptor",
            "location": {
                "species_id": "taxonomy:9606",
                "chr": "12",
                "interval": {"start": "p12.1", "end": "p12.1"}
            },
            "label": "chr12:p12.1",
        },
        # Single cytoband on chromosome 12 (human, taxonomy:9606).
        {
            "id": "chr12:p12.2",
            "type": "LocationDescriptor",
            "location": {
                "species_id": "taxonomy:9606",
                "chr": "12",
                "interval": {"start": "p12.2", "end": "p12.2"}
            },
            "label": "chr12:p12.2",
        },
        # Ranged sequence location spanning the two residues above.
        {
            "id": "NC_000001.11:15455-15566",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ncbi:NC_000001.11",
                "interval": {
                    "start": {
                        "type": "Number",
                        "value": 15455
                    },
                    "end": {
                        "type": "Number",
                        "value": 15566
                    }
                },
                "type": "SequenceLocation"
            },
            "label": "NC_000001.11:15455-15566",
        },
        # Ranged cytoband location spanning p12.1 through p12.2.
        {
            "id": "chr12:p12.1-p12.2",
            "type": "LocationDescriptor",
            "location": {
                "species_id": "taxonomy:9606",
                "chr": "12",
                "interval": {"start": "p12.1", "end": "p12.2"}
            },
            "label": "chr12:p12.1-p12.2",
        },
        # Protein sequence location keyed by a ga4gh digest (no label).
        {
            "id": "fusor.location_descriptor:NP_001123617.1",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ga4gh:SQ.sv5egNzqN5koJQH6w0M4tIK9tEDEfJl7",
                "type": "SequenceLocation",
                "interval": {
                    "start": {
                        "type": "Number",
                        "value": 171
                    },
                    "end": {
                        "type": "Number",
                        "value": 204
                    }
                }
            }
        },
        # Protein sequence location keyed by a ga4gh digest (no label).
        {
            "id": "fusor.location_descriptor:NP_002520.2",
            "type": "LocationDescriptor",
            "location": {
                "sequence_id": "ga4gh:SQ.vJvm06Wl5J7DXHynR9ksW7IK3_3jlFK6",
                "type": "SequenceLocation",
                "interval": {
                    "start": {
                        "type": "Number",
                        "value": 510
                    },
                    "end": {
                        "type": "Number",
                        "value": 781
                    }
                }
            }
        }
    ]
from typing import Optional
def positional_features_gamma(positions: tf.Tensor,
                              feature_size: int,
                              seq_length: Optional[int] = None,
                              bin_size: Optional[int] = None,
                              stddev=None,
                              start_mean=None):
  """Positional features computed using the gamma distributions."""
  del bin_size  # Unused.
  if seq_length is None:
    seq_length = tf.reduce_max(tf.abs(positions)) + 1
  if stddev is None:
    stddev = seq_length / (2 * feature_size)
  if start_mean is None:
    start_mean = seq_length / feature_size
  # One gamma density per feature, with means evenly spaced over the sequence.
  means = _prepend_dims(tf.linspace(start_mean, seq_length, num=feature_size),
                        positions.shape.rank)
  concentrations = (means / stddev)**2
  rates = means / stddev**2
  abs_positions = tf.abs(tf.cast(positions, dtype=tf.float32))[..., tf.newaxis]
  densities = gamma_pdf(abs_positions, concentrations, rates)
  densities += 1e-8  # To ensure numerical stability.
  features = densities / tf.reduce_max(densities)
  tf.TensorShape(features.shape).assert_is_compatible_with(
      positions.shape + [feature_size])
  return features
from typing import List
def _get_frame_data(mapAPI: MapAPI, frame: np.ndarray, agents_frame: np.ndarray,
                    tls_frame: np.ndarray) -> FrameVisualization:
    """Get visualisation objects for the current frame.
    :param mapAPI: mapAPI object (used for lanes, crosswalks etc..)
    :param frame: the current frame (used for ego)
    :param agents_frame: agents in this frame
    :param tls_frame: the tls of this frame
    :return: A FrameVisualization object. NOTE: trajectory are not included here
    """
    ego_xy = frame["ego_translation"][:2]
    #################
    # plot lanes
    # Only consider lanes within 50 units of the ego position.
    lane_indices = indices_in_bounds(ego_xy, mapAPI.bounds_info["lanes"]["bounds"], 50)
    active_tl_ids = set(filter_tl_faces_by_status(tls_frame, "ACTIVE")["face_id"].tolist())
    lanes_vis: List[LaneVisualization] = []
    for idx, lane_idx in enumerate(lane_indices):
        lane_idx = mapAPI.bounds_info["lanes"]["ids"][lane_idx]
        # Colour the lane by its active traffic-light face, grey if none.
        lane_tl_ids = set(mapAPI.get_lane_traffic_control_ids(lane_idx))
        lane_colour = "gray"
        for tl_id in lane_tl_ids.intersection(active_tl_ids):
            lane_colour = COLORS[mapAPI.get_color_for_face(tl_id)]
        lane_coords = mapAPI.get_lane_coords(lane_idx)
        left_lane = lane_coords["xyz_left"][:, :2]
        # Right boundary is reversed so the two sides form a closed polygon.
        right_lane = lane_coords["xyz_right"][::-1, :2]
        lanes_vis.append(LaneVisualization(xs=np.hstack((left_lane[:, 0], right_lane[:, 0])),
                                           ys=np.hstack((left_lane[:, 1], right_lane[:, 1])),
                                           color=lane_colour))
    #################
    # plot crosswalks
    crosswalk_indices = indices_in_bounds(ego_xy, mapAPI.bounds_info["crosswalks"]["bounds"], 50)
    crosswalks_vis: List[CWVisualization] = []
    for idx in crosswalk_indices:
        crosswalk = mapAPI.get_crosswalk_coords(mapAPI.bounds_info["crosswalks"]["ids"][idx])
        crosswalks_vis.append(CWVisualization(xs=crosswalk["xyz"][:, 0],
                                              ys=crosswalk["xyz"][:, 1],
                                              color="yellow"))
    #################
    # plot ego and agents
    # Ego is prepended as agent 0 so a single box-computation pass covers all.
    agents_frame = np.insert(agents_frame, 0, get_ego_as_agent(frame))
    box_world_coords = get_box_world_coords(agents_frame)
    # ego
    ego_vis = EgoVisualization(xs=box_world_coords[0, :, 0], ys=box_world_coords[0, :, 1],
                               color="red", center_x=agents_frame["centroid"][0, 0],
                               center_y=agents_frame["centroid"][0, 1])
    # agents
    # Drop the prepended ego before building the agent visualisations.
    agents_frame = agents_frame[1:]
    box_world_coords = box_world_coords[1:]
    agents_vis: List[AgentVisualization] = []
    for agent, box_coord in zip(agents_frame, box_world_coords):
        # Label each agent with its most likely perception class.
        label_index = np.argmax(agent["label_probabilities"])
        agent_type = PERCEPTION_LABELS[label_index]
        agents_vis.append(AgentVisualization(xs=box_coord[..., 0],
                                             ys=box_coord[..., 1],
                                             color="#1F77B4" if agent_type not in COLORS else COLORS[agent_type],
                                             track_id=agent["track_id"],
                                             agent_type=PERCEPTION_LABELS[label_index],
                                             prob=agent["label_probabilities"][label_index]))
    return FrameVisualization(ego=ego_vis, agents=agents_vis, lanes=lanes_vis,
                              crosswalks=crosswalks_vis, trajectories=[])
def calculate_gc(x):
    """Calculates the GC content of DNA sequence x.
    x: a string composed only of A's, T's, G's, and C's."""
    x = x.upper()
    # Count each base once (the original recounted G and C in the denominator).
    gc = x.count('G') + x.count('C')
    at = x.count('A') + x.count('T')
    # float() keeps true division under Python 2 as well.
    return float(gc) / (gc + at)
import os
def listdir(path):
    """
    List the contents of a folder, split into subfolders and files.
    :param str path: folder to enumerate
    :returns: directory content as (folder names, file names)
    :rtype: tuple
    example::
        dirs, files = xbmcvfs.listdir(path)
    """
    resolved = safe_path(path)
    dirs, files = [], []
    for entry in os.listdir(resolved):
        # Classify each entry by whether it is a regular file.
        bucket = files if os.path.isfile(os.path.join(resolved, entry)) else dirs
        bucket.append(entry)
    return dirs, files
def no_float_zeros(v):
    """
    Collapse a float with no fractional part to an int; leave anything else unchanged.
    """
    return int(v) if v % 1 == 0 else v
import torch
def quat_mult(q_1, q_2):
    """Hamilton product of two batches of quaternions.

    Both inputs are (N, 4) tensors laid out as (w, x, y, z); the result is
    the (N, 4) batch of component-wise quaternion products q_1 * q_2.
    """
    w1, x1, y1, z1 = torch.unbind(q_1, dim=1)
    w2, x2, y2, z2 = torch.unbind(q_2, dim=1)
    return torch.stack(
        (
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
            w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
            w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
            w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
        ),
        dim=1,
    )
def _flip_dict_keys_and_values(d):
"""Switches the keys and values of a dictionary. The input dicitonary is not modified.
Output:
dict
"""
output = {}
for key, value in d.items():
output[value] = key
return output | b861fc3bd194d26ee05b9a56faad3394939064bf | 30,455 |
from typing import Tuple
from typing import Optional
def set_dative_bonds(
    mol: Chem.rdchem.Mol, from_atoms: Tuple[int, int] = (7, 8)
) -> Optional[Chem.rdchem.Mol]:
    """Replaces some single bonds between metals and atoms with atomic numbers in fromAtoms
    with dative bonds. The replacement is only done if the atom has "too many" bonds.
    Arguments:
        mol: molecule with bond to modify
        from_atoms: List of atoms (symbol or atomic number) to consider for bond replacement.
            By default, only Nitrogen (7) and Oxygen (8) are considered.
    Returns:
        The modified molecule.
    """
    # Work on an editable copy; strict=False tolerates unusual valences.
    rwmol = Chem.RWMol(mol)  # type: ignore
    rwmol.UpdatePropertyCache(strict=False)
    # is_transition_metal is a module-level helper — presumably tests the
    # atomic number against the transition-metal blocks; confirm at its def.
    metals = [at for at in rwmol.GetAtoms() if is_transition_metal(at)]
    for metal in metals:
        for nbr in metal.GetNeighbors():
            # Replace only when: the neighbour matches from_atoms (by number
            # or symbol), it exceeds its default valence ("too many" bonds),
            # and the metal-neighbour bond is currently a single bond.
            if (nbr.GetAtomicNum() in from_atoms or nbr.GetSymbol() in from_atoms) and (
                nbr.GetExplicitValence() > PERIODIC_TABLE.GetDefaultValence(nbr.GetAtomicNum())
                and rwmol.GetBondBetweenAtoms(nbr.GetIdx(), metal.GetIdx()).GetBondType()
                == SINGLE_BOND
            ):
                # Swap the single bond for a dative bond (neighbour -> metal).
                rwmol.RemoveBond(nbr.GetIdx(), metal.GetIdx())
                rwmol.AddBond(nbr.GetIdx(), metal.GetIdx(), DATIVE_BOND)
    return rwmol
from typing import Optional
def _b2s(b: Optional[bool]) -> Optional[str]:
"""转换布尔值为字符串。"""
return b if b is None else str(b).lower() | 6030b7fd88b10c4bdccd12abd1f042c518e8a03f | 30,457 |
from typing import Set
def color_csq(all_csq: Set[str], mane_csq: Set[str]) -> str:
    """
    Render the full set of consequences for the HTML consequence cell.

    Consequences present on a MANE transcript are written in bold; all
    others are written in bold red.
    :param all_csq: every consequence observed
    :param mane_csq: the subset occurring on MANE transcripts
    :return: the comma-joined string filling the consequence box in the HTML
    """
    rendered = [
        STRONG_STRING.format(content=csq)
        if csq in mane_csq
        else COLOR_STRING.format(color=COLORS['1'], content=csq)
        for csq in all_csq
    ]
    return ', '.join(rendered)
def hasattrs(object, *names):
    """
    Takes in an object and a variable length amount of named attributes,
    and checks to see if the object has each property. If any of the
    attributes are missing, this returns false.
    :param object: an object that may or may not contain the listed attributes
    :param names: a variable amount of attribute names to check for
    :return: True if the object contains each named attribute, false otherwise
    """
    # NOTE: the parameter name shadows the builtin `object`; kept unchanged
    # for backward compatibility with keyword callers.
    # all() short-circuits on the first missing attribute, matching the
    # original early-return loop.
    return all(hasattr(object, name) for name in names)
def pano_stretch_image(pano_img, kx, ky, kz):
    """
    Note that this is the inverse mapping, which refers to Equation 3 in HorizonNet paper (the coordinate system in
    the paper is different from here, xz needs to be swapped)
    :param pano_img: a panorama image, shape must be [h,w,c]
    :param kx: stretching along left-right direction
    :param ky: stretching along up-down direction
    :param kz: stretching along front-back direction
    :return: the stretched panorama image, same shape as the input
    """
    w = pano_img.shape[1]
    h = pano_img.shape[0]
    # Per-column sin/cos of longitude and per-row tan of latitude for the pano grid.
    sin_lon, cos_lon, tan_lat = prepare_stretch(w, h)
    # Inverse-map each output (lon, lat) to the source angles under the stretch.
    n_lon = np.arctan2(sin_lon * kz / kx, cos_lon)
    n_lat = np.arctan(tan_lat[..., None] * np.sin(n_lon) / sin_lon * kx / ky)
    # Convert the source angles back to fractional pixel coordinates.
    n_pu = lonlat2pixel(n_lon, w=w, axis=0, need_round=False)
    n_pv = lonlat2pixel(n_lat, h=h, axis=1, need_round=False)
    pixel_map = np.empty((h, w, 2), dtype=np.float32)
    pixel_map[..., 0] = n_pu
    pixel_map[..., 1] = n_pv
    map1 = pixel_map[..., 0]
    map2 = pixel_map[..., 1]
    # using wrap mode because it is continues at left or right of panorama
    new_img = cv2.remap(pano_img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_WRAP)
    return new_img
def blue_process(infile, masterbias=None, error=False, rdnoise=None, oscan_correct=False):
    """Process a blue frame

    Reads amplifier/overscan/trim sections from the FITS header when the
    frame was read out with a single amplifier; falls back to hard-coded
    two-amplifier sections otherwise.  Returns the processed CCDData.
    """
    # check to make sure it is a blue file
    ccd = ccdproc.CCDData.read(infile, unit=u.adu)
    # Amplifier-count keyword differs between header versions.
    try:
        namps = ccd.header['CCDAMPS']
    except KeyError:
        namps = ccd.header['CCDNAMPS']
    # reduce file
    try:
        blueamp = [ccd.header['AMPSEC'].strip()]
        if oscan_correct:
            bluescan = [ccd.header['BIASSEC'].strip()]
        else:
            bluescan = [None]
        bluetrim = [ccd.header['DATASEC'].strip()]
        #ugly hack for when two amps
        # Deliberately jump to the except branch for multi-amp frames.
        if namps>1: raise Exception()
    except:
        # NOTE(review): bare except also swallows missing-keyword errors in
        # addition to the deliberate Exception above — confirm that is intended.
        # Hard-coded two-amplifier geometry for the blue detector.
        blueamp = ['[1:1050,:]', '[1051:2100,:]']
        if oscan_correct:
            bluescan = ['[1:26,:]', '[1025:1050,:]']
        else:
            bluescan = [None, None]
        bluetrim = ['[27:1050,:]', '[1:1024,:]']
    flip = True
    ccd = hrs_process(infile, ampsec=blueamp, oscansec=bluescan,
                      trimsec=bluetrim, masterbias=masterbias, error=error,
                      rdnoise=None, flip=flip)
    #this is in place to deal with changes from one amp to two
    # Single-amp frames are mirrored left-right (data, mask and uncertainty).
    if namps == 1:
        ccd.data = ccd.data[:, ::-1]
        if (ccd.mask is not None):
            ccd.mask = ccd.mask[:, ::-1]
        if (ccd.uncertainty is not None):
            ccd.uncertainty = ccd.uncertainty[:, ::-1]
    return ccd
def _rand_sparse(m, n, density, format='csr'):
"""Helper function for sprand, sprandn"""
nnz = max(min(int(m*n*density), m*n), 0)
row = np.random.random_integers(low=0, high=m-1, size=nnz)
col = np.random.random_integers(low=0, high=n-1, size=nnz)
data = np.ones(nnz, dtype=float)
# duplicate (i,j) entries will be summed together
return sp.sparse.csr_matrix((data, (row, col)), shape=(m, n)) | 08221cdc9798e0ddf9266b5bf2ca3dfe21451278 | 30,462 |
import os
def return_filepath_or_absent(acl_id):
    """Build the .txt path for *acl_id* inside the input directory and return
    it if the file is present there, otherwise None (NaN)."""
    filepath = '{}/{}.txt'.format(acl_filepath, acl_id)
    return filepath if os.path.exists(filepath) else None
import random
def _generate_trace(distance):
    """
    Generate a mouse-drag trajectory covering *distance* pixels.
    :param distance: horizontal distance (pixels) the slider must travel
    :return: list of [x, y] string coordinate pairs simulating a human drag
    """
    # Initial velocity
    v = 0
    # Displacement/track list; each element represents the displacement over ~0.02s
    tracks_list = []
    # Current displacement
    current = 0
    while current < distance - 3:
        # The smaller the acceleration, the smaller the displacement per unit
        # time and the more detailed the simulated trajectory
        a = random.randint(10000, 12000)  # accelerated motion
        # Initial velocity for this step
        v0 = v
        t = random.randint(9, 18)
        s = v0 * t / 1000 + 0.5 * a * ((t / 1000) ** 2)
        # Current position
        current += s
        # Velocity has reached v; it becomes the initial velocity of the next step
        v = v0 + a * t / 1000
        # Append to the track list
        if current < distance:
            tracks_list.append(round(current))
    # Decelerate and slide slowly to the end, one pixel at a time
    if round(current) < distance:
        for i in range(round(current) + 1, distance + 1):
            tracks_list.append(i)
    else:
        for i in range(tracks_list[-1] + 1, distance + 1):
            tracks_list.append(i)
    # Accumulate small random vertical jitter for each horizontal step.
    y_list = []
    zy = 0
    for j in range(len(tracks_list)):
        y = random.choice(
            [0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
             -1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0])
        zy += y
        y_list.append(zy)
        j += 1
    # Starting y offset; repeated points simulate the initial press-and-hold.
    base_y = str(-random.randint(330, 350))
    trace = [['0', base_y], ['0', base_y], ['0', base_y]]
    for index, x in enumerate(tracks_list):
        trace.append([str(x), str(y_list[index])])
    # Repeat the final point to simulate holding before release.
    t_last = trace[-1]
    for _ in range(random.randint(4, 6)):
        trace.append(t_last)
    return trace
import sqlite3
def update_nt_uid_acc(cachepath, uid, accession):
    """Update nt UID GenBank accession."""
    # sqlite3.connect requires a plain string (PosixPath unsupported in Py3.6).
    conn = sqlite3.connect(str(cachepath))
    # The connection context manager commits the UPDATE on success.
    with conn:
        cursor = conn.cursor()
        cursor.execute(SQL_UPDATE_UID_ACC, (accession, uid))
        results = [cursor.fetchone()]
    return results
def bool_env(env_val):
    """ check for boolean values

    Maps values found in TRUE_LIST/FALSE_LIST to True/False.  Any other
    truthy value is returned unchanged; any other falsy value yields None
    (implicit bare return).
    """
    if env_val:
        if env_val in TRUE_LIST:
            return True
        if env_val in FALSE_LIST:
            return False
        # print("Return:%s" % env_val)
        # Unrecognized truthy value: pass it through unchanged.
        return env_val
    else:
        # NOTE(review): falsy values such as '' or 0 are still tested against
        # FALSE_LIST here — presumably FALSE_LIST contains such entries;
        # confirm against its definition.
        if env_val in FALSE_LIST:
            return False
        # print("Returning:%s" % env_val)
        # Implicitly returns None for falsy values not in FALSE_LIST.
        return
import json
def try_parse_json(json_):
    """Deserialize a JSON string, returning False on empty or invalid input.
    :param str json_: JSON document in str representation.
    :rtype: parsed value (:class:`dict` for objects) on success, otherwise False.
    """
    if json_:
        try:
            return json.loads(json_)
        except ValueError:
            pass
    return False
# Thin generated-style wrapper: forwards all arguments to the C binding in _ra.
def svn_ra_get_file(*args):
    """
    svn_ra_get_file(svn_ra_session_t session, char path, svn_revnum_t revision,
        svn_stream_t stream, apr_pool_t pool) -> svn_error_t
    """
    # Delegates directly; argument validation happens in the native layer.
    return _ra.svn_ra_get_file(*args)
def version_microservices(full=True):
    """
    Display Zoomdata microservice packages version.
    CLI Example:
    full : True
        Return full version. If set False, return only short version (X.Y.Z).
    .. code-block:: bash
        salt '*' zoomdata.version_microservices
    """
    ms_version = ''
    # Only the first installed package is queried — the original loop always
    # broke after its first iteration, so the loop was dead control flow.
    first_pkg = next(iter(list_pkgs_microservices()), None)
    if first_pkg is not None:
        # pylint: disable=undefined-variable
        ms_version = __salt__['pkg.version'](first_pkg)
    if not full:
        # Strip the package release suffix, keeping only X.Y.Z.
        # ''.split('-')[0] == '' preserves the no-packages behaviour.
        ms_version = ms_version.split('-')[0]
    return ms_version
import os
def permissions(file):
    """
    Return a dict describing the owner/group/others permissions of a given file.
    """
    octal = {'0': 'no permission', '1': 'execute', '2': 'write', '3': 'write and execute', '4': 'read', '5': 'read and execute', '6': 'read and write', '7': 'read, write and execute'}
    correct_path = get_correct_path(file)
    if not os.path.exists(correct_path):
        return {'information': 'Unable to get permissions'}
    octal_permissions = permissions_in_oct(file)
    # Characters 2-4 of the octal string are the owner/group/others digits.
    return {
        'owner': octal[octal_permissions[2]],
        'group': octal[octal_permissions[3]],
        'others': octal[octal_permissions[4]],
    }
import os
def gatk_variant_recalibrator(job,
                              mode,
                              vcf,
                              ref_fasta, ref_fai, ref_dict,
                              annotations,
                              hapmap=None, omni=None, phase=None, dbsnp=None, mills=None,
                              max_gaussians=4,
                              unsafe_mode=False):
    """
    Runs either SNP or INDEL variant quality score recalibration using GATK VariantRecalibrator. Because the VQSR method
    models SNPs and INDELs differently, VQSR must be run separately for these variant types.
    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str mode: Determines variant recalibration mode (SNP or INDEL)
    :param str vcf: FileStoreID for input VCF file
    :param str ref_fasta: FileStoreID for reference genome fasta
    :param str ref_fai: FileStoreID for reference genome index file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :param list[str] annotations: List of GATK variant annotations to filter on
    :param str hapmap: FileStoreID for HapMap resource file, required for SNP VQSR
    :param str omni: FileStoreID for Omni resource file, required for SNP VQSR
    :param str phase: FileStoreID for 1000G resource file, required for SNP VQSR
    :param str dbsnp: FilesStoreID for dbSNP resource file, required for SNP and INDEL VQSR
    :param str mills: FileStoreID for Mills resource file, required for INDEL VQSR
    :param int max_gaussians: Number of Gaussians used during training, default is 4
    :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
    :return: FileStoreID for the variant recalibration table, tranche file, and plots file
    :rtype: tuple
    """
    mode = mode.upper()
    # Map of in-container filenames to Toil FileStoreIDs; resource files are
    # added below depending on the mode.
    inputs = {'genome.fa': ref_fasta,
              'genome.fa.fai': ref_fai,
              'genome.dict': ref_dict,
              'input.vcf': vcf}
    # Refer to GATK documentation for description of recommended parameters:
    # https://software.broadinstitute.org/gatk/documentation/article?id=1259
    # https://software.broadinstitute.org/gatk/documentation/article?id=2805
    # This base command includes parameters for both INDEL and SNP VQSR.
    command = ['-T', 'VariantRecalibrator',
               '-R', 'genome.fa',
               '-input', 'input.vcf',
               '-tranche', '100.0',
               '-tranche', '99.9',
               '-tranche', '99.0',
               '-tranche', '90.0',
               '--maxGaussians', str(max_gaussians),
               '-recalFile', 'output.recal',
               '-tranchesFile', 'output.tranches',
               '-rscriptFile', 'output.plots.R']
    # Parameters and resource files for SNP VQSR.
    if mode == 'SNP':
        command.extend(
            ['-resource:hapmap,known=false,training=true,truth=true,prior=15.0', 'hapmap.vcf',
             '-resource:omni,known=false,training=true,truth=true,prior=12.0', 'omni.vcf',
             '-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
             '-resource:1000G,known=false,training=true,truth=false,prior=10.0', '1000G.vcf',
             '-mode', 'SNP'])
        inputs['hapmap.vcf'] = hapmap
        inputs['omni.vcf'] = omni
        inputs['dbsnp.vcf'] = dbsnp
        inputs['1000G.vcf'] = phase
    # Parameters and resource files for INDEL VQSR
    elif mode == 'INDEL':
        command.extend(
            ['-resource:mills,known=false,training=true,truth=true,prior=12.0', 'mills.vcf',
             '-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
             '-mode', 'INDEL'])
        inputs['mills.vcf'] = mills
        inputs['dbsnp.vcf'] = dbsnp
    else:
        raise ValueError('Variant filter modes can be SNP or INDEL, got %s' % mode)
    for annotation in annotations:
        command.extend(['-an', annotation])
    if unsafe_mode:
        command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
    # Delay reading in files until function is configured
    work_dir = job.fileStore.getLocalTempDir()
    # NOTE(review): dict.iteritems() is Python 2 only — use .items() if this
    # module moves to Python 3.
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    job.fileStore.logToMaster('Running GATK VariantRecalibrator on {mode}s using the following annotations:\n'
                              '{annotations}'.format(mode=mode, annotations='\n'.join(annotations)))
    # NOTE(review): 'log-driver' looks like it should be '--log-driver' —
    # confirm against the Docker CLI before changing.
    docker_parameters = ['--rm', 'log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
    dockerCall(job=job, workDir=work_dir,
               parameters=command,
               tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               dockerParameters=docker_parameters)
    # Export the recalibration table, tranches and R plots back to the FileStore.
    recal_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.recal'))
    tranches_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.tranches'))
    plots_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.plots.R'))
    return recal_id, tranches_id, plots_id
import os
def generate_times(fnames, sat_id, freq='1S'):
"""Construct list of times for simulated instruments
Parameters
----------
fnames : (list)
List of filenames. Currently, only the first is used. Does not
support multi-file days as of yet.
sat_id : (str or NoneType)
Instrument satellite ID (accepts '' or a number (i.e., '10'), which
specifies the number of data points to include in the test instrument)
freq : string
Frequency of temporal output, compatible with pandas.date_range
[default : '1S']
Outputs
-------
uts : (array)
Array of integers representing uts for a given day
index : (DatetimeIndex)
The DatetimeIndex to be used in the pysat test instrument objects
date : (datetime)
The requested date reconstructed from the fake file name
"""
# TODO: Expand for multi-file days
# grab date from filename
parts = os.path.split(fnames[0])[-1].split('-')
yr = int(parts[0])
month = int(parts[1])
day = int(parts[2][0:2])
date = pysat.datetime(yr, month, day)
# Create one day of data at desired frequency
index = pds.date_range(start=date, end=date+pds.DateOffset(seconds=86399),
freq=freq)
# Allow numeric string to select first set of data
try:
index = index[0:int(sat_id)]
except ValueError:
# non-integer sat_id produces ValueError
pass
uts = index.hour*3600 + index.minute*60 + index.second
return uts, index, date | 9d033868167544053b9e0f669838a29442338f9b | 30,472 |
def KGCOVID19(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "current", **kwargs
) -> Graph:
    """Return kg-covid-19 graph
    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges
    cache_path = None
        Path to store graphs
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "current"
        Version to retrieve
        The available versions are:
            - 20200925
            - 20200927
            - 20200929
            - 20201001
            - 20201012
            - 20201101
            - 20201202
            - 20210101
            - 20210128
            - 20210201
            - 20210218
            - 20210301
            - 20210412
            - 20210725
            - 20210726
            - 20210727
            - 20210823
            - 20210902
            - 20211002
            - 20211102
            - 20211202
            - 20220102
            - 20220202
            - 20220217
            - 20220223
            - 20220225
            - 20220228
            - 20220328
            - 20220330
            - 20220402
            - 20220502
            - current
    References
    ----------
    Please cite:
    ```bib
    @article{reese2021kg,
      title={KG-COVID-19: a framework to produce customized knowledge graphs for COVID-19 response},
      author={Reese, Justin T and Unni, Deepak and Callahan, Tiffany J and Cappelletti, Luca and Ravanmehr, Vida and Carbon, Seth and Shefchek, Kent A and Good, Benjamin M and Balhoff, James P and Fontana, Tommaso and others},
      journal={Patterns},
      volume={2},
      number={1},
      pages={100155},
      year={2021},
      publisher={Elsevier}
    }
    ```
    """
    # Build the retriever for the "KGCOVID19" graph from the "kghub"
    # repository and immediately invoke it to download/load the graph.
    return AutomaticallyRetrievedGraph(
        "KGCOVID19", version, "kghub", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
        cache_path, cache_sys_var, kwargs
    )()
def _cryptodome_encrypt(cipher_factory, plaintext, key, iv):
"""Use a Pycryptodome cipher factory to encrypt data.
:param cipher_factory: Factory callable that builds a Pycryptodome Cipher
instance based on the key and IV
:type cipher_factory: callable
:param bytes plaintext: Plaintext data to encrypt
:param bytes key: Encryption key
:param bytes IV: Initialization vector
:returns: Encrypted ciphertext
:rtype: bytes
"""
encryptor = cipher_factory(key, iv)
return encryptor.encrypt(plaintext) | 99774fe7d4e4783af68e291213823096b9c2ac50 | 30,474 |
def is_gh_online():
    """
    Check if GitHub is online.
    The different services of GitHub are running as separate services
    and thus GitHub being online does not mean
    that all required parts are online.
    """
    # Delegates to the module-level helper; expects an HTTP 200 "OK"
    # response from the GitHub front page.
    return _is_online("github.com", "/", 200, "OK")
import math
def get_items_with_pool(
    source_key: str, count: int, start_index: int = 0, workers: int = 4
) -> Items:
    """Concurrently reads items from API using Pool
    Args:
        source_key: a job or collection key, e.g. '112358/13/21'
        count: a number of items to retrieve
        start_index: an index to read from
        workers: the number of separate processors to get data in
    Returns:
        A list of items
    """
    # Cap concurrency at the API connection limit while using at least
    # `workers` processes if the CPU count allows.
    active_connections_limit = 10
    processes_count = min(max(helpers.cpus_count(), workers), active_connections_limit)
    # Each process fetches one contiguous batch of this size.
    batch_size = math.ceil(count / processes_count)
    items = []
    with Pool(processes_count) as p:
        # starmap preserves batch order; each call reads `batch_size` items
        # starting at its own offset.
        results = p.starmap(
            partial(get_items, source_key, batch_size, child=True),
            zip([i for i in range(start_index, start_index + count, batch_size)]),
        )
    # Flatten per-process batches into a single ordered list.
    for items_batch in results:
        items.extend(items_batch)
    return items
import torch
def batch_data(words, sequence_length, batch_size):
    """
    Batch the neural network data using DataLoader
    :param words: The word ids of the TV scripts
    :param sequence_length: The sequence length of each batch
    :param batch_size: The size of each batch; the number of sequences in a batch
    :return: DataLoader with batched data
    """
    # Each feature row is a window of `sequence_length` word ids; the target
    # is the word id immediately following that window.
    num_features = len(words) - sequence_length
    # Allocate int64 arrays directly instead of filling a float array and
    # re-converting it (the original double-converted via np.array(..., int64)).
    train_x = np.zeros((num_features, sequence_length), dtype=np.int64)
    train_y = np.zeros(num_features, dtype=np.int64)
    for i in range(num_features):
        train_x[i] = words[i:i + sequence_length]
        train_y[i] = words[i + sequence_length]
    data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
    # Shuffle so each epoch sees the windows in a different order.
    return DataLoader(data, batch_size=batch_size, shuffle=True)
from typing import Tuple
def calc_portfolio_holdings(initial_investment: int, weights: pd.DataFrame, prices: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    Compute the holdings purchased by splitting an initial cash amount across
    assets according to the given weights (whole shares only).
    :param initial_investment: cash used to buy the portfolio (no partial shares)
    :param weights: single-row frame of per-asset weights keyed by symbol
    :param prices: single-row frame of share prices keyed by symbol
    :return: (dollar value of the holdings, number of shares), both single-row frames
    """
    symbols = weights.columns
    weight_vec: np.array = np.zeros(len(symbols))
    price_vec: np.array = np.zeros(len(symbols))
    for ix, sym in enumerate(symbols):
        weight_vec[ix] = weights[sym]
        price_vec[ix] = prices[sym]
    # Dollar budget per asset, floored to whole shares at current prices.
    cash_per_asset = weight_vec * float(initial_investment)
    share_counts = cash_per_asset // price_vec
    dollar_holdings = share_counts * price_vec
    holdings_df: pd.DataFrame = pd.DataFrame(dollar_holdings).transpose()
    holdings_df.columns = symbols
    shares_df: pd.DataFrame = pd.DataFrame(share_counts).transpose()
    shares_df.columns = symbols
    return holdings_df, shares_df
def ford_fulkerson(G, s, t, capacity='capacity'):
    """Find a maximum single-commodity flow using the Ford-Fulkerson
    algorithm.
    This is the legacy implementation of maximum flow. See Notes below.
    This algorithm uses Edmonds-Karp-Dinitz path selection rule which
    guarantees a running time of `O(nm^2)` for `n` nodes and `m` edges.
    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.
    s : node
        Source node for the flow.
    t : node
        Sink node for the flow.
    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.
    Returns
    -------
    R : NetworkX DiGraph
        The residual network after computing the maximum flow. This is a
        legacy implementation, se Notes and Examples.
    Raises
    ------
    NetworkXError
        The algorithm does not support MultiGraph and MultiDiGraph. If
        the input graph is an instance of one of these two classes, a
        NetworkXError is raised.
    NetworkXUnbounded
        If the graph has a path of infinite capacity, the value of a
        feasible flow on the graph is unbounded above and the function
        raises a NetworkXUnbounded.
    See also
    --------
    :meth:`maximum_flow`
    :meth:`minimum_cut`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`
    Notes
    -----
    This is a legacy implementation of maximum flow (before 1.9).
    This function used to return a tuple with the flow value and the
    flow dictionary. Now it returns the residual network resulting
    after computing the maximum flow, in order to follow the new
    interface to flow algorithms introduced in NetworkX 1.9.
    Note however that the residual network returned by this function
    does not follow the conventions for residual networks used by the
    new algorithms introduced in 1.9. This residual network has edges
    with capacity equal to the capacity of the edge in the original
    network minus the flow that went throught that edge. A dictionary
    with infinite capacity edges can be found as an attribute of the
    residual network.
    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.DiGraph()
    >>> G.add_edge('x','a', capacity=3.0)
    >>> G.add_edge('x','b', capacity=1.0)
    >>> G.add_edge('a','c', capacity=3.0)
    >>> G.add_edge('b','c', capacity=5.0)
    >>> G.add_edge('b','d', capacity=4.0)
    >>> G.add_edge('d','e', capacity=2.0)
    >>> G.add_edge('c','y', capacity=2.0)
    >>> G.add_edge('e','y', capacity=3.0)
    This function returns the residual network after computing the
    maximum flow. This network has graph attributes that contain:
    a dictionary with edges with infinite capacity flows, the flow
    value, and a dictionary of flows:
    >>> R = nx.ford_fulkerson(G, 'x', 'y')
    >>> # A dictionary with infinite capacity flows can be found as an
    >>> # attribute of the residual network
    >>> inf_capacity_flows = R.graph['inf_capacity_flows']
    >>> # There are also attributes for the flow value and the flow dict
    >>> flow_value = R.graph['flow_value']
    >>> flow_dict = R.graph['flow_dict']
    You can use the interface to flow algorithms introduced in 1.9 to get
    the output that the function ford_fulkerson used to produce:
    >>> flow_value, flow_dict = nx.maximum_flow(G, 'x', 'y',
    ...     flow_func=nx.ford_fulkerson)
    """
    # The heavy lifting happens in the implementation helper; it returns the
    # max-flow value alongside the legacy-format residual network.
    flow_value, R = ford_fulkerson_impl(G, s, t, capacity=capacity)
    # Rebuild the per-edge flow dictionary from the residual network.
    flow_dict = _create_flow_dict(G, R, capacity=capacity)
    # Expose legacy results as graph attributes (see docstring Examples).
    R.graph['flow_value'] = flow_value
    R.graph['flow_dict'] = flow_dict
    R.graph['algorithm'] = 'ford_fulkerson_legacy'
    return R
def index():
    """
    Render the main page of Stream4Flow.

    :return: Empty dictionary
    """
    # This view is stateless — do not persist the session for it.
    session.forget(response)
    return {}
from typing import Type
def create_temporary_table_sql(model: Type[Model]) -> str:
    """
    Build the SQL statement that represents the given model in the
    database as a temporary table.

    NOTE(review): the original comment said results are cached because the
    model never changes outside of tests, but no cache is applied in this
    block — presumably handled elsewhere; confirm.
    """
    # _meta is the model's metadata API, so protected-access checks are
    # deliberately disabled here.
    # pylint: disable=protected-access
    # Translate each concrete Field on the model into its column SQL.
    columns = [
        _column_sql(field)
        for field in model._meta.get_fields()  # noqa
        if isinstance(field, Field)
    ]
    definition = ", ".join(columns)
    return f'CREATE TEMPORARY TABLE "{model._meta.db_table}" ({definition})'
def field_paths(h5, key='externalFieldPath'):
    """
    Return the list of external-field paths recorded in an HDF5 handle.

    If *key* is missing from ``h5.attrs`` an empty list is returned.  A
    stored path containing the ``%T`` placeholder is expanded into one
    path per entry found under the prefix preceding ``%T``.
    """
    if key not in h5.attrs:
        return []
    stored = h5.attrs[key].decode('utf-8')
    if '%T' not in stored:
        return [stored]
    prefix = stored.split('%T')[0]
    return [prefix + entry for entry in list(h5[prefix])]
def get_quantifier(ch, input_iter):
    """
    Parse a quantifier from the input, where "ch" is the first character in the
    quantifier.

    Return the minimum number of occurrences permitted by the quantifier and
    either None or the next character from the input_iter if the next character
    is not part of the quantifier.
    """
    if ch in '*?+':
        # Simple quantifier: peek one character ahead; a '?' suffix makes
        # it non-greedy and is swallowed (reported as None).
        try:
            nxt, escaped = next(input_iter)
        except StopIteration:
            nxt = None
        if nxt == '?':
            nxt = None
        minimum = 1 if ch == '+' else 0
        return minimum, nxt
    # Curly-brace form: collect characters up to (and including) '}' …
    collected = []
    while ch != '}':
        ch, escaped = next(input_iter)
        collected.append(ch)
    # … then drop the closing brace and split out the bounds.
    bounds = ''.join(collected[:-1]).split(',')
    # Consume the trailing '?', if necessary.
    try:
        nxt, escaped = next(input_iter)
    except StopIteration:
        nxt = None
    if nxt == '?':
        nxt = None
    return int(bounds[0]), nxt
from typing import List
def get_defined_vars(
    operation: "OperationDefinitionNode",
) -> List["VariableNode"]:
    """
    Collect the VariableNodes declared by an operation definition node.

    Each VariableDefinitionNode attached to the operation wraps a single
    VariableNode; this helper unwraps them, preserving order.

    :param operation: the operation definition node to look through
    :type operation: "OperationDefinitionNode"
    :return: the VariableNodes declared by the operation
    :rtype: List["VariableNode"]
    """
    variables = []
    for definition in operation.variable_definitions:
        variables.append(definition.variable)
    return variables
def snv(img):
    """
    Standard normal variate (SNV) transformation of spectral data.

    Statistics are taken along axis 0: each slice is centred on the
    axis-0 mean and scaled by the axis-0 standard deviation.
    """
    centred = img - np.mean(img, axis=0)[np.newaxis, ...]
    return centred / np.std(img, axis=0)[np.newaxis, ...]
def fredkin(cell: int, live_count: int, neighbors: Neighbors = None) -> int:
    """Fredkin ("Replicator 2") Game of Life rule.

    The cell lives in the next generation exactly when the parity of
    (current value + live neighbour count) is odd.

    This rule can be specified using these strings:
    - ``B1357/S02468``
    - ``2468/1357``
    - ``fredkin``

    Parameters
    ----------
    cell: int
        Value of the current cell: ``1`` (alive) or ``0`` (dead).
    live_count: int
        Count of cells alive (``1``) around the current cell.  The value
        depends on the neighbourhood type; PyGoL uses the Moore
        neighbourhood by default, see:
        https://www.conwaylife.com/wiki/Cellular_automaton#Common_dimensions_and_neighborhoods
    neighbors: Iterator[Tuple[int, int, int]], optional
        Iterator yielding the value, the x- and the y-coordinate of the
        individual neighbors.  Unused by this rule; present only so all
        game rules share a uniform signature.

    Returns
    -------
    int
        Computed value of the current cell: ``1`` (alive) or ``0`` (dead).

    References
    ----------
    Find this rule in the LifeWiki:
    https://www.conwaylife.com/wiki/OCA:Replicator#Replicator_2
    """
    # Parity of (neighbours + self) decides life; % 2 is already 0 or 1.
    return (live_count + cell) % 2
def mad(arr):
    """Median Absolute Deviation: a "robust" analogue of the standard
    deviation that indicates the variability of the sample.

    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    # Drop masked entries (if any) before computing statistics.
    values = np.ma.array(arr).compressed()
    deviations = np.abs(values - np.median(values))
    return np.median(deviations)
def get_logs_directory():
    """Return the path of the logs directory, creating it if missing."""
    logs_dir = get_LDAModel_directory() / 'logs'
    # Lazily create the directory on first use.
    if not logs_dir.is_dir():
        create_directory(logs_dir)
    return logs_dir
def svn_ra_rev_proplist(*args):
    """
    svn_ra_rev_proplist(svn_ra_session_t session, svn_revnum_t rev, apr_hash_t props,
        apr_pool_t pool) -> svn_error_t
    """
    # The builtin apply() was removed in Python 3; argument unpacking is
    # the direct equivalent and works on Python 2 as well.
    return _ra.svn_ra_rev_proplist(*args)
from re import DEBUG
def gen_poly_model(XVALS, DATA_FLAT, degree: int = 15, debug: bool = DEBUG):
    """Polynomial fit model for data calculation. Uses np.polynomial.polyfit.

    Args:
        XVALS (array_like): one-dimensional array of x-values of data.
        DATA_FLAT (array_like): one-dimensional array of y-values of data.
        degree (int): the degree of the polynomial.
        debug (bool): show all print() statements.

    Returns:
        A dict with keys:
            "predictions" (np.array): model outputs at XVALS,
            "equation" (string): textual approximation of the equation,
            "coefficients" (np.array): exact coefficients of the equation,
            "degree" (int): the exact degree, identical to the input,
            "model" (func): takes input x-vals and returns predictions.

    Raises:
        None
    """
    # Shadow print locally so all debug output can be switched off at once.
    print = _print if debug else blank
    COEFFS = poly.polyfit(XVALS, DATA_FLAT, degree)
    print("Coeffs:", COEFFS)
    # Build a human-readable "c·x^(i)" term for each coefficient.
    terms = [
        utils.format_float_exponent(weight) + "x" + utils.EXPONENT
        + utils.BRACKETS[0] + str(index) + utils.BRACKETS[1]
        for index, weight in enumerate(COEFFS)
    ]
    EQN = " + ".join(terms)
    print("Generated equation:", EQN)

    def model(xs):
        return poly.polyval(xs, COEFFS)

    PREDICTIONS = model(XVALS)
    # Sanity check: rounded predictions should reproduce the input data.
    DIFF = np.subtract(np.around(PREDICTIONS), DATA_FLAT)
    print(DIFF)
    for index, value in enumerate(DIFF):
        if value:
            print("DIFF [%d]: %d" % (index, value))
    return {
        "predictions": PREDICTIONS,
        "equation": EQN,
        "coefficients": COEFFS,
        "degree": degree,
        "model": model,
    }
from datetime import datetime
def parse_date(s):
    """
    Given a string matching the 'full-date' production above, returns
    a datetime.date instance. Any deviation from the allowed format
    will produce a raised ValueError.

    >>> parse_date("2008-08-24")
    datetime.date(2008, 8, 24)
    >>> parse_date(" 2008-08-24 ")
    datetime.date(2008, 8, 24)
    >>> parse_date("2008-08-00")
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "rfc3339.py", line 134, in parse_date
        return datetime.date(int(y), int(m), int(d))
    ValueError: day is out of range for month
    >>> parse_date("2008-06-31")
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "rfc3339.py", line 134, in parse_date
        return datetime.date(int(y), int(m), int(d))
    ValueError: day is out of range for month
    >>> parse_date("2008-13-01")
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "rfc3339.py", line 134, in parse_date
        return datetime.date(int(y), int(m), int(d))
    ValueError: month must be in 1..12
    >>> parse_date("22008-01-01")
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "rfc3339.py", line 136, in parse_date
        raise ValueError('Invalid RFC 3339 date string', s)
    ValueError: ('Invalid RFC 3339 date string', '22008-01-01')
    >>> parse_date("2008-08-24").isoformat()
    '2008-08-24'
    """
    # BUG FIX: the module-level `from datetime import datetime` binds the
    # *class*, so `datetime.date(...)` would resolve to the class's unbound
    # date() method and raise TypeError.  Import the module locally so the
    # call below constructs a datetime.date object as intended.
    import datetime
    m = date_re.match(s)
    if m:
        (y, m, d) = m.groups()
        return datetime.date(int(y), int(m), int(d))
    else:
        raise ValueError('Invalid RFC 3339 date string', s)
def setup_config(quiz_name):
    """Update the config.toml index and dataset fields with the formatted
    quiz_name. This directs metapy to use the correct files.

    Keyword arguments:
    quiz_name -- the name of the quiz

    Returns:
    True on success, False if the file cannot be read or written
    """
    # Compute the sanitised name once instead of per matching line.
    safe_name = quiz_name.replace(" ", "_")
    try:
        # Context managers guarantee the file handle is closed even if an
        # exception fires mid-read (the original left the read handle open
        # on failure).
        with open("config.toml", 'r') as conf_file:
            lines = conf_file.readlines()
        for i, line in enumerate(lines):
            if line.startswith("index"):
                lines[i] = "index = 'idx-{0}'\n".format(safe_name)
            elif line.startswith("dataset"):
                lines[i] = "dataset = '{0}'\n".format(safe_name)
        with open("config.toml", 'w') as conf_file:
            conf_file.writelines(lines)
    except Exception as e:
        # Best-effort behaviour preserved: report the problem and signal
        # failure to the caller rather than raising.
        print(e)
        return False
    return True
def unitary_connection(h_pre, h_post, n_pre, n_post, X):
    """
    Connectivity value between the n_pre unit in the h_pre hypercolumn
    and the n_post unit in the h_post hypercolumn: the number of rows of
    X in which both units are active simultaneously.
    """
    pre_active = X[:, h_pre] == n_pre
    post_active = X[:, h_post] == n_post
    # Boolean AND then sum counts the co-active rows.
    return np.sum(pre_active & post_active)
def mase(y, y_hat, y_train, seasonality=1):
    """Calculates the M4 Mean Absolute Scaled Error (as a percentage).

    MASE measures the relative prediction accuracy of a forecasting
    method by comparing the mean absolute error of the prediction
    against the mean absolute error of the seasonal naive forecast
    computed on the training series.

    Parameters
    ----------
    y: numpy array
        actual test values
    y_hat: numpy array
        predicted values
    y_train: numpy array
        actual train values for Naive1 predictions
    seasonality: int
        main frequency of the time series
        (Hourly 24, Daily 7, Weekly 52, Monthly 12, Quarterly 4, Yearly 1)

    Return
    ------
    scalar: MASE
    """
    # Denominator: MAE of the seasonal naive forecast on the train data.
    scale = np.mean(abs(y_train[seasonality:] - y_train[:-seasonality]))
    forecast_mae = np.mean(abs(y - y_hat))
    return 100 * (forecast_mae / scale)
def _resize_along_axis(inputs, size, axis, **kwargs):
    """ Resize a 3D input tensor along a single axis only. """
    # Resizing every axis except the *next* one leaves exactly `axis`
    # as the single resized dimension.
    complementary_axis = (axis + 1) % 3
    target_size, _ = _calc_size_after_resize(inputs, size, axis)
    return _resize_except_axis(inputs, target_size, complementary_axis, **kwargs)
def getTournamentMatches(tourneyId):
    """
    Return the {match id: match data} mapping for the given tournament.

    The cached match index is built on demand the first time a
    tournament is requested.
    """
    if tourneyId in matchDatas:
        return matchDatas[tourneyId]
    refreshMatchIndex(tourneyId)
    return matchDatas[tourneyId]
def compute_protien_mass(protien_string):
    """Return the total monoisotopic mass of a protein string, rounded
    to three decimal places.

    test case
    >>> compute_protien_mass('SKADYEK')
    821.392
    """
    # Monoisotopic residue masses, stored as floats so no per-character
    # string-to-float conversion is needed on every call (the original
    # table held strings and called float() once per residue).
    p = {'A': 71.03711, 'C': 103.00919, 'D': 115.02694, 'E': 129.04259,
         'F': 147.06841, 'G': 57.02146, 'H': 137.05891, 'I': 113.08406,
         'K': 128.09496, 'L': 113.08406, 'M': 131.04049, 'N': 114.04293,
         'P': 97.05276, 'Q': 128.05858, 'R': 156.10111, 'S': 87.03203,
         'T': 101.04768, 'V': 99.06841, 'W': 186.07931, 'Y': 163.06333}
    # Round to 3 decimal places to match the expected output format.
    return round(sum(p[residue] for residue in protien_string), 3)
import logging
def find_duration(data):
    """Finds the duration of the ECG data sequence

    The first sample is always at time = 0 seconds, so the duration is
    simply the time value of the last sample.

    :param data: 2D array of time sequences and voltage sequences
    :return: Time duration of data sequence
    """
    logging.info("Detecting Duration of Data Stream...\n")
    # Last row, time column (column 0).
    return data[-1, 0]
def response(data, **kwd):
    """Returns a http response.

    Thin wrapper: ``data`` becomes the response body and any keyword
    arguments are forwarded unchanged to ``HttpResponse`` (presumably
    Django's — confirm against the module's imports).
    """
    return HttpResponse(data, **kwd)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.