| content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
|---|---|---|
def canonicalize_path(cwd, path, debug):
"""Given a path composed by concatenating two or more parts,
clean up and canonicalize the path."""
# // => /
# foo/bar/../whatever => foo/whatever [done]
# foo/bar/./whatever => foo/whatever [done]
# /foo/bar => /foo/bar [done]
# foo/bar => cwd/foo/bar [done]
# <empty_path> => cwd [done]
# Since we construct cwd from a node_id now, it always ends in /,
# so trim off the last empty string in cwd_parts
cwd_parts = cwd.split('/')[:-1]
path_parts = path.split('/')
new = path_parts if path and path[0] == '/' else cwd_parts + path_parts
if debug:
print("# canonicalize_path(cwd: '" + cwd \
+ "', path: '" + path + "')")
print("# cwd_parts: " + str(cwd_parts))
print("# path_parts: " + str(path_parts))
print("# new: '" + str(new) + "'")
# Now we will do some canonicalization ...
while '..' in new:
where = new.index('..')
new = new[:where-1] + new[where+1:] if where >= 2 else new[where+1:]
while '.' in new:
where = new.index('.')
new = new[:where] + new[where+1:] if where >= 1 else new[where+1:]
# Get rid of trailing slashes
while new and new[-1] == "":
new = new[:-1]
# Get rid of double slashes (an empty string in the middle of new)
while '' in new[1:-1]:
where = new[1:-1].index('')
new = new[:where+1] + new[where+2:]
# Make sure it's not empty
if new and new[0] != '':
new.insert(0, "")
new_path = '/'.join(new)
if not new_path:
new_path = '/'
if debug:
print("# new: '" + str(new) + "'")
print("new_path: '" + new_path + "'")
return new_path
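# A minimal usage sketch (hand-traced against the logic above; the paths are
# illustrative, not from the original source):
#
#     >>> canonicalize_path("/home/user/", "../etc//passwd", False)
#     '/home/etc/passwd'
#
# The relative path is joined onto cwd, then the ".." and the double slash are
# collapsed before the parts are re-joined.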
|
f8714f93106dcb3330c2f0df635aae5975f68abf
| 3,646,500
|
import click
import functools
import sys
def context_command(func):
"""
Base options for jobs that can override context variables on the command
line.
The command receives a *context_overrides* argument, a dict ready to be
deep merged in templates contexts.
"""
@click.option('--context', '-c', 'context_vars', multiple=True,
metavar='VAR=VALUE', help='Override context VAR with '
'VALUE; use --context multiple times to override multiple '
'variables. Use dots to target a nested variable: '
'foo.bar=baz')
@functools.wraps(func)
def wrapper(context_vars, **kwargs):
try:
context_overrides = parse_context_vars(context_vars)
except exceptions.MalformedContextVar as exc:
click.secho('Malformed context var in command-line: %s' % exc,
fg='red', bold=True)
click.secho('')
click.secho('Use PATH.TO.VAR=VALUE format.', fg='green')
sys.exit(1)
return func(context_overrides=context_overrides, **kwargs)
return wrapper
|
b140411121b814a732f6988be77c607811a148c6
| 3,646,501
|
def checksum(number):
"""Calculate the checksum. A valid number should have a checksum of 1."""
check = 0
for n in number:
check = (2 * check + int(10 if n == 'X' else n)) % 11
return check
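# This appears to implement the ISO 7064 Mod 11-2 check (as used by e.g. ORCID
# and ISNI), where a valid number, check digit included, reduces to 1. A
# hand-checked example using the well-known sample ORCID 0000-0002-1825-0097
# with the hyphens removed:
#
#     >>> checksum('0000000218250097')
#     1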
|
8ada40ca46bc62bbe8f96d69528f2cd88021ad6a
| 3,646,502
|
def instanceof(value, type_):
"""Check if `value` is an instance of `type_`.
:param value: an object
:param type_: a type
"""
return isinstance(value, type_)
|
3de366c64cd2b4fe065f15de10b1e6ac9132468e
| 3,646,503
|
def step(y, t, dt):
""" RK2 method integration"""
n = y.shape[0]
buf_f0 = np.zeros((n, ndim+1))
buf_f1 = np.zeros((n, ndim+1))
buf_y1 = np.zeros((n, ndim+1))
buf_f0 = tendencies(y)
buf_y1 = y + dt * buf_f0
buf_f1 = tendencies(buf_y1)
Y = y + 0.5 * (buf_f0 + buf_f1) * dt
return Y
|
e3c946b37d96ad0083fc5cc7a8d84b2f03ca897b
| 3,646,504
|
import torch
import gc
def sample_deletes(graph_, rgb_img_features, xyz,
delete_scores, num_deletes, threshold,
gc_neighbor_dist, padding_config,
**kwargs):
"""Sample Deletes.
Args:
graph_: a torch_geometric.data.Batch instance with attributes:
- rgb: a [N x C_app] torch.FloatTensor of rgb features
- depth: a [N x 3 x H' x W'] torch.FloatTensor
- mask: a [N x 1 x H' x W'] torch.FloatTensor
- orig_masks: a [N x H x W] torch.FloatTensor of original masks
- crop_indices: a [N, 4] torch.LongTensor. xmin, ymin, xmax, ymax.
rgb_img_features: an OrderedDict of image features. Output of gc.extract_rgb_img_features()
        xyz: a [3, H, W] torch.FloatTensor. 3D point cloud from camera frame of reference
delete_scores: a [N] torch.FloatTensor with values in [0, 1]. Output of
DeleteNetWrapper.delete_scores().
num_deletes: Maximum number of deletes allowed.
threshold: Minimum delete score required to consider the delete.
gc_neighbor_dist: Distance threshold for connecting nodes in new graph
padding_config: a Python dictionary with padding parameters.
Returns:
        boolean of whether the delete operation was successful.
a torch_geometric.data.Data instance.
"""
# Sort scores, consider only the ones above a certain threshold
sorted_scores, score_indices = torch.sort(delete_scores, descending=True)
num_potential_deletes = torch.sum(sorted_scores > threshold)
if num_potential_deletes == 0 and torch.all(~graph_.added): # Nothing to delete
return False, None
score_indices = score_indices[:num_potential_deletes]
delete_inds = torch.zeros(graph_.orig_masks.shape[0]).bool()
# Sample some masks to delete
leftover_delete_scores = delete_scores[score_indices]
leftover_delete_indices = score_indices
while torch.sum(delete_inds) < num_deletes and leftover_delete_indices.shape[0] > 0:
# Sample delete index
sample_idx = torch.multinomial(leftover_delete_scores, 1)
delete_idx = leftover_delete_indices[sample_idx][0]
delete_inds[delete_idx] = True
# Get leftover potential deletes
temp = torch.ones(leftover_delete_scores.shape[0]).bool()
temp[sample_idx] = False
leftover_delete_indices = leftover_delete_indices[temp]
leftover_delete_scores = leftover_delete_scores[temp]
# If the deleting only undoes the potential adds, consider the sampling to be a failure
if torch.all(delete_inds == graph_.added):
return False, None
# Keep the un-deleted masks
new_masks = graph_.orig_masks[~delete_inds]
# Create new graph
new_masks = new_masks[1:] # Get rid of BG mask
new_masks = util_.convert_mask_NHW_to_HW(new_masks.float(), start_label=constants.OBJECTS_LABEL)
new_graph = gc.construct_segmentation_graph(rgb_img_features, xyz, new_masks,
neighbor_dist=gc_neighbor_dist,
padding_config=padding_config)
return True, new_graph
|
3a24d3806e3e7aebf5ae6d2c7141149358d21607
| 3,646,505
|
def make_char(hex_val):
"""
Create a unicode character from a hex value
:param hex_val: Hex value of the character.
:return: Unicode character corresponding to the value.
"""
try:
return unichr(hex_val)
except NameError:
return chr(hex_val)
|
edbbad92c56ec74ff28295c46dca4f2976768d0a
| 3,646,506
|
def normalize(features):
"""
Normalizes data using means and stddevs
"""
means, stddevs = compute_moments(features)
normalized = (np.divide(features, 255) - means) / stddevs
return normalized
|
3b4c07bf80e68ec3d6c807a9293aa5b4f4203401
| 3,646,507
|
def get_args_from_str(input: str) -> list:
"""
Get arguments from an input string.
Args:
input (`str`): The string to process.
Returns:
A list of arguments.
"""
return ARG_PARSE_REGEX.findall(input)
|
50de69e4ee60da31a219842ce09833a92218ea14
| 3,646,508
|
import os
def get_all_files(repo_root):
"""Get all files from in this repo."""
output = []
for root, _, files in os.walk(repo_root):
for f in files:
if f.lower().endswith(tuple(CPP_SUFFIXES + ['.py'])):
full_name = os.path.join(root, f)[len(repo_root) + 1:]
if not any(n in full_name
for n in ALL_FILES_BLACKLISTED_NAMES):
output.append(full_name)
return output
|
3a7cfcba087df93be74dd064d92f679cc987b714
| 3,646,509
|
from typing import List
def simulate(school: List[int], days: int) -> int:
"""Simulates a school of fish for ``days`` and returns the number of fish."""
school = flatten_school(school)
for day in range(1, days + 1):
school = simulate_day(school)
return sum(school)
|
efcfbfdde9c3fc941a40028459ddc35db0653296
| 3,646,510
|
import torch
def SPTU(input_a, input_b, n_channels: int):
"""Softplus Tanh Unit (SPTU)"""
in_act = input_a+input_b
t_act = torch.tanh(in_act[:, :n_channels, :])
s_act = torch.nn.functional.softplus(in_act[:, n_channels:, :])
acts = t_act * s_act
return acts
|
a03cc114cf960af750b13cd61db8f4d2e6c064ad
| 3,646,511
|
def is_fouling_team_in_penalty(event):
"""Returns True if fouling team over the limit, else False"""
fouls_to_give_prior_to_foul = event.previous_event.fouls_to_give[event.team_id]
return fouls_to_give_prior_to_foul == 0
|
ac1578af1092586a30b8fc9cdb3e5814da1f1544
| 3,646,512
|
import re
def is_img_id_valid(img_id):
"""
Checks if img_id is valid.
"""
    t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, flags=re.IGNORECASE)
t = re.sub(r'\.+', '.', t)
if img_id != t or img_id.count(':') != 1:
return False
profile, base_name = img_id.split(':', 1)
if not profile or not base_name:
return False
try:
get_profile_configs(profile)
except ValueError:
return False
return True
|
749a8830d1a932465ca0c9c8c3a18032e2dc357e
| 3,646,513
|
import warnings
import numpy as np
def lmc(wave, tau_v=1, **kwargs):
""" Pei 1992 LMC extinction curve.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
if (wave < 1e3).any():
warnings.warn('LMC: extinction extrapolation below 1000AA is poor')
mic = wave * 1e-4
aa = [175., 19., 0.023, 0.005, 0.006, 0.020]
ll = [0.046, 0.08, 0.22, 9.7, 18., 25.]
bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00]
nn = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]
abs_ab = mic * 0.
norm_v = 0 # hack to go from tau_b to tau_v
mic_5500 = 5500 * 1e-4
for i, a in enumerate(aa):
norm_v += aa[i] / ((mic_5500 / ll[i])**nn[i] +
(ll[i] / mic_5500)**nn[i] + bb[i])
abs_ab += aa[i] / ((mic / ll[i])**nn[i] + (ll[i] / mic)**nn[i] + bb[i])
return tau_v * (abs_ab / norm_v)
|
04c89605e8ad4188c62b631e173a9c8fe714958a
| 3,646,514
|
def minMax(xs):
"""Calcule le minimum et le maximum d'un tableau de valeur xs (non-vide !)"""
min, max = xs[0], xs[0]
for x in xs[1:]:
if x < min:
min = x
elif x > max:
max = x
return min,max
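# A small hand-checked example:
#
#     >>> minMax([3, 1, 4, 1, 5])
#     (1, 5)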
|
8453b71e5b62592f38f4be84f4366fb02bd0171b
| 3,646,515
|
def events(request):
"""Events"""
# Get profile
profile = request.user.profile
# Get a QuerySet of events for this user
events = Event.objects.filter(user=request.user)
# Create a new paginator
paginator = Paginator(events, profile.entries_per_page)
# Make sure page request is an int, default to 1st page
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request is out of range, deliver last page of results
try:
events = paginator.page(page)
except (EmptyPage, InvalidPage):
events = paginator.page(paginator.num_pages)
# Render template
return render_page(
'thing/events.html',
{
'events': events,
'user': request.user
},
request,
)
|
3561856af65d2e54eb4f00a13ca85ece4c939b7a
| 3,646,516
|
from typing import Tuple
from typing import Callable
def latent_posterior_factory(x: np.ndarray, y: np.ndarray) -> Tuple[Callable]:
"""Factory function that yields further functions to compute the log-posterior
of the stochastic volatility model given parameters `x`. The factory also
constructs functions for the gradient of the log-posterior and the Fisher
information metric.
Args:
x: The stochastic volatilities.
y: Observations from the stochastic volatility model.
Returns:
log_posterior: Function to compute the log-posterior.
grad_log_posterior: Function to compute the gradient of the log-posterior.
metric: Function to compute the Fisher information metric.
grad_metric: Function to compute the gradient of the Fisher information
metric.
"""
T = x.size
def _log_posterior(sigma: float, phi: float, beta: float) -> float:
"""The log-posterior of the stochastic volatility model given the stochastic
volatilities. The inference is over the model parameters `sigma`, `phi`,
and `beta`.
Args:
sigma: Parameter of the stochastic volatility model.
phi: Parameter of the stochastic volatility model.
beta: Parameter of the stochastic volatility model.
Returns:
lp: The log-posterior of the stochastic volatility model.
"""
phisq = np.square(phi)
ly = spst.norm.logpdf(y, 0.0, beta*np.exp(0.5 * x)).sum()
lxo = spst.norm.logpdf(x[0], 0.0, sigma / np.sqrt(1.0 - phisq))
lx = spst.norm.logpdf(x[1:], phi*x[:-1], sigma).sum()
lp = ly + lx + lxo + log_prior(sigma, phi, beta)
return lp
def _grad_log_posterior_helper(gamma, alpha, beta, sigmasq, phi, phisq):
dpgamma, dpalpha, dpbeta = grad_log_prior(gamma, alpha, beta)
dbeta = (-T / beta
+ np.sum(np.square(y) / np.exp(x)) / np.power(beta, 3.0)
+ dpbeta)
dgamma = (
-T + np.square(x[0])*(1.0 - phisq) / sigmasq
+ np.sum(np.square(x[1:] - phi*x[:-1])) / sigmasq
+ dpgamma)
dalpha = (
-phi + phi*np.square(x[0])*(1.0 - phisq) / sigmasq
+ np.sum(x[:-1] * (x[1:] - phi*x[:-1])) * (1.0 - phisq) / sigmasq
+ dpalpha)
return np.array([dgamma, dalpha, dbeta])
def _metric_helper(gamma, alpha, beta, sigmasq, phi, phisq):
# Note that this ordering of the variables differs from that presented
# in the Riemannian manifold HMC paper.
G = np.array([
# gamma alpha beta
[ 2.0*T, 2.0*phi, 0.0], # gamma
[2.0*phi, 2.0*phisq + (T - 1.0)*(1.0 - phisq), 0.0], # alpha
[ 0.0, 0.0, 2.0 * T / np.square(beta)] # beta
])
# Add in the negative Hessian of the log-prior.
H = hess_log_prior(gamma, alpha, beta)
G -= H
return G
def _grad_metric_helper(gamma, alpha, beta, sigmasq, phi, phisq):
dGbeta = np.array([
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -4.0 * T / np.power(beta, 3.0)]
])
dGgamma = np.zeros((3, 3))
a = 2.0*(1.0 - phisq)
b = 2.0*phi*(3.0 - T)*(1.0 - phisq)
dGalpha = np.array([
[0.0, a, 0.0],
[ a, b, 0.0],
[0.0, 0.0, 0.0]
])
dG = np.array([dGgamma, dGalpha, dGbeta]).swapaxes(0, -1)
dH = grad_hess_log_prior(gamma, alpha, beta)
return dG - dH
def _grad_log_posterior(gamma: float, alpha: float, beta: float) -> np.ndarray:
"""The gradient log-posterior of the stochastic volatility model given the
stochastic volatilities with respect to the (transformed) parameters
`gamma`, `alpha`, and `beta`.
Args:
gamma: Transformed parameter `sigma` of the stochastic volatility model.
alpha: Transformed parameter `phi` of the stochastic volatility model.
beta: Parameter of the stochastic volatility model.
Returns:
dgamma: The gradient of the log-posterior with respect to the
transformed parameter `sigma`.
dalpha: The gradient of the log-posterior with respect to the
transformed parameter `phi`.
dbeta: The gradient of the log-posterior with respect to `beta`.
"""
sigma = np.exp(gamma)
sigmasq = np.square(sigma)
phi = np.tanh(alpha)
phisq = np.square(phi)
return _grad_log_posterior_helper(gamma, alpha, beta, sigmasq, phi, phisq)
def _metric(gamma: float, alpha: float, beta: float) -> np.ndarray:
"""The Fisher information metric of the stochastic volatility model given the
stochastic volatilities.
Args:
gamma: Transformed parameter of the stochastic volatility model.
alpha: Transformed parameter of the stochastic volatility model.
beta: Parameter of the stochastic volatility model.
Returns:
G: The Fisher information metric.
"""
sigma = np.exp(gamma)
sigmasq = np.square(sigma)
phi = np.tanh(alpha)
phisq = np.square(phi)
return _metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
def _grad_metric(gamma: float, alpha: float, beta: float) -> np.ndarray:
"""The gradient of the Fisher information metric of the stochastic volatility
model given the stochastic volatilities with respect to the `sigma`,
`alpha`, and `beta` parameters of the stochastic volatility model.
Args:
gamma: Transformed parameter of the stochastic volatility model.
alpha: Transformed parameter of the stochastic volatility model.
beta: Parameter of the stochastic volatility model.
Returns:
dG: The gradient of the Fisher information metric.
"""
sigma = np.exp(gamma)
sigmasq = np.square(sigma)
phi = np.tanh(alpha)
phisq = np.square(phi)
return _grad_metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
def grad_log_posterior_and_metric_and_grad_metric(q):
gamma, alpha, beta = q
sigma = np.exp(gamma)
sigmasq = np.square(sigma)
phi = np.tanh(alpha)
phisq = np.square(phi)
glp = _grad_log_posterior_helper(gamma, alpha, beta, sigmasq, phi, phisq)
G = _metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
dG = _grad_metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
return glp, G, dG
# Convert functions defined for separate arguments to take a vector
# concatenation of the parameter.
log_posterior = lambda q: _log_posterior(*inverse_transform(q)[0])
grad_log_posterior = lambda q: _grad_log_posterior(q[0], q[1], q[2])
metric = lambda q: _metric(q[0], q[1], q[2])
grad_metric = lambda q: _grad_metric(q[0], q[1], q[2])
return (
log_posterior, grad_log_posterior, metric, grad_metric,
grad_log_posterior_and_metric_and_grad_metric)
|
0fe2ec7a7fab480fbe19a374e71ac3ab5232d8e0
| 3,646,517
|
def update_build_configuration_set(id, **kwargs):
"""
Update a BuildConfigurationSet
"""
data = update_build_configuration_set_raw(id, **kwargs)
if data:
return utils.format_json(data)
|
ee02faf0d683e271747d6e30a3ef8ffd9c271e6c
| 3,646,518
|
from typing import Optional
def create_app(settings_override: Optional[dict]=None) -> Flask:
"""
Create a Flask app
:param settings_override: any settings to override
:return: flask app
"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object('config.settings')
app.config.from_pyfile('settings.py', silent=True)
if settings_override:
app.config.update(settings_override)
configure_logging(app)
initialize_extensions(app)
db.app = app
register_blueprints(app)
initialize_jinja2(app)
load_models()
return app
|
2a4ee3b8f4f67db1966a678b6059b53aa21ac73f
| 3,646,519
|
def compute_prefix_function(pattern):
"""
Computes the prefix array for KMP.
:param pattern:
:type pattern: str
:return:
"""
m = len(pattern)
prefixes = [0]*(m+1)
i = 0
for q in range(2, m + 1):
while i > 0 and pattern[i] != pattern[q - 1]:
i = prefixes[i]
if pattern[i] == pattern[q - 1]:
i += 1
prefixes[q] = i
return prefixes[1:]
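# A hand-traced example; the result matches the standard KMP prefix (failure)
# function for the pattern:
#
#     >>> compute_prefix_function("ababaca")
#     [0, 0, 1, 2, 3, 0, 1]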
|
7933cc33eba53247e858ae40b9691d101c7030e6
| 3,646,520
|
def binary_indicator(states,
actions,
rewards,
next_states,
contexts,
termination_epsilon=1e-4,
offset=0,
epsilon=1e-10,
state_indices=None,
summarize=False):
"""Returns 0/1 by checking if next_states and contexts overlap.
Args:
states: A [batch_size, num_state_dims] Tensor representing a batch
of states.
actions: A [batch_size, num_action_dims] Tensor representing a batch
of actions.
rewards: A [batch_size] Tensor representing a batch of rewards.
next_states: A [batch_size, num_state_dims] Tensor representing a batch
of next states.
contexts: A list of [batch_size, num_context_dims] Tensor representing
a batch of contexts.
termination_epsilon: terminate if dist is less than this quantity.
offset: Offset the rewards.
epsilon: small offset to ensure non-negative/zero distance.
Returns:
A new tf.float32 [batch_size] rewards Tensor, and
tf.float32 [batch_size] discounts tensor.
"""
del states, actions # unused args
next_states = index_states(next_states, state_indices)
dist = tf.reduce_sum(tf.squared_difference(next_states, contexts[0]), -1)
dist = tf.sqrt(dist + epsilon)
discounts = dist > termination_epsilon
rewards = tf.logical_not(discounts)
rewards = tf.to_float(rewards) + offset
return tf.to_float(rewards), tf.ones_like(tf.to_float(discounts))
|
68531010c695e4bb8d49d05f5b0ba8799e1e3cf5
| 3,646,521
|
import math
def sigmoid(num):
"""
Find the sigmoid of a number.
:type number: number
:param number: The number to find the sigmoid of
:return: The result of the sigmoid
:rtype: number
>>> sigmoid(1)
0.7310585786300049
"""
# Return the calculated value
return 1 / (1 + math.exp(-num))
|
73730a39627317011d5625ab85c146b6bd7793d8
| 3,646,522
|
def list_lattices(device_name: str = None, num_qubits: int = None,
connection: ForestConnection = None):
"""
Query the Forest 2.0 server for its knowledge of lattices. Optionally filters by underlying
device name and lattice qubit count.
:return: A dictionary keyed on lattice names and valued in dictionaries of the form
{
"device_name": device_name,
"qubits": num_qubits
}
"""
if connection and connection.session:
session = connection.session
else:
session = get_session()
if connection:
url = connection.sync_endpoint + "/lattices"
else:
config = PyquilConfig()
try:
url = config.forest_url + "/lattices"
except TypeError:
raise ValueError("""Encountered an error when querying the Forest 2.0 endpoint.
Most likely, you're missing an address for the Forest 2.0 server endpoint. This can
be set through the environment variable FOREST_URL or by changing the following lines
in the QCS config file:
[Rigetti Forest]
url = https://rigetti.com/valid/forest/url""")
try:
response = get_json(session, url,
params={"device_name": device_name,
"num_qubits": num_qubits})
return response["lattices"]
except Exception as e:
raise ValueError("""
list_lattices encountered an error when querying the Forest 2.0 endpoint.
Some common causes for this error include:
* You don't have valid user authentication information. Very likely this is because you
haven't yet been invited to try QCS. We plan on making our device information publicly
accessible soon, but in the meanwhile, you'll have to use default QVM configurations and
to use `list_quantum_computers` with `qpus = False`.
* You do have user authentication information, but it is missing or modified. You can find
this either in the environment variables FOREST_API_KEY and FOREST_USER_ID or in the
config file (stored by default at ~/.qcs_config, but with location settable through the
environment variable QCS_CONFIG), which contains the subsection
[Rigetti Forest]
user_id = your_user_id
key = your_api_key
* You're missing an address for the Forest 2.0 server endpoint, or the address is invalid.
This too can be set through the environment variable FOREST_URL or by changing the
following lines in the QCS config file:
[Rigetti Forest]
url = https://rigetti.com/valid/forest/url
For the record, here's the original exception: {}
""".format(repr(e)))
|
a6fb4754f3f76135ed2083441782924f03160994
| 3,646,523
|
def inflate_tilegrid(
bmp_path=None,
target_size=(3, 3),
tile_size=None,
transparent_index=None,
bmp_obj=None,
bmp_palette=None,
):
"""
inflate a TileGrid of ``target_size`` in tiles from a 3x3 spritesheet by duplicating
the center rows and columns.
:param Optional[str] bmp_path: filepath to the 3x3 spritesheet bitmap file
:param Optional[tuple] target_size: desired size in tiles (target_width, target_height)
:param Optional[tuple] tile_size: size of the tiles in the 3x3 spritesheet. If
None is used it will equally divide the width and height of the Bitmap by 3.
:param Optional[Union[tuple, int]] transparent_index: a single index within the palette to
make transparent, or a tuple of multiple indexes to make transparent
:param Optional[OnDiskBitmap] bmp_obj: Already loaded 3x3 spritesheet in an OnDiskBitmap
:param Optional[Palette] bmp_palette: Already loaded spritesheet Palette
"""
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches
if bmp_path is None and (bmp_obj is None and bmp_palette is None):
raise AttributeError("Must pass either bmp_path or bmp_obj and bmp_palette")
if bmp_path is not None:
image, palette = adafruit_imageload.load(bmp_path)
else:
image = bmp_obj
palette = bmp_palette
if transparent_index is not None:
if isinstance(transparent_index, tuple):
for index in transparent_index:
palette.make_transparent(index)
elif isinstance(transparent_index, int):
palette.make_transparent(transparent_index)
if tile_size is None:
tile_width = image.width // 3
tile_height = image.height // 3
else:
tile_width = tile_size[0]
tile_height = tile_size[1]
target_width = target_size[0]
target_height = target_size[1]
tile_grid = displayio.TileGrid(
image,
pixel_shader=palette,
height=target_height,
width=target_width,
tile_width=tile_width,
tile_height=tile_height,
)
# corners
tile_grid[0, 0] = 0 # upper left
tile_grid[tile_grid.width - 1, 0] = 2 # upper right
tile_grid[0, tile_grid.height - 1] = 6 # lower left
tile_grid[tile_grid.width - 1, tile_grid.height - 1] = 8 # lower right
for x in range(target_size[0] - 2):
tile_grid[x + 1, 0] = 1
tile_grid[x + 1, tile_grid.height - 1] = 7
for y in range(target_size[1] - 2):
tile_grid[0, y + 1] = 3
tile_grid[tile_grid.width - 1, y + 1] = 5
for y in range(target_size[1] - 2):
for x in range(target_size[0] - 2):
tile_grid[x + 1, y + 1] = 4
return tile_grid
|
b3c67c9aaa38cc77208f6fc7cafe91814a0fdbb4
| 3,646,524
|
def get_name_and_version(requirements_line: str) -> tuple[str, ...]:
"""Get the name a version of a package from a line in the requirement file."""
full_name, version = requirements_line.split(" ", 1)[0].split("==")
name_without_extras = full_name.split("[", 1)[0]
return name_without_extras, version
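# An illustrative example (the requirements line is made up, not from the
# original source); extras in square brackets are stripped from the name:
#
#     >>> get_name_and_version("requests[security]==2.28.1 ; python_version >= '3.7'")
#     ('requests', '2.28.1')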
|
424b3c3138ba223610fdfa1cfa6d415b8e31aff3
| 3,646,525
|
def _compute_eval_stats(params, batch,
model,
pad_id):
"""Computes pre-training task predictions and stats.
Args:
params: Model state (parameters).
batch: Current batch of examples.
model: The model itself. Flax separates model state and architecture.
pad_id: Token ID representing padding. A mask is used to distinguish padding
from actual inputs.
Returns:
Model predictions and metrics.
"""
inputs = {
"input_ids": batch["input_ids"],
"input_mask": (batch["input_ids"] != pad_id).astype(np.int32),
"type_ids": batch["type_ids"],
"masked_lm_positions": batch["masked_lm_positions"],
"masked_lm_labels": batch["masked_lm_ids"],
"masked_lm_weights": batch["masked_lm_weights"],
"next_sentence_labels": batch["next_sentence_labels"],
"deterministic": True
}
return model.apply({"params": params}, **inputs)
|
cc7e9b48d6255c8f82ae2bff978c54631d246bda
| 3,646,526
|
import locale
import itertools
def validateTextFile(fileWithPath):
"""
Test if a file is a plain text file and can be read
:param fileWithPath(str): File Path
    :return: the detected text encoding, or None if the file cannot be read as text
"""
try:
file = open(fileWithPath, "r", encoding=locale.getpreferredencoding(), errors="strict")
# Read only a couple of lines in the file
for line in itertools.islice(file, 10):
line = line
file.readlines()
# Close the file handle
file.close()
# Return the systems preferred encoding
return locale.getpreferredencoding()
except:
validencodings = ["utf-8", "ascii", "utf-16", "utf-32", "iso-8859-1", "latin-1"]
for currentEncoding in validencodings:
try:
file = open(fileWithPath, "r", encoding=currentEncoding, errors="strict")
# Read only a couple of lines in the file
for line in itertools.islice(file, 10):
line = line
# Close the file handle
file.close()
                # Return the encoding that succeeded
return currentEncoding
except:
                # Error occurred while reading the file, skip to the next iteration
continue
# Error, no encoding was correct
return None
|
22167a4501ca584061f1bddcc7738f00d4390085
| 3,646,527
|
from bs4 import BeautifulSoup
def get_title(filename="test.html"):
"""Read the specified file and load it into BeautifulSoup. Return the title tag
"""
with open(filename, "r") as my_file:
file_string = my_file.read()
file_soup = BeautifulSoup(file_string, 'html.parser')
    # find the title tag
title = file_soup.select("title")
return title
|
31c35588bb10132509a0d35b49a9b7eeed902018
| 3,646,528
|
import re
def is_valid_dump_key(dump_key):
"""
True if the `dump_key` is in the valid format of
"database_name/timestamp.dump"
"""
regexmatch = re.match(
r'^[\w-]+/\d{4}_\d{2}_\d{2}_\d{2}_\d{2}_\d{2}_\d+\.\w+\.dump$',
dump_key,
)
return regexmatch
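# Hand-checked examples of keys accepted and rejected by the pattern above
# (the names and timestamps are made up for illustration):
#
#     >>> bool(is_valid_dump_key('mydb/2021_01_02_03_04_05_123456.tar.dump'))
#     True
#     >>> bool(is_valid_dump_key('mydb/latest.dump'))
#     False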
|
66fd7d465f641a96bd8b22e95918a6dcbefef658
| 3,646,529
|
import math
def GetProfileAtAngle( imdata, xc,yc, angle, radius, width=1 ):
"""
Returns a 1D profile cut through an image at specified angle, extending to
specified radius.
Note: this is designed to imitate pvect, so angles are measured CCW from +x axis!
This function uses IRAF coordinates (1-based, x = column number)
Parameters
----------
imdata : 2D ndarray of float
image data array
xc : int or float
x-coordinate of center to extract profile from (IRAF ordering, 1-based)
yc : int or float
y-coordinate of center to extract profile from (IRAF ordering, 1-based)
angle : float
angle measured CCW from +x axis, in degrees
radius : int
length of profile, in pixels
width : int, optional
width of profile (perpendicular to profile) in pixels
Returns
-------
rr,ii : tuple of 1D ndarray of float
rr = array of radius values (= 0 at (xc,yc))
ii = data pixel values along profile [= Nan if all pixels for that bin
were masked]
"""
angle_rad = math.radians(angle)
x_end = xc + math.cos(angle_rad) * radius
y_end = yc + math.sin(angle_rad) * radius
x_start = xc - math.cos(angle_rad) * radius
y_start = yc - math.sin(angle_rad) * radius
rr,ii = ExtractProfile(imdata, x_start,y_start, x_end,y_end, width=width)
rr = rr - radius
return rr, ii
|
5c20ae064989251a807690e8f90f7156a6dbe642
| 3,646,530
|
import os
def create_out_dir_name(params):
"""
Create output directory name for the experiment based on the current date
and time.
Args:
params (dict): The parameters of the experiment.
Returns:
str: The path to the output directory.
"""
current_timestamp = timestamp()
out_dir = os.path.join('out', current_timestamp)
return out_dir
|
29700a90c780e9ed7e9f23b10f0fc66d1df03864
| 3,646,531
|
import numpy as np
def extract_axon_and_myelin_masks_from_image_data(image_data):
"""
Returns the binary axon and myelin masks from the image data.
:param image_data: the image data that contains the 8-bit greyscale data, with over 200 (usually 255 if following
the ADS convention) being axons, 100 to 200 (usually 127 if following the ADS convention) being myelin
and 0 being background
    :return axon_mask: the binary axon mask
:return myelin_mask: the binary myelin mask
"""
image_data_array = np.array(image_data)
axon_mask = image_data_array > 200
myelin_mask = (image_data_array > 100) & (image_data_array < 200)
axon_mask = axon_mask.astype(np.uint8)
myelin_mask = myelin_mask.astype(np.uint8)
return axon_mask, myelin_mask
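# A tiny worked example (assuming numpy is imported as np, as above): values
# above 200 map to axon, values strictly between 100 and 200 map to myelin:
#
#     >>> extract_axon_and_myelin_masks_from_image_data(np.array([[255, 127, 0]]))
#     (array([[1, 0, 0]], dtype=uint8), array([[0, 1, 0]], dtype=uint8))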
|
087f80d4c55b7bbba7e60720be26ff3e3ca1648a
| 3,646,532
|
def create_mne_array(recording, ch_names=None):
"""
Populate a full mne raw array object with information.
Parameters
----------
    recording : object
        The recording object to convert to mne format (must provide
        get_np_signals, get_signals and get_signal_channels).
ch_names : List of str, Default None
Optional. What to name the mne eeg channels, default: region+chan_idx.
Returns
-------
mne.io.RawArray
"""
# TODO work with quantities here to avoid magic division to uV
raw_data = recording.get_np_signals() / 1000
if ch_names is None:
try:
ch_names = [
"{}-{}".format(x, y)
for x, y in zip(
recording.get_signals().get_property("region"),
recording.get_signal_channels(as_idx=True),
)
]
except BaseException:
ch_names = [str(i) for i in range(len(recording.get_signals()))]
# Convert LFP data into mne format
example_lfp = recording.get_signals()[0]
sfreq = example_lfp.get_sampling_rate()
ch_types = ["eeg"] * len(recording.get_signals())
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
raw = mne.io.RawArray(raw_data, info)
return raw
|
f39a3e38174e0b915c55c4cf3ad4692fa38bca64
| 3,646,533
|
def expand_advanced(var, vars_, nounset, indirect, environ, var_symbol):
"""Expand substitution."""
if len(vars_) == 0:
raise MissingClosingBrace(var)
if vars_[0] == "-":
return expand_default(
var,
vars_[1:],
set_=False,
nounset=nounset,
indirect=indirect,
environ=environ,
var_symbol=var_symbol,
)
if vars_[0] == "=":
return expand_default(
var,
vars_[1:],
set_=True,
nounset=nounset,
indirect=indirect,
environ=environ,
var_symbol=var_symbol,
)
if vars_[0] == "+":
return expand_substitute(
var, vars_[1:], nounset=nounset, environ=environ, var_symbol=var_symbol
)
if vars_[0] == "?":
return expand_strict(
var, vars_[1:], nounset=nounset, environ=environ, var_symbol=var_symbol
)
return expand_offset(
var, vars_, nounset=nounset, environ=environ, var_symbol=var_symbol
)
|
1be5d66c18775bca8669d97ccf8ccd439f154ff2
| 3,646,534
|
from math import pi
def overlap(n2, lamda_g, gama):
""" Calculates the 1/Aeff (M) from the gamma given.
The gamma is supposed to be measured at lamda_g
(in many cases we assume that is the same as where
the dispersion is measured at).
"""
M = gama / (n2*(2*pi/lamda_g))
return M
|
00e1d59a6a8e5b908acfa3097cfb9818edaf608f
| 3,646,535
|
def fill_none(pre_made_replays_list):
"""Fill none and reformat some fields in a pre-made replays list.
:param pre_made_replays_list: pre-made replays list from ballchasing.com.
:return: formatted list.
"""
for replay in pre_made_replays_list:
if replay["region"] is None:
replay["region"] = "North America"
replay["phase"] = "Qualifier"
replay["stage"] = "Tiebreaker"
replay["round"] = "Finals"
if replay['region'] == 'Main Event':
replay['region'] = 'World'
elif replay['region'] == 'Europe' and replay['phase'] == 'Tiebreaker':
replay["phase"] = "Qualifier"
replay["stage"] = "Tiebreaker"
if replay["match"] == "EG vs 00":
replay["round"] = "Lower Finals"
else:
replay["round"] = "Upper Finals"
return pre_made_replays_list
|
ee900227a8afcba71e6a00ef475892da4fdc3e3b
| 3,646,536
|
def parse_args_from_str(arg_str, arg_defs): # , context=None):
"""
Args:
args_str (str): argument string, optionally comma-separated
arg_defs (tuple): list of argument definitions
context (dict, optional):
When passed, the arguments are parsed for ``$(var_name)`` macros,
to lookup values from that dict.
Returns:
(dict) keyword args
Raises:
TypeError: if `argument` is of an unexpected type
ValueError: if `argument` does not fulfill the optional condition
AssertionError:
if `parse_args_from_str` was called with a wrong syntax, i.e.
`arg_defs` is not well-formed.
Examples::
arg_defs = (
("name", str),
("amount", float),
("options", dict, {}),
)
def order(raw_arg_string):
kwargs = parse_args_from_str(arg_defs)
            assert isinstance(kwargs["name"], str)
            assert type(kwargs["amount"]) is float
            assert isinstance(kwargs["options"], dict)
"""
check_arg(arg_str, str)
check_arg(arg_defs, (list, tuple))
res = {}
# Special case: '$name()' should not be interpreted as having one "" arg
# if arg_defs defines a default for the first arg
if arg_str.strip() == "" and len(arg_defs[0]) == 3:
arg_str = str(arg_defs[0][2])
arg_list = [a.strip() for a in arg_str.split(",")]
optional_mode = False
for arg_def in arg_defs:
check_arg(arg_def, (list, tuple))
if len(arg_def) == 2:
arg_name, arg_type = arg_def
arg_default = NO_DEFAULT
if optional_mode:
raise AssertionError(
"Mandatory arg definition must not follow optional args: `{}`".format(
arg_def
)
)
elif len(arg_def) == 3:
arg_name, arg_type, arg_default = arg_def
optional_mode = True
else:
raise AssertionError("Expected 2- or 3-tuple: {}".format(arg_def))
if arg_type not in (float, int, str):
raise AssertionError(
"Unsupported argument definition type: {}".format(arg_def)
)
try:
# Get next arg
arg_val = arg_list.pop(0)
# Allow quotes
is_quoted = (arg_val.startswith('"') and arg_val.endswith('"')) or (
arg_val.startswith("'") and arg_val.endswith("'")
)
if is_quoted:
# Strip quotes and return as string (don't cast to other types)
arg_val = arg_val[1:-1]
elif "$(" in arg_val:
# The arg seems to be a macro: don't try to cast.
pass
else:
# Raises ValueError:
arg_val = arg_type(arg_val)
except IndexError:
if arg_default is NO_DEFAULT:
raise ValueError(
"Missing mandatory arg `{}` in '{}'.".format(arg_name, arg_str)
)
arg_val = arg_default
res[arg_name] = arg_val
if arg_list:
raise ValueError("Extra args `{}`.".format(", ".join(arg_list)))
return res
|
1c21cf170c360c7b429b1303bd19e1a23ea5cd3c
| 3,646,537
|
import torch
def model_evaluation(
data_loader,
ml_model_name,
ml_model,
smiles_dictionary,
max_length_smiles,
device_to_use,
):
"""
Evaluation per batch of a pytorch machine learning model.
Parameters
----------
data_loader : torch.utils.data
The training data as seen by Pytorch for mini-batches.
ml_model_name : str
Name of the machine learning model. It can be either "CONV1D", "CONV2D", or "RNN".
ml_model : nn.Module
Instance of the pytorch machine learning model.
smiles_dictionary : dict
The dictionary of SMILES characters.
max_length_smiles : int
The length of the longest SMILES.
device_to_use : torch.device
The device to use for model instance, "cpu" or "cuda".
Returns
-------
tuple of dict:
Dictionary of the predicted, true output values, respectively, in the data loader, with SMILES as keys.
"""
ml_model.eval()
with torch.no_grad():
all_output_pred = {}
all_output_true = {}
for _, data in enumerate(data_loader):
# SMILES and target
smiles, target = data
input_true, output_true = data_to_pytorch_format(
smiles,
target,
smiles_dictionary,
max_length_smiles,
ml_model_name,
device_to_use,
)
# Prediction
output_pred = ml_model(input_true)
# Convert to numpy arrays
output_pred = output_pred.cpu().detach().numpy()
output_true = output_true.cpu().detach().numpy()
for smile in smiles:
all_output_pred[smile] = output_pred
all_output_true[smile] = output_true
return (all_output_pred, all_output_true)
|
8c381eee394e989f8920cc52ad4b94ca4b502741
| 3,646,538
|
def reverse_dict2(d):
"""Reverses direction of dependence dict
>>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
>>> reverse_dict(d) # doctest: +SKIP
{1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict ordering is not deterministic. Since we iterate over the
        input dict, the output of this function depends on that order,
        so the output order should be considered non-deterministic.
"""
result = {}
for key in d:
for val in d[key]:
result[val] = result.get(val, tuple()) + (key, )
return result
|
2419538a13699015f8fefa156e89cf9b1960e358
| 3,646,539
|
import random
def Flip(p, y='Y', n='N'):
"""Returns y with probability p; otherwise n."""
return y if random.random() <= p else n
|
072e170e3f37508a04f8bdbed22470b178f05ab9
| 3,646,540
|
import pylearn2.datasets.avicenna
def resolve_avicenna(d):
"""
.. todo::
WRITEME
"""
return pylearn2.config.checked_call(pylearn2.datasets.avicenna.Avicenna,d)
|
b15ef76cee9d71da26b84025d191aab43c95b297
| 3,646,541
|
def sub_to_db(sub,
add_area=True,
area_srid=3005,
wkt=True,
wkb=False,
as_multi=True,
to_disk=False,
procs=1,
engine=None):
"""
Convert the object to a SQLite database. Returns the |db| module exposing
the database ORM and additional SQLAlchemy objects. Note that |procs|
greater than one results in the database being written to disk (if the
desired database is SQLite).
sub (SubOcgDataset) -- The object to convert to the database.
add_area=True -- Insert the geometric area.
area_srid=3005 -- SRID to use for geometric transformation.
    wkt=True -- Insert the geometry's WKT representation.
wkb=False -- Insert the geometry's WKB representation.
as_multi=True -- Convert geometries to shapely.MultiPolygon.
to_disk=False -- Write the database to disk (applicable for SQLite).
procs=1 -- Number of processes to use when loading data.
engine=None (sqlalchemy.Engine) -- An optional engine to pass overloading
the creation of other backends. Useful to use PostGRES instead of
SQLite for example.
"""
if engine is None:
use_lock = True
else:
use_lock = False
## initialize the db
db = init_db(engine=engine,to_disk=to_disk,procs=procs)
print(' loading geometry...')
## spatial reference for area calculation
sr = get_sr(4326)
sr2 = get_sr(area_srid)
# data = dict([[key,list()] for key in ['gid','wkt','wkb','area_m2']])
# for dd in self.dim_data:
# data['gid'].append(int(self.gid[dd]))
# geom = self.geometry[dd]
# if isinstance(geom,Polygon):
# geom = MultiPolygon([geom])
# if wkt:
# wkt = str(geom.wkt)
# else:
# wkt = None
# data['wkt'].append(wkt)
# if wkb:
# wkb = str(geom.wkb)
# else:
# wkb = None
# data['wkb'].append(wkb)
# data['area_m2'].append(get_area(geom,sr,sr2))
# self.load_parallel(db.Geometry,data,procs)
def f(idx,geometry=sub.geometry,gid=sub.gid,wkt=wkt,wkb=wkb,sr=sr,sr2=sr2,get_area=get_area):
geom = geometry[idx]
if isinstance(geom,Polygon):
geom = MultiPolygon([geom])
if wkt:
wkt = str(geom.wkt)
else:
wkt = None
if wkb:
wkb = str(geom.wkb)
else:
wkb = None
return(dict(gid=int(gid[idx]),
wkt=wkt,
wkb=wkb,
area_m2=get_area(geom,sr,sr2)))
fkwds = dict(geometry=sub.geometry,gid=sub.gid,wkt=wkt,wkb=wkb,sr=sr,sr2=sr2,get_area=get_area)
gen = pl.ParallelGenerator(db.Geometry,
sub.dim_data,
f,
fkwds=fkwds,
procs=procs,
use_lock=use_lock)
gen.load()
print(' loading time...')
## load the time data
data = dict([[key,list()] for key in ['tid','time','day','month','year']])
for dt in sub.dim_time:
data['tid'].append(int(sub.tid[dt]))
data['time'].append(sub.timevec[dt])
data['day'].append(sub.timevec[dt].day)
data['month'].append(sub.timevec[dt].month)
data['year'].append(sub.timevec[dt].year)
load_parallel(db.Time,data,procs,use_lock=use_lock)
print(' loading value...')
## set up parallel loading data
data = dict([key,list()] for key in ['gid','level','tid','value'])
for dt in sub.dim_time:
for dl in sub.dim_level:
for dd in sub.dim_data:
data['gid'].append(int(sub.gid[dd]))
data['level'].append(int(sub.levelvec[dl]))
data['tid'].append(int(sub.tid[dt]))
data['value'].append(float(sub.value[dt,dl,dd]))
load_parallel(db.Value,data,procs,use_lock=use_lock)
return(db)
|
6f3d3763a129a4235c0e5c0e884f7ab62bdfc391
| 3,646,542
|
def T_autoignition_methods(CASRN):
"""Return all methods available to obtain T_autoignition for the desired
chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain T_autoignition with the given inputs.
See Also
--------
T_autoignition
"""
if not _safety_data_loaded: _load_safety_data()
return list_available_methods_from_df_dict(Tautoignition_sources, CASRN, 'T_autoignition')
|
ab194547a1cc7b5eeb2032b1decad366bc4b43c2
| 3,646,543
|
import configparser
import os
def parse_config2(filename=None):
"""
https://docs.python.org/3.5/library/configparser.html
:param filename: filename to parse config
:return: config_parse result
"""
_config = configparser.ConfigParser(allow_no_value=True)
if filename:
# ConfigParser does not create a file if it doesn't exist, so I will create an empty one.
if not os.path.isfile(filename):
with open(filename, 'w', encoding='utf-8') as f:
print('', file=f)
_config.read_file(open(filename, encoding='utf-8'))
return _config
|
cf260f09e4c293915ab226b915aebed6cb98113f
| 3,646,544
|
def masa(jd, place):
"""Returns lunar month and if it is adhika or not.
1 = Chaitra, 2 = Vaisakha, ..., 12 = Phalguna"""
ti = tithi(jd, place)[0]
critical = sunrise(jd, place)[0] # - tz/24 ?
last_new_moon = new_moon(critical, ti, -1)
next_new_moon = new_moon(critical, ti, +1)
this_solar_month = raasi(last_new_moon)
next_solar_month = raasi(next_new_moon)
is_leap_month = (this_solar_month == next_solar_month)
maasa = this_solar_month + 1
if maasa > 12: maasa = (maasa % 12)
return [int(maasa), is_leap_month]
|
b8b7572f4b5dc597d844683e30c92be618e32c43
| 3,646,545
|
import numpy as np
from scipy.special import erf
def sigma(s):
"""The probablity a normal variate will be `<s` sigma from the mean.
Parameters
----------
s : float
The number of sigma from the mean.
Returns
-------
p : float
The probability that a value within +/-s would occur.
"""
return 0.5 * (erf(s / np.sqrt(2.0)) - erf(-s / np.sqrt(2.0)))
|
88727617b1cca678613818be8fdb90e114b25438
| 3,646,546
|
def addneq_parse_residualline(line: str) -> dict:
"""
    Parse a line of daily-solution residuals from an ADDNEQ file.
    Extract the station name, the direction (N/E/U), the standard deviation, and
    then an arbitrary number of daily residuals.
    A series of lines can look like this:
GESR N 0.07 0.02 -0.06
GESR E 0.10 -0.00 -0.10
GESR U 0.23 -0.10 0.20
"""
params = line.split()
return {
"STATION NAME": params[0],
"DIRECTION": params[1],
"STDDEV": float(params[2]),
"RES": [float(x) for x in params[3:]],
}
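# Applying the parser to the first example line from the docstring:
#
#     >>> addneq_parse_residualline("GESR N          0.07        0.02  -0.06")
#     {'STATION NAME': 'GESR', 'DIRECTION': 'N', 'STDDEV': 0.07, 'RES': [0.02, -0.06]}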
|
6d1556cbd01f3fe4cd66dcad231e41fa6b1b9470
| 3,646,547
|
import os
def extract_header(file_path):
"""
Loads the header from a PSG-type file at path 'file_path'.
Returns:
dictionary of header information
"""
fname = os.path.split(os.path.abspath(file_path))[-1]
_, ext = os.path.splitext(fname)
load_func = _EXT_TO_LOADER[ext[1:]]
header = load_func(file_path)
# Add file location data
file_path, file_name = os.path.split(file_path)
header['data_dir'] = file_path
header["file_name"] = file_name
return header
|
8eaccdd9b8252ea0a9651278895be517d0acc023
| 3,646,548
|
def get_xsd_schema(url):
"""Request the XSD schema from DOV webservices and return it.
Parameters
----------
url : str
URL of the XSD schema to download.
Returns
-------
xml : bytes
The raw XML data of this XSD schema as bytes.
"""
response = HookRunner.execute_inject_meta_response(url)
if response is None:
response = get_remote_url(url)
HookRunner.execute_meta_received(url, response)
return response
|
12f5088fea1b9268d75ee90d60b729c8a9577dd0
| 3,646,549
|
def get_char_pmi(data):
"""
    Get the PMI (pointwise mutual information) of adjacent words at the character level.
    :param data: iterable of whitespace-tokenized lines
    :return: list of lines annotated with char-level B/I tags and PMI values
"""
print('get_char_pmi')
model = kenlm.LanguageModel('../software/kenlm/test.bin')
res = []
for line in data:
words = line.strip().split()
length = len(words)
words.append('\n')
i = 0
pmi_out = ""
while i < length:
p_union = get_proba(model, words[i] + " " + words[i + 1])
p1 = get_proba(model, words[i])
p2 = get_proba(model, words[i + 1])
p = pmi(p_union, p1, p2)
            # split down to char level
word = words[i]
if len(word) > 0:
                # split down to char level
j = 0
char = word[j]
pmi_out += char + "@@"
pmi_out += "B#" + str(p) + " "
j += 1
while j < len(word):
pmi_out += word[j] + '@@'
pmi_out += 'I#' + str(p) + " "
j += 1
i += 1
# last_char = words[i]
# p_union = get_proba(model, last_char + " \n")
# p1 = get_proba(model, last_char)
# p2 = get_proba(model, '\n')
# p = pmi(p_union, p1, p2)
# pmi_out += last_char + "@@" + 'B#' + str(p)
res.append(pmi_out.strip())
return res
|
2cb28e7671561a52efbbf98431e3c938700f691a
| 3,646,550
|
def fahrenheit_to_celsius(fahrenheit):
"""Convert a Fahrenheit temperature to Celsius."""
return (fahrenheit - 32.0) / 1.8
|
4aee3dd0b54450fabf7a3a01d340b45a89caeaa3
| 3,646,551
|
import random
import itertools
def sample_blocks(num_layers, num_approx):
"""Generate approx block permutations by sampling w/o replacement. Leave the
first and last blocks as ReLU"""
perms = []
for _ in range(1000):
perms.append(sorted(random.sample(list(range(0,num_layers)), num_approx)))
# Remove duplicates
perms.sort()
return [p for p,_ in itertools.groupby(perms) if len(p) == num_approx]
|
b4b75e77b3749bc7766c709d86bf1f694898fc0d
| 3,646,552
|
import numpy as np
def adjacent_values(vals, q1, q3):
"""Helper function for violinplot visualisation (courtesy of
https://matplotlib.org/gallery/statistics/customized_violin.html#sphx-glr-gallery-statistics-customized-violin-py)
"""
upper_adjacent_value = q3 + (q3 - q1) * 1.5
upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
lower_adjacent_value = q1 - (q3 - q1) * 1.5
lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
return lower_adjacent_value, upper_adjacent_value
|
a596ed82a1d66213dbdd3f19b29d58b36979c60d
| 3,646,553
|
import numpy as np
def l2_first_moment(freq, n_trials, weights):
"""Return the first raw moment of the squared l2-norm of a vector (f-p), where `f` is an MLE
estimate
of the `p` parameter of the multinomial distribution with `n_trials`."""
return (np.einsum("aiai,ai->", weights, freq) - np.einsum("aiaj,ai,aj->", weights, freq, freq)) / n_trials
|
bf597aaa57759dc6d4f0ee1f5ed4f99f49ea271b
| 3,646,554
|
import numpy as np
def sigmoid(x: float, a: float = 1, b: float = 1, shift: float = 0) -> float:
"""
    Sigmoid function represented by b * \frac{1}{1 + e^{-a * (x - shift)}}
Args:
x (float): Input x
a (float, optional): Rate of inflection. Defaults to 1.
b (float, optional): Difference of lowest to highest value. Defaults to 1.
shift (float, optional): Horizontal shift. Defaults to 0.
Returns:
float: sigmoid function at x
"""
result = b * (1 / (1 + np.exp(-a * (x - shift))))
return result
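# Quick sanity check with the defaults (a = b = 1, shift = 0): at x = 0 the
# expression reduces to b / 2.
#
#     >>> float(sigmoid(0))
#     0.5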
|
761497db712619008c1261d2388cea997ae3fff8
| 3,646,555
|
def db_credentials():
"""Load creds and returns dict of postgres keyword arguments."""
creds = load_json('creds.json')
return {
'host': creds['db_host'],
'user': creds['db_username'],
'password': creds['db_password'],
'database': creds['db_database']
}
|
4248452ffb5a9c05b14449972c1db7a18d906b73
| 3,646,556
|
import logging
def generate_corpus_output( cfg, docL, tfidfL ):
""" Generate a list of OutputRecords where the number of key words
        is limited to the cfg.corpusKeyWordCount highest scoring terms.
(i.e. cfg.usePerDocWordCount == False)
"""
outL = []
# for the cfg.corpusKeyWordCount highest scoring keywords
for i,(wordL,tfidf,termNL) in enumerate(tfidfL[0:min(cfg.corpusKeyWordCount,len(tfidfL))]):
out_recd = OutputRecord(wordL,tfidf,termNL)
logging.info("%i %f %s",i,tfidf,wordL)
# for each document
for doc in docL:
doc.find_sentences_in_doc(out_recd)
outL.append(out_recd)
return outL
|
2296d319fd00022df73da9e7d8484adfd5ab16ad
| 3,646,557
|
import os
def expand(directory: str) -> str:
"""Apply expanduser and expandvars to directory to expand '~' and env vars."""
temp1 = os.path.expanduser(directory)
return os.path.expandvars(temp1)
|
ffad07715d5425211304e340c084c8f134bbcb22
| 3,646,558
|
import sys
def gather_ensemble_info(nmme_model):
"""Gathers ensemble information based on NMME model."""
# Number of ensembles in the forecast (ens_num)
# Ensemble start index (ens_start)
# Ensemble end index (ens_end)
if nmme_model == "CFSv2":
ens_num=24
ens_start=1
ens_end=24
elif nmme_model == "GEOSv2":
ens_num=10
ens_start=25
ens_end=34
elif nmme_model == "CCM4":
ens_num=10
ens_start=35
ens_end=44
elif nmme_model == "GNEMO":
ens_num=10
ens_start=45
ens_end=54
elif nmme_model == "CCSM4":
ens_num=10
ens_start=55
ens_end=64
elif nmme_model == "GFDL":
ens_num=30
ens_start=65
ens_end=94
else:
print(f"[ERR] Invalid argument for nmme_model! Received {nmme_model}")
sys.exit(1)
return ens_num, ens_start, ens_end
|
56516751dc87415b6a08541eadb02b67a5bc6629
| 3,646,559
|
from tqdm import tqdm
def harmonic_fitter(progressions, J_thres=0.01):
"""
Function that will sequentially fit every progression
with a simple harmonic model defined by B and D. The
"B" value here actually corresponds to B+C for a near-prolate,
or 2B for a prolate top.
There are a number of filters applied in order to minimize
calculations that won't be meaningful - these parameters
may have to be tuned for different test cases.
Because the model is not actually quantized, J is
represented as a float. To our advantage, this will
actually separate real (molecular) progressions from
fake news; at least half of the J values must be
close to being an integer for us to consider fitting.
parameters:
---------------
progressions - iterable containing arrays of progressions
J_thres - optional argument corresponding to how close a
value must be to an integer
returns:
---------------
pandas dataframe containing the fit results; columns
are B, D, fit RMS, and pairs of columns corresponding
to the fitted frequency and approximate J value.
"""
BJ_fit_model = lmfit.models.Model(calc_harmonic_transition)
params = BJ_fit_model.make_params()
data = list()
fit_objs = list()
failed = list()
for index, progression in tqdm(enumerate(progressions)):
# Determine the approximate value of B based on
# the differences between observed transitions
approx_B = np.average(np.diff(progression))
# Calculate the values of J that are assigned based on B
J = (progression / approx_B) / 2.0
# We want at least half of the lines to be close to being integer
if len(progression) >= 2:
if np.sum(quant_check(J, J_thres)) >= len(progression) / 1.5:
# Let B vary a bit
params["B"].set(approx_B, min=approx_B * 0.9, max=approx_B * 1.1)
# Constrain D to be less than 5 MHz
params["D"].set(0.001, min=0.0, max=1.0)
fit = BJ_fit_model.fit(
data=progression, J=J, params=params, fit_kws={"maxfev": 100}
)
# Only include progressions that can be fit successfully
if fit.success is True:
# Calculate fit RMS
rms = np.sqrt(np.average(np.square(fit.residual)))
# Only add it to the list of the RMS is
# sufficiently low
return_dict = dict()
return_dict["RMS"] = rms
return_dict.update(fit.best_values)
# Make columns for frequency and J
for i, frequency in enumerate(progression):
return_dict[i] = frequency
return_dict["J{}".format(i)] = J[i]
data.append(return_dict)
fit_objs.append(fit)
else:
failed.append([index, fit.fit_report()])
else:
failed.append(index)
else:
return_dict = dict()
return_dict["RMS"] = 0.0
return_dict["B"] = approx_B
# reformat the frequencies and approximate J values
for i, frequency in enumerate(progression):
return_dict[i] = frequency
return_dict["J{}".format(i)] = J[i]
data.append(return_dict)
full_df = pd.DataFrame(data=data)
full_df.sort_values(["RMS", "B", "D"], ascending=False, inplace=True)
return full_df, fit_objs
|
55a2c4080938c947501ed830f4236ca8f87608e8
| 3,646,560
|
from collections import defaultdict
from scipy import stats
def print_KruskalWallisH(div_calc):
"""
Compute the Kruskal-Wallis H-test for independent samples. A typical rule is that
each group must have at least 5 measurements.
"""
calc = defaultdict(list)
try:
        for k1, v1 in div_calc.items():
            for k2, v2 in v1.items():
calc[k1].append(v2)
except:
return "Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "\
"significance testing."
h, p = stats.kruskal(*calc.values())
print "\nKruskal-Wallis H-test statistic for {} groups: {}".format(str(len(div_calc)), h)
print "p-value: {}".format(p)
|
74579ad2f9ee4336ab33f099982a9419d723774e
| 3,646,561
|
import random
import string
def _random_exptname():
"""Generate randome expt name NNNNNNNN_NNNNNN, where N is any number 0..9"""
r = ''.join(random.choice(string.digits) for _ in range(8))
r = r + '_' + ''.join(random.choice(string.digits) for _ in range(6))
return r
|
d9c72ed4bf742adf50e1fdad4f6acb1cc0046167
| 3,646,562
|
def remove_store(store_name):
""" Deletes the named data store.
:param store_name:
:return:
"""
return get_data_engine().remove_store(store_name)
|
ea8ada276095c2ceb85b339b2a925fa53fd93a1e
| 3,646,563
|
from typing import Union
from pathlib import Path
import subprocess
def is_tracked_upstream(folder: Union[str, Path]) -> bool:
"""
Check if the current checked-out branch is tracked upstream.
"""
try:
command = "git rev-parse --symbolic-full-name --abbrev-ref @{u}"
subprocess.run(
command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
check=True,
cwd=folder,
)
return True
except subprocess.CalledProcessError as exc:
if "HEAD" in exc.stderr:
raise OSError("No branch checked out")
return False
|
8c58efe0d0619aaa6517d656ba88f1e29653197a
| 3,646,564
|
import random
from collections import defaultdict
def limit_checkins_per_user(checkins: list, num_checkins_per_user: int, random_seed=1):
"""
Limit for each user a maximum number of check-ins by randomly select check-ins.
Parameters
----------
checkins: list
list of check-ins
num_checkins_per_user: int
max number of check-ins per user, -1 for unlimited
random_seed: int
a random seed for random check-ins selection
Returns
-------
list
limited check-ins
"""
if num_checkins_per_user < 0:
return checkins
# convert check-in list to dict per user
checkins_per_user = defaultdict(list)
for c in checkins:
checkins_per_user[c.user_id].append(c)
# randomly select check-ins of users
limited_checkins = list()
for user_id, v in checkins_per_user.items():
if len(v) <= num_checkins_per_user:
# there are not enough check-ins, so get them all
limited_checkins.extend(v)
else:
# there are more check-ins than needed, so randomly choose some of them
random.seed(random_seed)
limited_checkins.extend(random.sample(v, k=num_checkins_per_user))
return limited_checkins
|
286760c3630162b78c314f9f8be0943350f47859
| 3,646,565
|
import warnings
def getCharacterFilmography(characterID, charIF, charDF, movieIF, movieKF,
personIF, personKF, limit=None):
"""Build a filmography list for the specified characterID."""
try:
ifptr = open(charIF, 'rb')
except IOError, e:
warnings.warn('Unable to access characters information, '
'please run the characters4local.py script: %s' % e)
return None
ifptr.seek(4L*characterID)
piddata = ifptr.read(4)
ifptr.close()
if len(piddata) != 4:
return None
idx = convBin(piddata, 'fulloffset')
try:
dfptr = open(charDF, 'rb')
except IOError, e:
warnings.warn('Unable to access characters information, '
'please run the characters4local.py script: %s' % e)
return None
dfptr.seek(idx)
# Check characterID.
chID = dfptr.read(3)
if characterID != convBin(chID, 'characterID'):
dfptr.close()
return None
length = convBin(dfptr.read(2), 'longlength')
# Skip character name.
latin2utf(dfptr.read(length))
nrItems = convBin(dfptr.read(3), 'nrCharacterItems')
if limit is not None and nrItems/2 > limit:
nrItems = limit*2
filmography = []
for i in xrange(nrItems/2):
personID = convBin(dfptr.read(3), 'personID')
name = getLabel(personID, personIF, personKF)
movieID = convBin(dfptr.read(3), 'movieID')
title = getLabel(movieID, movieIF, movieKF)
# XXX: notes are not retrieved: they can be found scanning
# actors.list and acresses.list, but it will slow down everything.
m = Movie(title=title, movieID=movieID, currentRole=name,
roleID=personID, roleIsPerson=True, accessSystem='local')
filmography.append(m)
dfptr.close()
return filmography
|
ddf7f1da3e95441a2da9d3fe2f16065e0a13f634
| 3,646,566
|
def sqrt_fixed_full(x, config, is_training=True, causal=True):
"""Full attention matrix with sqrt decomposition."""
bsize = x.shape[0]
query, key, value = attention.get_qkv(x, x, x, hidden_size=config.model_size,
num_heads=config.num_heads,
bias=config.dense_use_bias)
head_dim = config.model_size // config.num_heads
assert config.max_seq_len % config.max_seg_len == 0
num_seg = config.max_seq_len // config.max_seg_len
cur_query = tf.reshape(query, [-1,
num_seg,
config.max_seg_len,
config.num_heads,
head_dim])
with tf.variable_scope('pooling_query'):
merged_query = pooling_summary(cur_query, axis=2,
local_summary=config.local_summary,
keepdims=True)
cur_key = tf.reshape(key, cur_query.shape)
cur_val = tf.reshape(value, cur_query.shape)
span_val = attention.dot_product_attention(merged_query,
cur_key,
cur_val,
is_training=is_training,
attn_axis=1,
dropatt=config.dropatt)
span_val = tf.squeeze(span_val, axis=2)
with tf.variable_scope('pooling_key'):
span_key = pooling_summary(cur_key, axis=2,
local_summary=config.local_summary,
keepdims=False)
local_logits = tf.einsum('bsqhd,bskhd->bsqhk', cur_query, cur_key)
if causal:
local_mask = get_causal_mask(cur_query, axis=2, is_strict=False)
local_mask = tf.expand_dims(local_mask, axis=-2)
local_logits += local_mask
prev_logits = tf.einsum('bqhd,bkhd->bqhk', query, span_key)
if causal:
prev_mask = get_causal_mask(cur_query, axis=1, is_strict=True)
prev_mask = tf.repeat(prev_mask, [config.max_seg_len] * num_seg, axis=0)
prev_logits += tf.expand_dims(prev_mask, axis=1)
joint_logits = tf.concat([tf.reshape(local_logits,
[bsize, config.max_seq_len,
config.num_heads, -1]),
prev_logits], axis=-1)
attn_weights = attention.float32_softmax(joint_logits, axis=-1)
local_att, prev_att = tf.split(attn_weights, [config.max_seg_len, num_seg],
axis=-1)
if is_training:
local_att = tf.nn.dropout(local_att, rate=config.dropatt)
local_att = tf.reshape(local_att, [bsize, num_seg,
config.max_seg_len,
config.num_heads,
config.max_seg_len])
local_merged = tf.einsum('bsqhk,bskhd->bsqhd', local_att, cur_val)
prev_merged = tf.einsum('bqhk,bkhd->bqhd', prev_att, span_val)
joint_merged = prev_merged + tf.reshape(local_merged, prev_merged.shape)
output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2)
return output
|
3ee88f2adf767c6fb6e0f1c006ff301c45ffc322
| 3,646,567
|
def mcf_from_row(row, gene_to_dcid_list):
"""Generate data mcf from each row of the dataframe"""
gene = row['Gene name']
tissue = get_class_name(row['Tissue'])
cell = get_class_name(row['Cell type'])
expression = EXPRESSION_MAP[row['Level']]
reliability = RELIABILITY_MAP[row['Reliability']]
if gene not in gene_to_dcid_list:
# skip case when there is no gene to dcid mapping
return None
dcid_list = gene_to_dcid_list[gene]
mcf_list = []
for protein_dcid in dcid_list:
mcf_list.append(
generate_mcf(protein_dcid, tissue, cell, expression, reliability))
return '\n\n'.join(mcf_list)
|
ee78c68bb89a100fa4e0b972d0907e14dcb6d289
| 3,646,568
|
def loads(json_str, target=None):
"""
Shortcut for instantiating a new :class:`JSONDecoder` and calling the :func:`from_json_str` function.
.. seealso::
For more information you can look at the doc of :func:`JSONDecoder.from_json_str`.
"""
return _decoder.from_json_str(json_str, target)
|
76eab90dd544d695f55967969d81ef9cccb1c2fd
| 3,646,569
|
def discover(discover_system: bool = True) -> Discovery:
"""
Discover capabilities offered by this extension.
"""
logger.info("Discovering capabilities from aws-az-failure-chaostoolkit")
discovery = initialize_discovery_result(
"aws-az-failure-chaostoolkit", __version__, "aws"
)
discovery["activities"].extend(__load_exported_activities())
return discovery
|
ad9b7674f8f8f7cc06ce21dacba2138231b7e69c
| 3,646,570
|
def getter_nofancy(a, b, asarray=True, lock=None):
""" A simple wrapper around ``getter``.
Used to indicate to the optimization passes that the backend doesn't
support fancy indexing.
"""
return getter(a, b, asarray=asarray, lock=lock)
|
63e355eb3245c8f394c345fb2ebd4e469fcd7500
| 3,646,571
|
def xy_to_array_origin(image):
"""Return view of image transformed from Cartesian to array origin."""
return rgb_transpose(image[:, ::-1])
|
e2e47f95093e1808cfbe7c2ba28af8c3e5b40307
| 3,646,572
|
import csv
from collections import namedtuple
def read_csv(infile, delimiter=',', encoding='utf-8', named=False):
"""Reads a csv as a list of lists (unnamed) or a list of named tuples (named)
Args:
string infile: the file to read in
OPTIONAL:
string delimiter: the delimiter used (default ',')
encoding encoding: the encoding of the file (default 'utf-8')
boolean named: if true, loads rows as named tuples
(default lists), (default False)
Returns list of lists or named tuples"""
with open(infile, encoding=encoding) as f:
reader = csv.reader(f, delimiter=delimiter)
if named:
headers = next(reader)
# strip spaces and annoying things from headers
names = [identifier.replace('-', '_').replace(' ', '_').lower()
for identifier in headers]
Data = namedtuple("Data", names)
named_rows = map(Data._make, reader)
return [row for row in named_rows]
else:
        return list(reader)
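# Usage sketch: "people.csv" and its column names are placeholders for
# whatever file the caller actually has on disk.
if __name__ == "__main__":
    rows = read_csv("people.csv", named=True)  # headers become field names
    for row in rows[:3]:
        print(row.first_name, row.last_name)   # hypothetical columns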
|
7318293d884fa80a7d93d8046f66b3801d809f42
| 3,646,573
|
def is_ivy_enabled(ctx):
"""Determine if the ivy compiler should be used to by the ng_module.
Args:
ctx: skylark rule execution context
Returns:
Boolean, Whether the ivy compiler should be used.
"""
# Check the renderer flag to see if Ivy is enabled.
# This is intended to support a transition use case for google3 migration.
# The `_renderer` attribute will never be set externally, but will always be
# set internally as a `string_flag()` with the allowed values of:
# "view_engine" or "ivy".
if ((hasattr(ctx.attr, "_renderer") and
ctx.attr._renderer[BuildSettingInfo].value == "ivy")):
return True
# This attribute is only defined in google's private ng_module rule and not
# available externally. For external users, this is effectively a no-op.
if hasattr(ctx.attr, "ivy") and ctx.attr.ivy == True:
return True
if ctx.var.get("angular_ivy_enabled", None) == "True":
return True
# Enable Angular targets extracted by Kythe Angular indexer to be compiled with the Ivy compiler architecture.
# TODO(ayazhafiz): remove once Ivy has landed as the default in g3.
if ctx.var.get("GROK_ELLIPSIS_BUILD", None) != None:
return True
# Return false to default to ViewEngine compiler
return False
|
57018cd180e3cfb580874249a31731f6762f4900
| 3,646,574
|
def get_directions_id(destination):
"""Get place ID for directions, which is place ID for associated destination, if an event"""
if hasattr(destination, 'destination'):
# event with a related destination; use it for directions
if destination.destination:
return destination.destination.id
else:
# event without a destination
return None
else:
# not an event
return destination.id
|
f7cd182cb5ea344c341bf9bfaa7a4389335ae353
| 3,646,575
|
import jwt
from sqlalchemy.orm import exc as orm_exc
def decode_token(params, token_field=None):
"""
This function is used to decode the jwt token into the data that was used
to generate it
Args:
session_obj: sqlalchemy obj used to interact with the db
params: json data received with request
token_field: name of the field that token can be found in
Return:
resulting data from the token decode process
"""
try:
if not token_field:
token = params[TOKEN_FIELD]
else:
token = params[token_field]
# token_use_details = find_token_use(session_obj, token)
# check_token_validate_period(session_obj, token_use_details)
account_details = jwt.decode(token, _SECRET, algorithms=ALGORITHM)
# check_login_access_revoked(
# session_obj, account_details, token_use_details
# )
# extend_token_validity(session_obj, token_use_details)
return account_details
except orm_exc.NoResultFound:
raise exc.LoggedOutError()
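# Usage sketch (illustrative): _SECRET, ALGORITHM and TOKEN_FIELD are module
# constants in the original code base; the stand-in values below exist only so
# a token can be round-tripped through jwt.encode/jwt.decode here.
if __name__ == "__main__":
    TOKEN_FIELD, _SECRET, ALGORITHM = "token", "change-me", ["HS256"]
    demo_token = jwt.encode({"account_id": 42}, _SECRET, algorithm="HS256")
    print(decode_token({TOKEN_FIELD: demo_token}))  # -> {'account_id': 42}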
|
8adac31df7d5659c06f5c4d66fc86ae556531aae
| 3,646,576
|
from matplotlib import pyplot
import numpy
def gas_arrow(ods, r, z, direction=None, snap_to=numpy.pi / 4.0, ax=None, color=None, pad=1.0, **kw):
"""
Draws an arrow pointing in from the gas valve
:param ods: ODS instance
:param r: float
R position of gas injector (m)
:param z: float
Z position of gas injector (m)
:param direction: float
Direction of injection (radians, COCOS should match ods.cocos). None = try to guess.
:param snap_to: float
Snap direction angle to nearest value. Set snap to pi/4 to snap to 0, pi/4, pi/2, 3pi/4, etc. No in-between.
:param ax: axes instance into which to plot (default: gca())
:param color: matplotlib color specification
:param pad: float
Padding between arrow tip and specified (r,z)
"""
def pick_direction():
"""Guesses the direction for the arrow (from injector toward machine) in case you don't know"""
dr = ods['equilibrium']['time_slice'][0]['global_quantities']['magnetic_axis']['r'] - r
dz = ods['equilibrium']['time_slice'][0]['global_quantities']['magnetic_axis']['z'] - z
theta = numpy.arctan2(dz, -dr)
if snap_to > 0:
theta = snap_to * round(theta / snap_to)
return theta
if direction is None:
direction = pick_direction()
else:
direction = cocos_transform(ods.cocos, 11)['BP'] * direction
if ax is None:
ax = pyplot.gca()
shaft_len = 3.5 * (1 + pad) / 2.
da = numpy.pi / 10 # Angular half width of the arrow head
x0 = numpy.cos(-direction) * pad
y0 = numpy.sin(-direction) * pad
head_mark = [
(x0, y0),
(x0 + numpy.cos(-direction + da), y0 + numpy.sin(-direction + da)),
(x0 + numpy.cos(-direction), y0 + numpy.sin(-direction)),
(x0 + shaft_len * numpy.cos(-direction), y0 + shaft_len * numpy.sin(-direction)),
(x0 + numpy.cos(-direction), y0 + numpy.sin(-direction)),
(x0 + numpy.cos(-direction - da), y0 + numpy.sin(-direction - da)),
]
kw.pop('marker', None) # Ignore this
return ax.plot(r, z, marker=head_mark, color=color, markersize=100 * (pad + shaft_len) / 5, **kw)
|
04ef7568cf6b6f7e9357fa05ae3996ecf74ab3c1
| 3,646,577
|
import sys
import numpy as np
def get_word_vector(text, model, num):
"""
:param text: list of words
:param model: word2vec model in Gensim format
:param num: number of the word to exclude
:return: average vector of words in text
"""
# Creating list of all words in the document which are present in the model
excl_word = text[num]
words = [w for w in text if w in model and w != excl_word]
lexicon = list(set(words))
lw = len(lexicon)
if lw < 1:
print('Empty lexicon in', text, file=sys.stderr)
return np.zeros(model.vector_size)
vectors = np.zeros((lw, model.vector_size)) # Creating empty matrix of vectors for words
for i in list(range(lw)): # Iterate over words in the text
word = lexicon[i]
vectors[i, :] = model[word] # Adding word and its vector to matrix
semantic_fingerprint = np.sum(vectors, axis=0) # Computing sum of all vectors in the document
semantic_fingerprint = np.divide(semantic_fingerprint, lw) # Computing average vector
return semantic_fingerprint
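# Usage sketch: assumes a word2vec model loadable with gensim; the model path
# is a placeholder, and num=1 excludes the second word from the average.
if __name__ == "__main__":
    import gensim
    w2v = gensim.models.KeyedVectors.load_word2vec_format("vectors.bin", binary=True)
    doc = ["cat", "dog", "house"]
    print(get_word_vector(doc, w2v, num=1).shape)  # average of "cat" and "house"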
|
586ea6fdd7845ed21322d824b0bc7155345899ed
| 3,646,578
|
def find_storage_pool_type(apiclient, storagetype='NetworkFileSystem'):
"""
@name : find_storage_pool_type
@Desc : Returns true if the given storage pool type exists
@Input : type : type of the storage pool[NFS, RBD, etc.,]
@Output : True : if the type of storage is found
False : if the type of storage is not found
FAILED In case the cmd failed
"""
cmd = listStoragePools.listStoragePoolsCmd()
cmd_out = apiclient.listStoragePools(cmd)
if validateList(cmd_out)[0] != PASS:
return FAILED
for storage_pool in cmd_out:
if storage_pool.type.lower() == storagetype:
return True
return False
|
1d3e64185e0361f02a8cc7e2e4316895e22e517e
| 3,646,579
|
from typing import Any
from typing import cast
def parse_year(candidate: Any) -> int:
"""Parses the given candidate as a year literal. Raises a ValueError
when the candidate is not a valid year."""
if candidate is not None and not isinstance(candidate, int):
raise TypeError("Argument year is expected to be an int, "
"but is {}".format(type(candidate)))
return cast(int, candidate)
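# Quick illustration of the validation behaviour (added, not from the source).
if __name__ == "__main__":
    print(parse_year(1999))  # -> 1999
    print(parse_year(None))  # -> None (the cast only affects static typing)
    try:
        parse_year("1999")
    except TypeError as err:
        print(err)           # non-int input is rejected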
|
337cc3be16e1e1246d1d1f02b55665c655fe131f
| 3,646,580
|
import numpy as np
def dropout2d(tensor: Tensor, p: float = 0.2) -> Tensor:
"""
Method performs 2D channel-wise dropout with a autograd tensor.
:param tensor: (Tensor) Input tensor
    :param p: (float) Probability that an activation channel is set to zero
:return: (Tensor) Output tensor
"""
# Check argument
assert 0.0 <= p <= 1.0, 'Parameter p must be in the range of [0, 1].'
# Apply dropout
    mask = (np.random.rand(tensor.shape[1]) > p).astype(float).reshape(1, -1, 1, 1)  # keep each channel with prob. 1 - p
output = tensor.data * mask
# Check if grad is needed
requires_grad = tensor.requires_grad
# Add grad function
dependencies = [Dependency(tensor, lambda grad: grad * mask)] if requires_grad else None
return Tensor(data=output, requires_grad=requires_grad, dependencies=dependencies)
|
6719fa5a3e55665770faf1034677642d78561f83
| 3,646,581
|
def svn_repos_finish_report(*args):
"""svn_repos_finish_report(void * report_baton, apr_pool_t pool) -> svn_error_t"""
return _repos.svn_repos_finish_report(*args)
|
19b42660beb7fa5995a8c5e6e0cb5df39116ddb5
| 3,646,582
|
import array
import itertools
import eulerlib
def problem451():
"""
Consider the number 15.
There are eight positive numbers less than 15 which are coprime to 15: 1,
2, 4, 7, 8, 11, 13, 14.
The modular inverses of these numbers modulo 15 are: 1, 8, 4, 13, 2, 11,
7, 14
because
1*1 mod 15=1
2*8=16 mod 15=1
4*4=16 mod 15=1
7*13=91 mod 15=1
11*11=121 mod 15=1
14*14=196 mod 15=1
Let I(n) be the largest positive number m smaller than n-1 such that the
modular inverse of m modulo n equals m itself.
So I(15)=11.
Also I(100)=51 and I(7)=1.
Find ∑I(n) for 3≤n≤2·10^7
"""
LIMIT = 20000000
# Build table of smallest prime factors
smallestprimefactor = array.array("L", itertools.repeat(0, LIMIT + 1))
end = eulerlib.sqrt(len(smallestprimefactor) - 1)
for i in range(2, len(smallestprimefactor)):
if smallestprimefactor[i] == 0:
smallestprimefactor[i] = i
if i <= end:
for j in range(i * i, len(smallestprimefactor), i):
if smallestprimefactor[j] == 0:
smallestprimefactor[j] = i
# Returns all the solutions (in ascending order) such that
# for each k, 1 <= k < n and k^2 = 1 mod n.
def get_solutions(n):
if smallestprimefactor[n] == n: # n is prime
return (1, n - 1)
else:
temp = []
p = smallestprimefactor[n]
sols = solutions[n // p]
for i in range(0, n, n // p):
for j in sols:
k = i + j
if k * k % n == 1:
temp.append(k)
return tuple(temp)
# Process every integer in range
solutions = [(), (), (1,)]
ans = 0
for i in range(3, LIMIT + 1):
sols = get_solutions(i)
if i <= LIMIT // 2:
solutions.append(sols)
ans += sols[-2] # Second-largest solution
return ans
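# Small brute-force cross-check (added): I(n) is the largest m < n-1 with
# m*m % n == 1; the values below match the examples in the docstring.
def _brute_force_I(n):
    return max(m for m in range(1, n - 1) if m * m % n == 1)

if __name__ == "__main__":
    assert _brute_force_I(15) == 11 and _brute_force_I(100) == 51 and _brute_force_I(7) == 1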
|
efb000a8f367cf13e7aec2117efed092e3d5a5f3
| 3,646,583
|
import torch
import dgl
def collate_molgraphs(data):
"""Batching a list of datapoints for dataloader.
Parameters
----------
data : list of 3-tuples or 4-tuples.
Each tuple is for a single datapoint, consisting of
a SMILES, a DGLGraph, all-task labels and optionally
a binary mask indicating the existence of labels.
Returns
-------
smiles : list
List of smiles
bg : BatchedDGLGraph
Batched DGLGraphs
labels : Tensor of dtype float32 and shape (B, T)
Batched datapoint labels. B is len(data) and
T is the number of total tasks.
masks : Tensor of dtype float32 and shape (B, T)
Batched datapoint binary mask, indicating the
existence of labels. If binary masks are not
provided, return a tensor with ones.
"""
assert len(data[0]) in [3, 4], \
'Expect the tuple to be of length 3 or 4, got {:d}'.format(len(data[0]))
if len(data[0]) == 3:
smiles, graphs, labels = map(list, zip(*data))
masks = None
else:
smiles, graphs, labels, masks = map(list, zip(*data))
bg = dgl.batch(graphs)
bg.set_n_initializer(dgl.init.zero_initializer)
bg.set_e_initializer(dgl.init.zero_initializer)
labels = torch.stack(labels, dim=0)
if masks is None:
masks = torch.ones(labels.shape)
else:
masks = torch.stack(masks, dim=0)
return smiles, bg, labels, masks
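# Usage sketch (illustrative): a toy two-molecule batch built by hand; in
# practice the (smiles, graph, label) triples come from a molecular dataset,
# and a DGL version exposing dgl.graph/set_n_initializer is assumed.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    toy = [("CCO", dgl.graph(([0, 1], [1, 2])), torch.tensor([1.0])),
           ("CCN", dgl.graph(([0, 1], [1, 2])), torch.tensor([0.0]))]
    loader = DataLoader(toy, batch_size=2, collate_fn=collate_molgraphs)
    smiles, bg, labels, masks = next(iter(loader))
    print(smiles, bg.batch_size, labels.shape, masks.shape)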
|
3ff726fca71ab64ec1e2e665babd8f46b027e819
| 3,646,584
|
def reshape_practice(x):
"""
Given an input tensor of shape (24,), return a reshaped tensor y of shape
(3, 8) such that
y = [
[x[0], x[1], x[2], x[3], x[12], x[13], x[14], x[15]],
[x[4], x[5], x[6], x[7], x[16], x[17], x[18], x[19]],
[x[8], x[9], x[10], x[11], x[20], x[21], x[22], x[23]],
]
You must construct y by performing a sequence of reshaping operations on x
(view, t, transpose, permute, contiguous, reshape, etc). The input tensor
should not be modified.
Input:
- x: A tensor of shape (24,)
Returns:
- y: A reshaped version of x of shape (3, 8) as described above.
"""
y = None
#############################################################################
# TODO: Implement this function #
#############################################################################
# Replace "pass" statement with your code
    y = x.contiguous().view(2, 3, 4)
    y = y.transpose(0, 1)
    y = y.contiguous().view(3, -1)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return y
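# Verification sketch (added): with x = arange(24) the first row of y should
# read 0, 1, 2, 3, 12, 13, 14, 15, matching the layout in the docstring.
if __name__ == "__main__":
    import torch
    x = torch.arange(24)
    y = reshape_practice(x)
    print(y.shape)  # torch.Size([3, 8])
    print(y[0])     # tensor([ 0,  1,  2,  3, 12, 13, 14, 15])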
|
b7d71df428ca13729d908771b6e5ce14aa5662e2
| 3,646,585
|
def recouvrement_view(request, id):
"""
    Detail view function.
"""
user = request.user
recouvrement = Recouvrement.objects.filter(user=user).get(id=id)
context = {
'recouvrement': recouvrement,
}
template_name = 'pages/recouvrement/recouvrement_view.html'
return render(request, template_name, context)
|
f0e26257a39ef385b9dfaa51bff68b0fec51a263
| 3,646,586
|
def getFileServicesNames(fileServices=None, verbose=True):
"""
Returns the names and description of the fileServices available to the user.
:param fileServices: a list of FileService objects (dictionaries), as returned by Files.getFileServices(). If not set, then an extra internal call to Jobs.getFileServices() is made.
:param verbose: boolean parameter defining whether warnings will be printed (set to True) or not (set to False).
:return: an array of dicts, where each dict has the name and description of a file service available to the user.
:raises: Throws an exception if the user is not logged into SciServer (use Authentication.login for that purpose). Throws an exception if the HTTP request to the RACM API returns an error.
:example: fileServiceNames = Files.getFileServicesNames();
.. seealso:: Files.getFileServices
"""
if fileServices is None:
        fileServices = getFileServices(verbose)
    fileServiceNames = []
for fileService in fileServices:
fileServiceNames.append({"name":fileService.get('name'),"description":fileService.get('description')})
return fileServiceNames
|
ef476f2c661dadebee8e8a16863ff2f4c286d99e
| 3,646,587
|
def username_in_path(username, path_):
"""Checks if a username is contained in URL"""
if username in path_:
return True
return False
|
131a8fa102fd0a0f036da81030b005f92ea9aab0
| 3,646,588
|
def str_parse_as_utf8(content) -> str:
"""Returns the provided content decoded as utf-8."""
return content.decode('utf-8')
|
75b8d5f1f8867c50b08146cc3edc1d0ab630280a
| 3,646,589
|
def TypeProviderClient(version):
"""Return a Type Provider client specially suited for listing types.
Listing types requires many API calls, some of which may fail due to bad
user configurations which show up as errors that are retryable. We can
alleviate some of the latency and usability issues this causes by tuning
the client.
Args:
version: DM API version used for the client.
Returns:
A Type Provider API client.
"""
main_client = apis.GetClientInstance('deploymentmanager', version.id)
main_client.num_retries = 2
return main_client.typeProviders
|
2e735b37d01b9a9a0b44d5cf04acd89d2a8d9b90
| 3,646,590
|
from typing import Optional
from datetime import datetime
from typing import List
from typing import Dict
from typing import Any
def create_indicator(
pattern: str,
pattern_type: str,
created_by: Optional[Identity] = None,
name: Optional[str] = None,
description: Optional[str] = None,
valid_from: Optional[datetime] = None,
kill_chain_phases: Optional[List[KillChainPhase]] = None,
labels: Optional[List[str]] = None,
confidence: Optional[int] = None,
object_markings: Optional[List[MarkingDefinition]] = None,
x_opencti_main_observable_type: Optional[str] = None,
x_opencti_score: Optional[int] = None,
) -> STIXIndicator:
"""Create an indicator."""
custom_properties: Dict[str, Any] = {X_OPENCTI_SCORE: DEFAULT_X_OPENCTI_SCORE}
if x_opencti_score is not None:
custom_properties[X_OPENCTI_SCORE] = x_opencti_score
if x_opencti_main_observable_type is not None:
custom_properties[
X_OPENCTI_MAIN_OBSERVABLE_TYPE
] = x_opencti_main_observable_type
return STIXIndicator(
id=_create_random_identifier("indicator"),
created_by_ref=created_by,
name=name,
description=description,
pattern=pattern,
pattern_type=pattern_type,
valid_from=valid_from,
kill_chain_phases=kill_chain_phases,
labels=labels,
confidence=confidence,
object_marking_refs=object_markings,
custom_properties=custom_properties,
)
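# Usage sketch (illustrative): the STIX pattern below is a made-up IPv4
# indicator, and the module-level STIXIndicator alias and helpers are assumed
# to be importable as in the original code base.
if __name__ == "__main__":
    indicator = create_indicator(
        pattern="[ipv4-addr:value = '198.51.100.7']",
        pattern_type="stix",
        name="Suspicious IP",
        x_opencti_score=60,
    )
    print(indicator.id, indicator.pattern)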
|
421e9d1d060709facb9a8b8d6831b6a45ef479c9
| 3,646,591
|
import pandas as pd
def import_data(
path_to_csv: str,
response_colname: str,
standards_colname: str,
header: int = 0,
nrows: int = None,
skip_rows: int = None,
) -> pd.DataFrame:
"""Import standard curve data from a csv file.
Args:
path_to_csv: Refer to pd.read_csv docs.
response_colname: Name of column with response data.
standards_colname: Name of column with standard concentrations.
header: Refer to pd.read_csv().
nrows: Refer to pd.read_csv().
skip_rows: Skips the first n rows when reading data.
# kwargs: Additional arguments to parse to pd.read_csv().
Returns:
Formatted data as a dataframe.
Raises:
ValueError: If response_colname or standards_colname not in data.columns
"""
data = pd.read_csv(path_to_csv, header=header, nrows=nrows)
if skip_rows:
data = data.iloc[skip_rows:, :]
data.dropna(axis=1, how="all", inplace=True)
data.dropna(inplace=True)
data.rename({response_colname: "response", standards_colname: "standard_concentrations"}, axis=1, inplace=True)
try:
return data.loc[:, ["standard_concentrations", "response"]]
except KeyError:
raise ValueError("Check `response_colname` and `standards_colname` values are valid column names.")
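# Usage sketch: the file name and column headers are placeholders for the
# caller's own standard-curve export.
if __name__ == "__main__":
    df = import_data(
        "standards.csv",
        response_colname="Absorbance",
        standards_colname="Concentration (ng/mL)",
        skip_rows=2,
    )
    print(df.head())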
|
fffc650ac7b672e0585b0dc307977c4adf9a0a69
| 3,646,592
|
import numpy as np
def plus(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """Matrix addition: returns the element-wise sum when shapes match."""
if x.shape == y.shape:
return x + y
|
9d042d90c8d3ca9588c02ddd9ed53ec725785d13
| 3,646,593
|
import numpy as np
def add_noise(wave, noise, fs, snr, start_time, duration, wave_power):
"""Add a noise to wave.
"""
noise_power = np.dot(noise, noise) / noise.shape[0]
scale_factor = np.sqrt(10**(-snr/10.0) * wave_power / noise_power)
noise = noise * scale_factor
offset = int(start_time * fs)
add_length = min(wave.shape[0] - offset, int(duration * fs), noise.shape[0])
if add_length > 0:
wave[offset: offset + add_length] += noise[0: add_length]
return wave
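# Usage sketch: mixes 0.5 s of synthetic white noise into a 1 s, 16 kHz sine
# wave at 10 dB SNR, starting 0.25 s in; all signals here are placeholders.
if __name__ == "__main__":
    fs = 16000
    t = np.arange(fs) / fs
    wave = np.sin(2 * np.pi * 440 * t)
    noise = np.random.randn(fs // 2)
    wave_power = np.dot(wave, wave) / wave.shape[0]
    mixed = add_noise(wave, noise, fs, snr=10, start_time=0.25,
                      duration=0.5, wave_power=wave_power)
    print(mixed.shape)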
|
3f8df3098751b081f93b61da16682bdac2bf6a02
| 3,646,594
|
def _sort_factors(factors, **args):
"""Sort low-level factors in increasing 'complexity' order."""
def order_if_multiple_key(factor):
f, n = factor
return len(f), n, default_sort_key(f)
def order_no_multiple_key(f):
return len(f), default_sort_key(f)
if args.get('multiple', True):
return sorted(factors, key=order_if_multiple_key)
else:
return sorted(factors, key=order_no_multiple_key)
|
60be823e0f12b0e33d6a9567458cc98d95d1f900
| 3,646,595
|
def get_affix(text):
"""
This method gets the affix information
:param str text: Input text.
"""
return " ".join(
[word[-4:] if len(word) >= 4 else word for word in text.split()])
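# Quick illustration (added): words of four or more characters are reduced to
# their last four characters; shorter words are kept whole.
if __name__ == "__main__":
    print(get_affix("preprocessing the dataset"))  # -> "sing the aset"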
|
eb0aa68e803ce6c0ae218f4e0e2fd1855936b50f
| 3,646,596
|
async def async_setup_entry(hass, config_entry):
"""Konfigurowanie integracji na podstawie wpisu konfiguracyjnego."""
_LOGGER.info("async_setup_entry " + str(config_entry))
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
return True
|
0bbef08a544cccede0efd18fbbf9a0b7dddfbec9
| 3,646,597
|
def analyse_branching(geom,ordering_system,conversionFactor,voxelSize):
""" Does a branching analysis on the tree defined by 'geom'
Inputs:
- geom: A geometry structure consisting of element list, node location and radii/lengths
- ordering_system: the ordering system to be used in analysis (e.g. 'strahler', 'horsfield'
Returns: Prints to screen a table of branching properties (one per generation, one per order) and overall summary statistics
"""
elem_cnct = pg_utilities.element_connectivity_1D(geom['nodes'], geom['elems'])
orders = evaluate_orders(geom['nodes'], geom['elems'])
# Find Results
branchGeom = arrange_by_branches(geom, elem_cnct['elem_up'], orders[ordering_system],orders['generation'])
[geom, branchGeom] = find_branch_angles(geom, orders, elem_cnct, branchGeom, voxelSize, conversionFactor)
major_minor_results=major_minor(geom, elem_cnct['elem_down']) #major/minor child stuff
# tabulate data
generation_summary_statistics(geom, orders, major_minor_results)
summary_statistics(branchGeom, geom, orders, major_minor_results,'strahler')
return geom
|
25c72b51094c59317e167cca6662c5ccfa8805b0
| 3,646,598
|
def remove_start(s: str) -> str:
"""
Clear string from start '-' symbol
:param s:
:return:
"""
return s[1:] if s.startswith('-') else s
|
03504a3094798f6582bcae40233f7215e8d4d780
| 3,646,599
|