| content (string) | id (int64) |
|---|---|
|
def is_fouling_team_in_penalty(event):
"""Returns True if fouling team over the limit, else False"""
fouls_to_give_prior_to_foul = event.previous_event.fouls_to_give[event.team_id]
return fouls_to_give_prior_to_foul == 0
| 5,340,000
|
def is_img_id_valid(img_id):
"""
Checks if img_id is valid.
"""
    t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, flags=re.IGNORECASE)
t = re.sub(r'\.+', '.', t)
if img_id != t or img_id.count(':') != 1:
return False
profile, base_name = img_id.split(':', 1)
if not profile or not base_name:
return False
try:
get_profile_configs(profile)
except ValueError:
return False
return True
| 5,340,001
|
def lmc(wave, tau_v=1, **kwargs):
""" Pei 1992 LMC extinction curve.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
        The optical depth at 5500 Angstroms, used to normalize the
attenuation curve.
:returns tau:
The optical depth at each wavelength.
"""
if (wave < 1e3).any():
warnings.warn('LMC: extinction extrapolation below 1000AA is poor')
mic = wave * 1e-4
aa = [175., 19., 0.023, 0.005, 0.006, 0.020]
ll = [0.046, 0.08, 0.22, 9.7, 18., 25.]
bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00]
nn = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]
abs_ab = mic * 0.
norm_v = 0 # hack to go from tau_b to tau_v
mic_5500 = 5500 * 1e-4
for i, a in enumerate(aa):
norm_v += aa[i] / ((mic_5500 / ll[i])**nn[i] +
(ll[i] / mic_5500)**nn[i] + bb[i])
abs_ab += aa[i] / ((mic / ll[i])**nn[i] + (ll[i] / mic)**nn[i] + bb[i])
return tau_v * (abs_ab / norm_v)
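# Illustrative usage (an added sketch, not part of the original source); it assumes
# numpy is imported as np, which the function body itself already requires.
example_wave = np.array([1500.0, 5500.0, 25000.0])  # wavelengths in Angstroms
example_tau = lmc(example_wave, tau_v=1.0)
# The curve is normalized so that the optical depth at 5500 Angstroms equals tau_v.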
| 5,340,002
|
def minMax(xs):
"""Calcule le minimum et le maximum d'un tableau de valeur xs (non-vide !)"""
min, max = xs[0], xs[0]
for x in xs[1:]:
if x < min:
min = x
elif x > max:
max = x
return min,max
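# Quick illustrative check (an added sketch, not part of the original source).
example_lo, example_hi = minMax([3, 1, 4, 1, 5, 9, 2])
# example_lo == 1 and example_hi == 9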
| 5,340,003
|
def events(request):
"""Events"""
# Get profile
profile = request.user.profile
# Get a QuerySet of events for this user
events = Event.objects.filter(user=request.user)
# Create a new paginator
paginator = Paginator(events, profile.entries_per_page)
# Make sure page request is an int, default to 1st page
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request is out of range, deliver last page of results
try:
events = paginator.page(page)
except (EmptyPage, InvalidPage):
events = paginator.page(paginator.num_pages)
# Render template
return render_page(
'thing/events.html',
{
'events': events,
'user': request.user
},
request,
)
| 5,340,004
|
def print_describe(name: str, item: Any):
"""Print an item with a description, where the item is properly indented.
Args:
name (str): The name of the item to print.
item (Any): The item to print, indented by one tab.
"""
print(f"{name}: ")
print(textwrap.indent(str(item), "\t"))
| 5,340,005
|
def latent_posterior_factory(x: np.ndarray, y: np.ndarray) -> Tuple[Callable, ...]:
"""Factory function that yields further functions to compute the log-posterior
of the stochastic volatility model given parameters `x`. The factory also
constructs functions for the gradient of the log-posterior and the Fisher
information metric.
Args:
x: The stochastic volatilities.
y: Observations from the stochastic volatility model.
Returns:
log_posterior: Function to compute the log-posterior.
grad_log_posterior: Function to compute the gradient of the log-posterior.
metric: Function to compute the Fisher information metric.
        grad_metric: Function to compute the gradient of the Fisher information
            metric.
        grad_log_posterior_and_metric_and_grad_metric: Function to compute the
            gradient of the log-posterior, the metric, and the metric's gradient
            in a single call.
"""
T = x.size
def _log_posterior(sigma: float, phi: float, beta: float) -> float:
"""The log-posterior of the stochastic volatility model given the stochastic
volatilities. The inference is over the model parameters `sigma`, `phi`,
and `beta`.
Args:
sigma: Parameter of the stochastic volatility model.
phi: Parameter of the stochastic volatility model.
beta: Parameter of the stochastic volatility model.
Returns:
lp: The log-posterior of the stochastic volatility model.
"""
phisq = np.square(phi)
ly = spst.norm.logpdf(y, 0.0, beta*np.exp(0.5 * x)).sum()
lxo = spst.norm.logpdf(x[0], 0.0, sigma / np.sqrt(1.0 - phisq))
lx = spst.norm.logpdf(x[1:], phi*x[:-1], sigma).sum()
lp = ly + lx + lxo + log_prior(sigma, phi, beta)
return lp
def _grad_log_posterior_helper(gamma, alpha, beta, sigmasq, phi, phisq):
dpgamma, dpalpha, dpbeta = grad_log_prior(gamma, alpha, beta)
dbeta = (-T / beta
+ np.sum(np.square(y) / np.exp(x)) / np.power(beta, 3.0)
+ dpbeta)
dgamma = (
-T + np.square(x[0])*(1.0 - phisq) / sigmasq
+ np.sum(np.square(x[1:] - phi*x[:-1])) / sigmasq
+ dpgamma)
dalpha = (
-phi + phi*np.square(x[0])*(1.0 - phisq) / sigmasq
+ np.sum(x[:-1] * (x[1:] - phi*x[:-1])) * (1.0 - phisq) / sigmasq
+ dpalpha)
return np.array([dgamma, dalpha, dbeta])
def _metric_helper(gamma, alpha, beta, sigmasq, phi, phisq):
# Note that this ordering of the variables differs from that presented
# in the Riemannian manifold HMC paper.
G = np.array([
# gamma alpha beta
[ 2.0*T, 2.0*phi, 0.0], # gamma
[2.0*phi, 2.0*phisq + (T - 1.0)*(1.0 - phisq), 0.0], # alpha
[ 0.0, 0.0, 2.0 * T / np.square(beta)] # beta
])
# Add in the negative Hessian of the log-prior.
H = hess_log_prior(gamma, alpha, beta)
G -= H
return G
def _grad_metric_helper(gamma, alpha, beta, sigmasq, phi, phisq):
dGbeta = np.array([
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -4.0 * T / np.power(beta, 3.0)]
])
dGgamma = np.zeros((3, 3))
a = 2.0*(1.0 - phisq)
b = 2.0*phi*(3.0 - T)*(1.0 - phisq)
dGalpha = np.array([
[0.0, a, 0.0],
[ a, b, 0.0],
[0.0, 0.0, 0.0]
])
dG = np.array([dGgamma, dGalpha, dGbeta]).swapaxes(0, -1)
dH = grad_hess_log_prior(gamma, alpha, beta)
return dG - dH
def _grad_log_posterior(gamma: float, alpha: float, beta: float) -> np.ndarray:
"""The gradient log-posterior of the stochastic volatility model given the
stochastic volatilities with respect to the (transformed) parameters
`gamma`, `alpha`, and `beta`.
Args:
gamma: Transformed parameter `sigma` of the stochastic volatility model.
alpha: Transformed parameter `phi` of the stochastic volatility model.
beta: Parameter of the stochastic volatility model.
Returns:
dgamma: The gradient of the log-posterior with respect to the
transformed parameter `sigma`.
dalpha: The gradient of the log-posterior with respect to the
transformed parameter `phi`.
dbeta: The gradient of the log-posterior with respect to `beta`.
"""
sigma = np.exp(gamma)
sigmasq = np.square(sigma)
phi = np.tanh(alpha)
phisq = np.square(phi)
return _grad_log_posterior_helper(gamma, alpha, beta, sigmasq, phi, phisq)
def _metric(gamma: float, alpha: float, beta: float) -> np.ndarray:
"""The Fisher information metric of the stochastic volatility model given the
stochastic volatilities.
Args:
gamma: Transformed parameter of the stochastic volatility model.
alpha: Transformed parameter of the stochastic volatility model.
beta: Parameter of the stochastic volatility model.
Returns:
G: The Fisher information metric.
"""
sigma = np.exp(gamma)
sigmasq = np.square(sigma)
phi = np.tanh(alpha)
phisq = np.square(phi)
return _metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
def _grad_metric(gamma: float, alpha: float, beta: float) -> np.ndarray:
"""The gradient of the Fisher information metric of the stochastic volatility
        model given the stochastic volatilities with respect to the (transformed)
        parameters `gamma`, `alpha`, and `beta` of the stochastic volatility model.
Args:
gamma: Transformed parameter of the stochastic volatility model.
alpha: Transformed parameter of the stochastic volatility model.
beta: Parameter of the stochastic volatility model.
Returns:
dG: The gradient of the Fisher information metric.
"""
sigma = np.exp(gamma)
sigmasq = np.square(sigma)
phi = np.tanh(alpha)
phisq = np.square(phi)
return _grad_metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
def grad_log_posterior_and_metric_and_grad_metric(q):
gamma, alpha, beta = q
sigma = np.exp(gamma)
sigmasq = np.square(sigma)
phi = np.tanh(alpha)
phisq = np.square(phi)
glp = _grad_log_posterior_helper(gamma, alpha, beta, sigmasq, phi, phisq)
G = _metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
dG = _grad_metric_helper(gamma, alpha, beta, sigmasq, phi, phisq)
return glp, G, dG
# Convert functions defined for separate arguments to take a vector
# concatenation of the parameter.
log_posterior = lambda q: _log_posterior(*inverse_transform(q)[0])
grad_log_posterior = lambda q: _grad_log_posterior(q[0], q[1], q[2])
metric = lambda q: _metric(q[0], q[1], q[2])
grad_metric = lambda q: _grad_metric(q[0], q[1], q[2])
return (
log_posterior, grad_log_posterior, metric, grad_metric,
grad_log_posterior_and_metric_and_grad_metric)
| 5,340,006
|
def update_build_configuration_set(id, **kwargs):
"""
Update a BuildConfigurationSet
"""
data = update_build_configuration_set_raw(id, **kwargs)
if data:
return utils.format_json(data)
| 5,340,007
|
def create_app(settings_override: Optional[dict]=None) -> Flask:
"""
Create a Flask app
:param settings_override: any settings to override
:return: flask app
"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object('config.settings')
app.config.from_pyfile('settings.py', silent=True)
if settings_override:
app.config.update(settings_override)
configure_logging(app)
initialize_extensions(app)
db.app = app
register_blueprints(app)
initialize_jinja2(app)
load_models()
return app
| 5,340,008
|
def compute_prefix_function(pattern):
"""
Computes the prefix array for KMP.
:param pattern:
:type pattern: str
:return:
"""
m = len(pattern)
prefixes = [0]*(m+1)
i = 0
for q in range(2, m + 1):
while i > 0 and pattern[i] != pattern[q - 1]:
i = prefixes[i]
if pattern[i] == pattern[q - 1]:
i += 1
prefixes[q] = i
    return prefixes[1:]
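# Illustrative check (an added sketch, not part of the original source): the classic
# KMP example pattern "ababaca" has prefix-function values [0, 0, 1, 2, 3, 0, 1].
example_prefixes = compute_prefix_function("ababaca")
# example_prefixes == [0, 0, 1, 2, 3, 0, 1]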
| 5,340,009
|
def binary_indicator(states,
actions,
rewards,
next_states,
contexts,
termination_epsilon=1e-4,
offset=0,
epsilon=1e-10,
state_indices=None,
summarize=False):
"""Returns 0/1 by checking if next_states and contexts overlap.
Args:
states: A [batch_size, num_state_dims] Tensor representing a batch
of states.
actions: A [batch_size, num_action_dims] Tensor representing a batch
of actions.
rewards: A [batch_size] Tensor representing a batch of rewards.
next_states: A [batch_size, num_state_dims] Tensor representing a batch
of next states.
contexts: A list of [batch_size, num_context_dims] Tensor representing
a batch of contexts.
    termination_epsilon: terminate if dist is less than this quantity.
    offset: Offset the rewards.
    epsilon: small offset to ensure non-negative/zero distance.
    state_indices: (optional) indices used to select a subset of the next-state
      dimensions before computing the distance.
    summarize: unused.
Returns:
A new tf.float32 [batch_size] rewards Tensor, and
tf.float32 [batch_size] discounts tensor.
"""
del states, actions # unused args
next_states = index_states(next_states, state_indices)
dist = tf.reduce_sum(tf.squared_difference(next_states, contexts[0]), -1)
dist = tf.sqrt(dist + epsilon)
discounts = dist > termination_epsilon
rewards = tf.logical_not(discounts)
rewards = tf.to_float(rewards) + offset
return tf.to_float(rewards), tf.ones_like(tf.to_float(discounts))
| 5,340,010
|
def sigmoid(num):
"""
Find the sigmoid of a number.
    :type num: number
    :param num: The number to find the sigmoid of
:return: The result of the sigmoid
:rtype: number
>>> sigmoid(1)
0.7310585786300049
"""
# Return the calculated value
return 1 / (1 + math.exp(-num))
| 5,340,011
|
def print_version(ctx, param, value):
"""Print out the version of opsdroid that is installed."""
if not value or ctx.resilient_parsing:
return
click.echo("opsdroid {version}".format(version=__version__))
ctx.exit(0)
| 5,340,012
|
def list_lattices(device_name: str = None, num_qubits: int = None,
connection: ForestConnection = None):
"""
Query the Forest 2.0 server for its knowledge of lattices. Optionally filters by underlying
device name and lattice qubit count.
:return: A dictionary keyed on lattice names and valued in dictionaries of the form
{
"device_name": device_name,
"qubits": num_qubits
}
"""
if connection and connection.session:
session = connection.session
else:
session = get_session()
if connection:
url = connection.sync_endpoint + "/lattices"
else:
config = PyquilConfig()
try:
url = config.forest_url + "/lattices"
except TypeError:
raise ValueError("""Encountered an error when querying the Forest 2.0 endpoint.
Most likely, you're missing an address for the Forest 2.0 server endpoint. This can
be set through the environment variable FOREST_URL or by changing the following lines
in the QCS config file:
[Rigetti Forest]
url = https://rigetti.com/valid/forest/url""")
try:
response = get_json(session, url,
params={"device_name": device_name,
"num_qubits": num_qubits})
return response["lattices"]
except Exception as e:
raise ValueError("""
list_lattices encountered an error when querying the Forest 2.0 endpoint.
Some common causes for this error include:
* You don't have valid user authentication information. Very likely this is because you
haven't yet been invited to try QCS. We plan on making our device information publicly
accessible soon, but in the meanwhile, you'll have to use default QVM configurations and
to use `list_quantum_computers` with `qpus = False`.
* You do have user authentication information, but it is missing or modified. You can find
this either in the environment variables FOREST_API_KEY and FOREST_USER_ID or in the
config file (stored by default at ~/.qcs_config, but with location settable through the
environment variable QCS_CONFIG), which contains the subsection
[Rigetti Forest]
user_id = your_user_id
key = your_api_key
* You're missing an address for the Forest 2.0 server endpoint, or the address is invalid.
This too can be set through the environment variable FOREST_URL or by changing the
following lines in the QCS config file:
[Rigetti Forest]
url = https://rigetti.com/valid/forest/url
For the record, here's the original exception: {}
""".format(repr(e)))
| 5,340,013
|
def read_vec_flt_ark(file_or_fd):
""" generator(key,vec) = read_vec_flt_ark(file_or_fd)
Create generator of (key,vector<float>) tuples, reading from an ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Read ark to a 'dictionary':
d = { u:d for u,d in kaldi_io.read_vec_flt_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
ali = read_vec_flt(fd)
yield key, ali
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
| 5,340,014
|
def inflate_tilegrid(
bmp_path=None,
target_size=(3, 3),
tile_size=None,
transparent_index=None,
bmp_obj=None,
bmp_palette=None,
):
"""
inflate a TileGrid of ``target_size`` in tiles from a 3x3 spritesheet by duplicating
the center rows and columns.
:param Optional[str] bmp_path: filepath to the 3x3 spritesheet bitmap file
:param Optional[tuple] target_size: desired size in tiles (target_width, target_height)
:param Optional[tuple] tile_size: size of the tiles in the 3x3 spritesheet. If
None is used it will equally divide the width and height of the Bitmap by 3.
:param Optional[Union[tuple, int]] transparent_index: a single index within the palette to
make transparent, or a tuple of multiple indexes to make transparent
:param Optional[OnDiskBitmap] bmp_obj: Already loaded 3x3 spritesheet in an OnDiskBitmap
:param Optional[Palette] bmp_palette: Already loaded spritesheet Palette
"""
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches
if bmp_path is None and (bmp_obj is None and bmp_palette is None):
raise AttributeError("Must pass either bmp_path or bmp_obj and bmp_palette")
if bmp_path is not None:
image, palette = adafruit_imageload.load(bmp_path)
else:
image = bmp_obj
palette = bmp_palette
if transparent_index is not None:
if isinstance(transparent_index, tuple):
for index in transparent_index:
palette.make_transparent(index)
elif isinstance(transparent_index, int):
palette.make_transparent(transparent_index)
if tile_size is None:
tile_width = image.width // 3
tile_height = image.height // 3
else:
tile_width = tile_size[0]
tile_height = tile_size[1]
target_width = target_size[0]
target_height = target_size[1]
tile_grid = displayio.TileGrid(
image,
pixel_shader=palette,
height=target_height,
width=target_width,
tile_width=tile_width,
tile_height=tile_height,
)
# corners
tile_grid[0, 0] = 0 # upper left
tile_grid[tile_grid.width - 1, 0] = 2 # upper right
tile_grid[0, tile_grid.height - 1] = 6 # lower left
tile_grid[tile_grid.width - 1, tile_grid.height - 1] = 8 # lower right
for x in range(target_size[0] - 2):
tile_grid[x + 1, 0] = 1
tile_grid[x + 1, tile_grid.height - 1] = 7
for y in range(target_size[1] - 2):
tile_grid[0, y + 1] = 3
tile_grid[tile_grid.width - 1, y + 1] = 5
for y in range(target_size[1] - 2):
for x in range(target_size[0] - 2):
tile_grid[x + 1, y + 1] = 4
return tile_grid
| 5,340,015
|
def mark_ballot_stuffing_delta(row, i, rows,
duplicate_threshold,
random_order_threshold):
"""
    Modifies the list elements in place, adding a 'smelly' attribute to each row
    dictionary that duplicates the searched row within the given time delta.
    Random-ordering detection is also applied within a different, smaller time delta.
"""
timestamp = arrow.get(row['Timestamp'], 'M/D/YYYY H:m:s')
    row_data = [v.lower().strip() for k, v in row.items() if k in DUPE_DICT_KEYS]
while i < (len(rows) - 1):
i += 1
next_row = rows[i]
        next_row_data = [v.lower().strip() for k, v in next_row.items() if k in DUPE_DICT_KEYS]
next_timestamp = arrow.get(next_row['Timestamp'], 'M/D/YYYY H:m:s')
timedelta = next_timestamp - timestamp
if timedelta.total_seconds() < duplicate_threshold:
if find_dupe(row, next_row):
next_row['smelly'] = True
if timedelta.total_seconds() < random_order_threshold:
if collections.Counter(row_data) == collections.Counter(next_row_data):
next_row['smelly'] = True
else:
break
| 5,340,016
|
def get_name_and_version(requirements_line: str) -> tuple[str, ...]:
"""Get the name a version of a package from a line in the requirement file."""
full_name, version = requirements_line.split(" ", 1)[0].split("==")
name_without_extras = full_name.split("[", 1)[0]
return name_without_extras, version
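# Illustrative usage (an added sketch, not part of the original source): extras and a
# trailing environment marker are stripped, leaving only the bare name and version.
example_name, example_version = get_name_and_version("requests[security]==2.28.1 ; python_version >= '3.7'")
# example_name == "requests" and example_version == "2.28.1"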
| 5,340,017
|
def test_required_parameters_provided_valid_inputs():
"""
Unit test to check the required_parameters_provided function with valid inputs
"""
parameters = {}
keys = []
required_parameters_provided(
parameters=parameters,
keys=keys
)
| 5,340,018
|
def _compute_eval_stats(params, batch,
model,
pad_id):
"""Computes pre-training task predictions and stats.
Args:
params: Model state (parameters).
batch: Current batch of examples.
model: The model itself. Flax separates model state and architecture.
pad_id: Token ID representing padding. A mask is used to distinguish padding
from actual inputs.
Returns:
Model predictions and metrics.
"""
inputs = {
"input_ids": batch["input_ids"],
"input_mask": (batch["input_ids"] != pad_id).astype(np.int32),
"type_ids": batch["type_ids"],
"masked_lm_positions": batch["masked_lm_positions"],
"masked_lm_labels": batch["masked_lm_ids"],
"masked_lm_weights": batch["masked_lm_weights"],
"next_sentence_labels": batch["next_sentence_labels"],
"deterministic": True
}
return model.apply({"params": params}, **inputs)
| 5,340,019
|
def tail_conversation(conversation):
"""
Yield lines from the backlog of the specified conversation. Yields an empty
string when EOF is encountered, but will keep trying. This is based on
this clever SO answer: http://stackoverflow.com/a/1703997
"""
with open(os.path.join(MESSAGE_DIR, conversation), "r") as f:
while True:
yield f.readline()
| 5,340,020
|
def validateTextFile(fileWithPath):
"""
Test if a file is a plain text file and can be read
:param fileWithPath(str): File Path
    :return: the encoding that successfully read the file, or None if the file could not be read as text
"""
try:
file = open(fileWithPath, "r", encoding=locale.getpreferredencoding(), errors="strict")
# Read only a couple of lines in the file
for line in itertools.islice(file, 10):
line = line
file.readlines()
# Close the file handle
file.close()
# Return the systems preferred encoding
return locale.getpreferredencoding()
    except Exception:
validencodings = ["utf-8", "ascii", "utf-16", "utf-32", "iso-8859-1", "latin-1"]
for currentEncoding in validencodings:
try:
file = open(fileWithPath, "r", encoding=currentEncoding, errors="strict")
# Read only a couple of lines in the file
for line in itertools.islice(file, 10):
line = line
# Close the file handle
file.close()
                # Return the encoding that succeeded
                return currentEncoding
            except Exception:
                # Error occurred while reading the file, skip to the next iteration
continue
# Error, no encoding was correct
return None
| 5,340,021
|
def get_title(filename="test.html"):
"""Read the specified file and load it into BeautifulSoup. Return the title tag
"""
with open(filename, "r") as my_file:
file_string = my_file.read()
file_soup = BeautifulSoup(file_string, 'html.parser')
    # find the title tag
title = file_soup.select("title")
return title
| 5,340,022
|
def make_job_files_public(jobs):
"""Given a list of jobs from the db, make their result files
    publicly readable.
"""
for job in jobs:
        for mimetype, url in job['results'].items():
make_job_file_public(url)
| 5,340,023
|
def is_valid_dump_key(dump_key):
"""
True if the `dump_key` is in the valid format of
"database_name/timestamp.dump"
"""
regexmatch = re.match(
r'^[\w-]+/\d{4}_\d{2}_\d{2}_\d{2}_\d{2}_\d{2}_\d+\.\w+\.dump$',
dump_key,
)
    return bool(regexmatch)
| 5,340,024
|
def GetProfileAtAngle( imdata, xc,yc, angle, radius, width=1 ):
"""
Returns a 1D profile cut through an image at specified angle, extending to
specified radius.
Note: this is designed to imitate pvect, so angles are measured CCW from +x axis!
This function uses IRAF coordinates (1-based, x = column number)
Parameters
----------
imdata : 2D ndarray of float
image data array
xc : int or float
x-coordinate of center to extract profile from (IRAF ordering, 1-based)
yc : int or float
y-coordinate of center to extract profile from (IRAF ordering, 1-based)
angle : float
angle measured CCW from +x axis, in degrees
radius : int
length of profile, in pixels
width : int, optional
width of profile (perpendicular to profile) in pixels
Returns
-------
rr,ii : tuple of 1D ndarray of float
rr = array of radius values (= 0 at (xc,yc))
ii = data pixel values along profile [= Nan if all pixels for that bin
were masked]
"""
angle_rad = math.radians(angle)
x_end = xc + math.cos(angle_rad) * radius
y_end = yc + math.sin(angle_rad) * radius
x_start = xc - math.cos(angle_rad) * radius
y_start = yc - math.sin(angle_rad) * radius
rr,ii = ExtractProfile(imdata, x_start,y_start, x_end,y_end, width=width)
rr = rr - radius
return rr, ii
| 5,340,025
|
def gen_dict_extract(key: str,
var: Union[Dict[str, Any], List[Any]]) -> Generator[Any, None, None]:
"""A generator that extracts all values in a nested dict with the given key.
Args:
key: The key we are looking for.
var: The target dictionary (maybe a list during the recursion).
Returns:
The value of the given key that can be any type.
"""
    if isinstance(var, dict):
        for k, v in var.items():
if k == key:
yield v
if isinstance(v, dict):
for result in gen_dict_extract(key, v):
yield result
elif isinstance(v, list):
for elt in v:
for result in gen_dict_extract(key, elt):
yield result
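# Illustrative usage (an added sketch, not part of the original source): values for the
# key are yielded from arbitrarily nested dicts and lists, in iteration order.
example_ids = list(gen_dict_extract("id", {"a": {"id": 1}, "b": [{"id": 2}, {"c": {"id": 3}}]}))
# example_ids == [1, 2, 3]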
| 5,340,026
|
async def test_light(opp, rfxtrx_automatic):
"""Test light entities."""
rfxtrx = rfxtrx_automatic
entity_id = "binary_sensor.x10_security_motion_detector_a10900_32"
await rfxtrx.signal(EVENT_LIGHT_DETECTOR_LIGHT)
assert opp.states.get(entity_id).state == "on"
await rfxtrx.signal(EVENT_LIGHT_DETECTOR_DARK)
assert opp.states.get(entity_id).state == "off"
| 5,340,027
|
def create_out_dir_name(params):
"""
Create output directory name for the experiment based on the current date
and time.
Args:
params (dict): The parameters of the experiment.
Returns:
str: The path to the output directory.
"""
current_timestamp = timestamp()
out_dir = os.path.join('out', current_timestamp)
return out_dir
| 5,340,028
|
def extract_axon_and_myelin_masks_from_image_data(image_data):
"""
Returns the binary axon and myelin masks from the image data.
:param image_data: the image data that contains the 8-bit greyscale data, with over 200 (usually 255 if following
the ADS convention) being axons, 100 to 200 (usually 127 if following the ADS convention) being myelin
and 0 being background
    :return axon_mask: the binary axon mask
:return myelin_mask: the binary myelin mask
"""
image_data_array = np.array(image_data)
axon_mask = image_data_array > 200
myelin_mask = (image_data_array > 100) & (image_data_array < 200)
axon_mask = axon_mask.astype(np.uint8)
myelin_mask = myelin_mask.astype(np.uint8)
return axon_mask, myelin_mask
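# Illustrative usage (an added sketch, not part of the original source); assumes numpy
# is imported as np, which the function body itself already requires.
example_image = np.array([[255, 127, 0]], dtype=np.uint8)
example_axon, example_myelin = extract_axon_and_myelin_masks_from_image_data(example_image)
# example_axon == [[1, 0, 0]] and example_myelin == [[0, 1, 0]]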
| 5,340,029
|
async def app(
jupyter: MockJupyter, cachemachine: MockCachemachine
) -> AsyncIterator[FastAPI]:
"""Return a configured test application.
Wraps the application in a lifespan manager so that startup and shutdown
events are sent during test execution.
Notes
-----
This must depend on the Jupyter mock since otherwise the JupyterClient
mocking is undone before the app is shut down, which causes it to try to
make real web socket calls.
    A test in business/jupyterloginloop_test.py depends on the exact shutdown
timeout.
"""
async with LifespanManager(main.app, shutdown_timeout=10):
yield main.app
| 5,340,030
|
def main():
"""
Main - program execute
"""
print (str(datetime.datetime.now()) + ' Starting ...')
datadir = 'C:/Dev/covid-19-vic-au/'
processXlsx(datadir)
print (str(datetime.datetime.now()) + ' Finished!')
exit()
| 5,340,031
|
def create_mne_array(recording, ch_names=None):
"""
Populate a full mne raw array object with information.
Parameters
----------
    recording : object
        The recording object whose signals are converted to numpy data.
ch_names : List of str, Default None
Optional. What to name the mne eeg channels, default: region+chan_idx.
Returns
-------
mne.io.RawArray
"""
# TODO work with quantities here to avoid magic division to uV
raw_data = recording.get_np_signals() / 1000
if ch_names is None:
try:
ch_names = [
"{}-{}".format(x, y)
for x, y in zip(
recording.get_signals().get_property("region"),
recording.get_signal_channels(as_idx=True),
)
]
except BaseException:
ch_names = [str(i) for i in range(len(recording.get_signals()))]
# Convert LFP data into mne format
example_lfp = recording.get_signals()[0]
sfreq = example_lfp.get_sampling_rate()
ch_types = ["eeg"] * len(recording.get_signals())
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
raw = mne.io.RawArray(raw_data, info)
return raw
| 5,340,032
|
def expand_advanced(var, vars_, nounset, indirect, environ, var_symbol):
"""Expand substitution."""
if len(vars_) == 0:
raise MissingClosingBrace(var)
if vars_[0] == "-":
return expand_default(
var,
vars_[1:],
set_=False,
nounset=nounset,
indirect=indirect,
environ=environ,
var_symbol=var_symbol,
)
if vars_[0] == "=":
return expand_default(
var,
vars_[1:],
set_=True,
nounset=nounset,
indirect=indirect,
environ=environ,
var_symbol=var_symbol,
)
if vars_[0] == "+":
return expand_substitute(
var, vars_[1:], nounset=nounset, environ=environ, var_symbol=var_symbol
)
if vars_[0] == "?":
return expand_strict(
var, vars_[1:], nounset=nounset, environ=environ, var_symbol=var_symbol
)
return expand_offset(
var, vars_, nounset=nounset, environ=environ, var_symbol=var_symbol
)
| 5,340,033
|
def overlap(n2, lamda_g, gama):
""" Calculates the 1/Aeff (M) from the gamma given.
The gamma is supposed to be measured at lamda_g
(in many cases we assume that is the same as where
the dispersion is measured at).
"""
M = gama / (n2*(2*pi/lamda_g))
return M
| 5,340,034
|
def fill_none(pre_made_replays_list):
"""Fill none and reformat some fields in a pre-made replays list.
:param pre_made_replays_list: pre-made replays list from ballchasing.com.
:return: formatted list.
"""
for replay in pre_made_replays_list:
if replay["region"] is None:
replay["region"] = "North America"
replay["phase"] = "Qualifier"
replay["stage"] = "Tiebreaker"
replay["round"] = "Finals"
if replay['region'] == 'Main Event':
replay['region'] = 'World'
elif replay['region'] == 'Europe' and replay['phase'] == 'Tiebreaker':
replay["phase"] = "Qualifier"
replay["stage"] = "Tiebreaker"
if replay["match"] == "EG vs 00":
replay["round"] = "Lower Finals"
else:
replay["round"] = "Upper Finals"
return pre_made_replays_list
| 5,340,035
|
def main(args):
"""
Main function of script
    :param args: args from command line
:return: None
"""
username = args.username if args.username != '' else input(
'Username [> ')
file_name = username.lower() + '.api'
if os.path.exists(file_name):
data = ih.load_data(file_name)
else:
password = args.password if args.password != '' else input(
'Password [> ')
data = ih.open_api(username, password)
should_work = True
while should_work or args.work_on_loop:
now = datetime.datetime.now()
is_work_time = (now.time().hour == args.start_hour and
now.time().minute == args.start_min or
not args.work_on_loop)
if should_work and is_work_time:
if args.unfollow:
not_follows = ih.unfollow_not_followers(data['api'])
ih.results_log(str(not_follows), args.work_on_loop, args.log_file)
if args.estimate:
estimation = ih.user_estimate(data['api'], data['api'].username_id)
ih.results_log('Estimation: {1}/{2} = {0}'.format(estimation[0],
estimation[1],
estimation[2]),
args.work_on_loop, args.log_file)
if not args.no_like_follow:
followings = data['api'].getTotalSelfFollowings()
user = random.choice(followings)
ih.results_log(user['username'], args.work_on_loop, args.log_file)
res = ih.user_followers_like_follow_helper(data, user['pk'],
args.user_count,
args.likes_count,
(args.min_estimate,
args.max_estimate))
ih.results_log(res, args.work_on_loop, args.log_file)
should_work = args.work_on_loop
time.sleep(50)
| 5,340,036
|
def parse_args_from_str(arg_str, arg_defs): # , context=None):
"""
Args:
        arg_str (str): argument string, optionally comma-separated
arg_defs (tuple): list of argument definitions
context (dict, optional):
When passed, the arguments are parsed for ``$(var_name)`` macros,
to lookup values from that dict.
Returns:
(dict) keyword args
Raises:
TypeError: if `argument` is of an unexpected type
ValueError: if `argument` does not fulfill the optional condition
AssertionError:
if `parse_args_from_str` was called with a wrong syntax, i.e.
`arg_defs` is not well-formed.
Examples::
arg_defs = (
("name", str),
("amount", float),
("options", dict, {}),
)
def order(raw_arg_string):
            kwargs = parse_args_from_str(raw_arg_string, arg_defs)
            assert isinstance(kwargs["name"], str)
            assert type(kwargs["amount"]) is float
            assert isinstance(kwargs["options"], dict)
"""
check_arg(arg_str, str)
check_arg(arg_defs, (list, tuple))
res = {}
# Special case: '$name()' should not be interpreted as having one "" arg
# if arg_defs defines a default for the first arg
if arg_str.strip() == "" and len(arg_defs[0]) == 3:
arg_str = str(arg_defs[0][2])
arg_list = [a.strip() for a in arg_str.split(",")]
optional_mode = False
for arg_def in arg_defs:
check_arg(arg_def, (list, tuple))
if len(arg_def) == 2:
arg_name, arg_type = arg_def
arg_default = NO_DEFAULT
if optional_mode:
raise AssertionError(
"Mandatory arg definition must not follow optional args: `{}`".format(
arg_def
)
)
elif len(arg_def) == 3:
arg_name, arg_type, arg_default = arg_def
optional_mode = True
else:
raise AssertionError("Expected 2- or 3-tuple: {}".format(arg_def))
if arg_type not in (float, int, str):
raise AssertionError(
"Unsupported argument definition type: {}".format(arg_def)
)
try:
# Get next arg
arg_val = arg_list.pop(0)
# Allow quotes
is_quoted = (arg_val.startswith('"') and arg_val.endswith('"')) or (
arg_val.startswith("'") and arg_val.endswith("'")
)
if is_quoted:
# Strip quotes and return as string (don't cast to other types)
arg_val = arg_val[1:-1]
elif "$(" in arg_val:
# The arg seems to be a macro: don't try to cast.
pass
else:
# Raises ValueError:
arg_val = arg_type(arg_val)
except IndexError:
if arg_default is NO_DEFAULT:
raise ValueError(
"Missing mandatory arg `{}` in '{}'.".format(arg_name, arg_str)
)
arg_val = arg_default
res[arg_name] = arg_val
if arg_list:
raise ValueError("Extra args `{}`.".format(", ".join(arg_list)))
return res
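# Illustrative usage (an added sketch, not part of the original source); it relies on
# the module's own check_arg and NO_DEFAULT helpers being available as in the snippet.
example_defs = (("name", str), ("amount", float, 1.0))
example_kwargs = parse_args_from_str("widget, 2.5", example_defs)
# example_kwargs == {"name": "widget", "amount": 2.5}; passing just "widget" would
# fall back to the default amount of 1.0.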
| 5,340,037
|
def test_nb_regression_ini_setting_init(testdir):
"""Test the nb_regression fixture is initialised with the config file settings."""
testdir.makeini(
r"""
[pytest]
nb_exec_cwd = {path}
nb_exec_allow_errors = True
nb_exec_timeout = 100
nb_diff_use_color = True
nb_diff_color_words = True
nb_diff_ignore =
/metadata/language_info/version
/cells/*/execution_count
/cells/*/outputs/*/traceback
/cells/*/outputs/*/execution_count
/cells/12/outputs/0/data/text/latex
/cells/9/outputs/0/metadata/application/json
nb_post_processors =
nb_diff_replace =
/cells/*/outputs/*/traceback \<ipython\-input\-[\-0-9a-zA-Z]*\> "< >"
""".format(
path=os.path.join(PATH, "raw_files")
)
)
testdir.makepyfile(
"""
import attr
def test_nb(nb_regression):
assert attr.asdict(nb_regression) == {config}
""".format(
config=attr.asdict(
NBRegressionFixture(
**{
"exec_cwd": os.path.join(PATH, "raw_files"),
"exec_allow_errors": True,
"exec_timeout": 100,
"post_processors": (),
"diff_ignore": (
"/metadata/language_info/version",
"/cells/*/execution_count",
"/cells/*/outputs/*/traceback",
"/cells/*/outputs/*/execution_count",
"/cells/12/outputs/0/data/text/latex",
"/cells/9/outputs/0/metadata/application/json",
),
"diff_replace": (
(
"/cells/*/outputs/*/traceback",
"\\<ipython\\-input\\-[\\-0-9a-zA-Z]*\\>",
"< >",
),
),
"diff_use_color": True,
"diff_color_words": True,
# the following are the defaults for pytest-cov
"cov_source": (),
"cov_config": ".coveragerc",
}
)
)
)
)
result = testdir.runpytest("-vv")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_nb PASSED*"])
# make sure that that we get a '0' exit code for the testsuite
assert result.ret == 0
| 5,340,038
|
def model_evaluation(
data_loader,
ml_model_name,
ml_model,
smiles_dictionary,
max_length_smiles,
device_to_use,
):
"""
Evaluation per batch of a pytorch machine learning model.
Parameters
----------
data_loader : torch.utils.data
The training data as seen by Pytorch for mini-batches.
ml_model_name : str
Name of the machine learning model. It can be either "CONV1D", "CONV2D", or "RNN".
ml_model : nn.Module
Instance of the pytorch machine learning model.
smiles_dictionary : dict
The dictionary of SMILES characters.
max_length_smiles : int
The length of the longest SMILES.
device_to_use : torch.device
The device to use for model instance, "cpu" or "cuda".
Returns
-------
tuple of dict:
Dictionary of the predicted, true output values, respectively, in the data loader, with SMILES as keys.
"""
ml_model.eval()
with torch.no_grad():
all_output_pred = {}
all_output_true = {}
for _, data in enumerate(data_loader):
# SMILES and target
smiles, target = data
input_true, output_true = data_to_pytorch_format(
smiles,
target,
smiles_dictionary,
max_length_smiles,
ml_model_name,
device_to_use,
)
# Prediction
output_pred = ml_model(input_true)
# Convert to numpy arrays
output_pred = output_pred.cpu().detach().numpy()
output_true = output_true.cpu().detach().numpy()
for smile in smiles:
all_output_pred[smile] = output_pred
all_output_true[smile] = output_true
return (all_output_pred, all_output_true)
| 5,340,039
|
def compSeq(s1, s2, lineL=50):
"""Print two sequences showing mismatches.
Parameters
----------
s1, s2 : str
Strings representing aligned AA or NT sequences
lineL : int
Wrap line at lineL"""
lineN = int(np.ceil(min(len(s1), len(s2))/lineL))
count = 0
samecount = 0
outStr = ''
for linei in range(lineN):
if (linei+1) * lineL < min(len(s1), len(s2)):
end = (linei+1) * lineL
else:
end = min(len(s1), len(s2))
outStr += 'Pos %d - %d\n' % (linei*lineL+1, end-1+1)
for sitei in range(linei*lineL, end):
outStr += s1[sitei]
outStr += '\n'
for sitei in range(linei*lineL, end):
out = ' ' if s1[sitei] == s2[sitei] else '|'
outStr += out
count += 1
samecount += 1 if s1[sitei]==s2[sitei] else 0
outStr += '\n'
for sitei in range(linei*lineL, end):
out = '.' if s1[sitei] == s2[sitei] else s2[sitei]
            outStr += out
outStr += '\n\n'
outStr += 'Seq1 (%d) and Seq2 (%d) are %1.1f%% similar\n\n' % (len(s1), len(s2), 1e2*samecount/count)
print(outStr)
| 5,340,040
|
def reverse_dict2(d):
"""Reverses direction of dependence dict
>>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
    >>> reverse_dict2(d)  # doctest: +SKIP
    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict order is not deterministic. Because we iterate over the
        input dict, the output of this function depends on that order, so
        the order of the output should be considered non-deterministic.
"""
result = {}
for key in d:
for val in d[key]:
result[val] = result.get(val, tuple()) + (key, )
return result
| 5,340,041
|
def setup(argv):
"""
Setup the fido2 database
:return:
"""
db_path, encryption_key = handle_args(argv, "pe", ["db-path=",
"encryption-key="])
connection = get_db_connection(db_path, encryption_key)
cursor = connection.cursor()
sql_command = """
CREATE TABLE IF NOT EXISTS users(
user_id BINARY(64) NOT NULL PRIMARY KEY,
user_name VARCHAR(50),
display_name VARCHAR(50)
);
"""
cursor.execute(sql_command)
sql_command = """
CREATE TABLE IF NOT EXISTS certificates(
user_id BINARY(64) NOT NULL,
mode INTEGER NOT NULL,
certificate VARCHAR(65536) NOT NULL,
PRIMARY KEY (user_id, mode),
FOREIGN KEY (user_id) REFERENCES users(user_id)
);"""
cursor.execute(sql_command)
sql_command = """
CREATE TABLE IF NOT EXISTS credentials(
credential_id VARBINARY(65536) NOT NULL PRIMARY KEY,
aaguid BINARY(16) NOT NULL,
public_key VARBINARY(131072) NOT NULL,
signature_counter INTEGER NOT NULL DEFAULT 0,
        user_id BINARY(64) NULL,
mode INTEGER NOT NULL,
FOREIGN KEY (user_id) REFERENCES users(user_id)
);
"""
cursor.execute(sql_command)
sql_command = """
CREATE TABLE IF NOT EXISTS eph_user_names(
eph_user_name BINARY(32) NOT NULL PRIMARY KEY,
user_id BINARY(64),
valid_through DATETIME NOT NULL,
FOREIGN KEY (user_id) REFERENCES users(user_id)
);
"""
cursor.execute(sql_command)
sql_command = """
CREATE TRIGGER IF NOT EXISTS table_limit
AFTER INSERT ON eph_user_names
WHEN (SELECT COUNT(*) FROM eph_user_names) > 1000
BEGIN
DELETE FROM eph_user_names;
END;
"""
cursor.execute(sql_command)
connection.commit()
connection.close()
| 5,340,042
|
def Flip(p, y='Y', n='N'):
"""Returns y with probability p; otherwise n."""
return y if random.random() <= p else n
| 5,340,043
|
def plot_soma(ax, soma, plane='xy',
soma_outline=True,
linewidth=_LINEWIDTH,
color=None, alpha=_ALPHA):
"""Generates a 2d figure of the soma.
Args:
ax(matplotlib axes): on what to plot
soma(neurom.core.Soma): plotted soma
plane(str): Any pair of 'xyz'
soma_outline(bool): should the soma be drawn as an outline
linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
"""
plane0, plane1 = _plane2col(plane)
color = _get_color(color, tree_type=NeuriteType.soma)
if isinstance(soma, SomaCylinders):
for start, end in zip(soma.points, soma.points[1:]):
common.project_cylinder_onto_2d(ax, (plane0, plane1),
start=start[COLS.XYZ], end=end[COLS.XYZ],
start_radius=start[COLS.R], end_radius=end[COLS.R],
color=color, alpha=alpha)
else:
if soma_outline:
ax.add_artist(Circle(soma.center[[plane0, plane1]], soma.radius,
color=color, alpha=alpha))
else:
points = [[p[plane0], p[plane1]] for p in soma.iter()]
if points:
points.append(points[0]) # close the loop
x, y = tuple(np.array(points).T)
ax.plot(x, y, color=color, alpha=alpha, linewidth=linewidth)
ax.set_xlabel(plane[0])
ax.set_ylabel(plane[1])
bounding_box = geom.bounding_box(soma)
ax.dataLim.update_from_data_xy(np.vstack(([bounding_box[0][plane0], bounding_box[0][plane1]],
[bounding_box[1][plane0], bounding_box[1][plane1]])),
ignore=False)
| 5,340,044
|
def prepare():
"""Prepare the database (create needed tables, indices, etc.)."""
con = connect()
with con:
con.execute(
"""
CREATE TABLE IF NOT EXISTS characters (
name string primary key,
attack int,
defense int
)
"""
)
| 5,340,045
|
def test_projected_cov_calc(lorenz_dataset):
"""Test the project_cross_cov_mats function by also directly projecting
the data."""
rng = np.random.RandomState(20200226)
T, d, X, _, _ = lorenz_dataset
X = X[:, :3]
N = 3
d = 2
T = 6
V = init_coef(N, d, rng, 'random_ortho')
tV = torch.tensor(V)
ccms = calc_cross_cov_mats_from_data(X, T)
tccms = torch.tensor(ccms)
pccms = project_cross_cov_mats(ccms, V)
cov = calc_cov_from_cross_cov_mats(pccms)
XL = form_lag_matrix(X, T)
big_V = np.zeros((T * N, T * d))
for ii in range(T):
big_V[ii * N:(ii + 1) * N, ii * d:(ii + 1) * d] = V
Xp = XL.dot(big_V)
cov2 = np.cov(Xp, rowvar=False)
cov2 = toeplitzify(cov2, T, d)
assert_allclose(cov, cov2)
tpccms = project_cross_cov_mats(tccms, tV)
tcov = calc_cov_from_cross_cov_mats(tpccms)
assert torch.allclose(tcov, torch.tensor(cov2))
assert_allclose(tcov.numpy(), cov2)
| 5,340,046
|
def show_status():
"""prints out "_ " or the letter itself from the current_capital_upper if
it is in the used_letters list"""
global current_capital_upper
global current_country
global used_letters
global life
current_life()
if life == 1:
print("\nOh! It's the capital of %s\n!" % current_country)
print("? ", end="")
for letter in current_capital_upper:
if letter == " ":
print(" ", end="")
elif letter in used_letters:
print(letter, end="")
else:
print(" _", end="")
print(" ?\n")
if used_letters:
print(" You already tried: ", used_letters)
| 5,340,047
|
def resolve_avicenna(d):
"""
.. todo::
WRITEME
"""
import pylearn2.datasets.avicenna
return pylearn2.config.checked_call(pylearn2.datasets.avicenna.Avicenna,d)
| 5,340,048
|
def sub_to_db(sub,
add_area=True,
area_srid=3005,
wkt=True,
wkb=False,
as_multi=True,
to_disk=False,
procs=1,
engine=None):
"""
Convert the object to a SQLite database. Returns the |db| module exposing
the database ORM and additional SQLAlchemy objects. Note that |procs|
greater than one results in the database being written to disk (if the
desired database is SQLite).
sub (SubOcgDataset) -- The object to convert to the database.
add_area=True -- Insert the geometric area.
area_srid=3005 -- SRID to use for geometric transformation.
    wkt=True -- Insert the geometry's WKT representation.
wkb=False -- Insert the geometry's WKB representation.
as_multi=True -- Convert geometries to shapely.MultiPolygon.
to_disk=False -- Write the database to disk (applicable for SQLite).
procs=1 -- Number of processes to use when loading data.
engine=None (sqlalchemy.Engine) -- An optional engine to pass overloading
the creation of other backends. Useful to use PostGRES instead of
SQLite for example.
"""
if engine is None:
use_lock = True
else:
use_lock = False
## initialize the db
db = init_db(engine=engine,to_disk=to_disk,procs=procs)
print(' loading geometry...')
## spatial reference for area calculation
sr = get_sr(4326)
sr2 = get_sr(area_srid)
# data = dict([[key,list()] for key in ['gid','wkt','wkb','area_m2']])
# for dd in self.dim_data:
# data['gid'].append(int(self.gid[dd]))
# geom = self.geometry[dd]
# if isinstance(geom,Polygon):
# geom = MultiPolygon([geom])
# if wkt:
# wkt = str(geom.wkt)
# else:
# wkt = None
# data['wkt'].append(wkt)
# if wkb:
# wkb = str(geom.wkb)
# else:
# wkb = None
# data['wkb'].append(wkb)
# data['area_m2'].append(get_area(geom,sr,sr2))
# self.load_parallel(db.Geometry,data,procs)
def f(idx,geometry=sub.geometry,gid=sub.gid,wkt=wkt,wkb=wkb,sr=sr,sr2=sr2,get_area=get_area):
geom = geometry[idx]
if isinstance(geom,Polygon):
geom = MultiPolygon([geom])
if wkt:
wkt = str(geom.wkt)
else:
wkt = None
if wkb:
wkb = str(geom.wkb)
else:
wkb = None
return(dict(gid=int(gid[idx]),
wkt=wkt,
wkb=wkb,
area_m2=get_area(geom,sr,sr2)))
fkwds = dict(geometry=sub.geometry,gid=sub.gid,wkt=wkt,wkb=wkb,sr=sr,sr2=sr2,get_area=get_area)
gen = pl.ParallelGenerator(db.Geometry,
sub.dim_data,
f,
fkwds=fkwds,
procs=procs,
use_lock=use_lock)
gen.load()
print(' loading time...')
## load the time data
data = dict([[key,list()] for key in ['tid','time','day','month','year']])
for dt in sub.dim_time:
data['tid'].append(int(sub.tid[dt]))
data['time'].append(sub.timevec[dt])
data['day'].append(sub.timevec[dt].day)
data['month'].append(sub.timevec[dt].month)
data['year'].append(sub.timevec[dt].year)
load_parallel(db.Time,data,procs,use_lock=use_lock)
print(' loading value...')
## set up parallel loading data
data = dict([key,list()] for key in ['gid','level','tid','value'])
for dt in sub.dim_time:
for dl in sub.dim_level:
for dd in sub.dim_data:
data['gid'].append(int(sub.gid[dd]))
data['level'].append(int(sub.levelvec[dl]))
data['tid'].append(int(sub.tid[dt]))
data['value'].append(float(sub.value[dt,dl,dd]))
load_parallel(db.Value,data,procs,use_lock=use_lock)
return(db)
| 5,340,049
|
def delete_keys(session, keys):
"""Removes list of key classes from data store
args:
session: Active database session
keys: list of key classes
returns:
nothing
"""
_delete(session, keys)
session.commit()
| 5,340,050
|
def T_autoignition_methods(CASRN):
"""Return all methods available to obtain T_autoignition for the desired
chemical.
Parameters
----------
CASRN : str
CASRN, [-]
Returns
-------
methods : list[str]
Methods which can be used to obtain T_autoignition with the given inputs.
See Also
--------
T_autoignition
"""
if not _safety_data_loaded: _load_safety_data()
return list_available_methods_from_df_dict(Tautoignition_sources, CASRN, 'T_autoignition')
| 5,340,051
|
def parse_config2(filename=None):
"""
https://docs.python.org/3.5/library/configparser.html
:param filename: filename to parse config
    :return: a ConfigParser instance containing the parsed configuration
"""
_config = configparser.ConfigParser(allow_no_value=True)
if filename:
# ConfigParser does not create a file if it doesn't exist, so I will create an empty one.
if not os.path.isfile(filename):
with open(filename, 'w', encoding='utf-8') as f:
print('', file=f)
_config.read_file(open(filename, encoding='utf-8'))
return _config
| 5,340,052
|
def test_lsp_type_serialization() -> None:
"""
LSP spec names are camel case while Python conventions are to use snake case.
"""
class MyLspType(lsp_types.LspModel):
snake_case_name: int
optional: Optional[int]
spec_compatible: Dict = {"snakeCaseName": 0}
v1 = MyLspType(snake_case_name=0)
v2 = MyLspType(snakeCaseName=0)
v3 = MyLspType.parse_obj(spec_compatible)
for v in [v1, v2, v3]:
assert v.dict() == spec_compatible
| 5,340,053
|
def masa(jd, place):
"""Returns lunar month and if it is adhika or not.
1 = Chaitra, 2 = Vaisakha, ..., 12 = Phalguna"""
ti = tithi(jd, place)[0]
critical = sunrise(jd, place)[0] # - tz/24 ?
last_new_moon = new_moon(critical, ti, -1)
next_new_moon = new_moon(critical, ti, +1)
this_solar_month = raasi(last_new_moon)
next_solar_month = raasi(next_new_moon)
is_leap_month = (this_solar_month == next_solar_month)
maasa = this_solar_month + 1
if maasa > 12: maasa = (maasa % 12)
return [int(maasa), is_leap_month]
| 5,340,054
|
def sigma(s):
"""The probablity a normal variate will be `<s` sigma from the mean.
Parameters
----------
s : float
The number of sigma from the mean.
Returns
-------
p : float
The probability that a value within +/-s would occur.
"""
from scipy.special import erf
return 0.5 * (erf(s / np.sqrt(2.0)) - erf(-s / np.sqrt(2.0)))
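# Illustrative check (an added sketch, not part of the original source), matching the
# usual 68-95-99.7 rule for the normal distribution.
example_p = sigma(1.0)
# example_p is approximately 0.6827; sigma(2.0) ~= 0.9545, sigma(3.0) ~= 0.9973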
| 5,340,055
|
def addneq_parse_residualline(line: str) -> dict:
"""
Parse en linje med dagsløsningsresidualer fra en ADDNEQ-fil.
Udtræk stationsnavn, samt retning (N/E/U), spredning og derefter et vilkårligt
antal døgnresidualer.
En serie linjer kan se således ud:
GESR N 0.07 0.02 -0.06
GESR E 0.10 -0.00 -0.10
GESR U 0.23 -0.10 0.20
"""
params = line.split()
return {
"STATION NAME": params[0],
"DIRECTION": params[1],
"STDDEV": float(params[2]),
"RES": [float(x) for x in params[3:]],
}
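# Illustrative usage (an added sketch, not part of the original source), using the
# sample line from the docstring.
example_residuals = addneq_parse_residualline("GESR N           0.07     0.02   -0.06")
# example_residuals == {"STATION NAME": "GESR", "DIRECTION": "N",
#                       "STDDEV": 0.07, "RES": [0.02, -0.06]}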
| 5,340,056
|
async def threadsafe_async_pipe(queue: Queue, async_queue: asyncio.Queue, event: Event):
"""Checks the formal threadsafe queue for a message
then places it on the async_queue
Args:
queue:
async_queue:
event: The kill event
"""
while True and not event.is_set():
try:
item = queue.get(block=False)
await async_queue.put(item)
logging.debug(f"threadsafe_async_pipe put: {item}")
except Empty:
pass
finally:
await asyncio.sleep(0)
| 5,340,057
|
def extract_header(file_path):
"""
Loads the header from a PSG-type file at path 'file_path'.
Returns:
dictionary of header information
"""
fname = os.path.split(os.path.abspath(file_path))[-1]
_, ext = os.path.splitext(fname)
load_func = _EXT_TO_LOADER[ext[1:]]
header = load_func(file_path)
# Add file location data
file_path, file_name = os.path.split(file_path)
header['data_dir'] = file_path
header["file_name"] = file_name
return header
| 5,340,058
|
def get_xsd_schema(url):
"""Request the XSD schema from DOV webservices and return it.
Parameters
----------
url : str
URL of the XSD schema to download.
Returns
-------
xml : bytes
The raw XML data of this XSD schema as bytes.
"""
response = HookRunner.execute_inject_meta_response(url)
if response is None:
response = get_remote_url(url)
HookRunner.execute_meta_received(url, response)
return response
| 5,340,059
|
def get_char_pmi(data):
"""
    Compute the PMI (pointwise mutual information) of adjacent words and emit it
    per character in "char@@B#pmi" / "char@@I#pmi" form.
    :param data: iterable of whitespace-tokenized lines
    :return: list of per-line PMI-annotated strings
"""
print('get_char_pmi')
model = kenlm.LanguageModel('../software/kenlm/test.bin')
res = []
for line in data:
words = line.strip().split()
length = len(words)
words.append('\n')
i = 0
pmi_out = ""
while i < length:
p_union = get_proba(model, words[i] + " " + words[i + 1])
p1 = get_proba(model, words[i])
p2 = get_proba(model, words[i + 1])
p = pmi(p_union, p1, p2)
            # split down to the character level
word = words[i]
if len(word) > 0:
                # split down to the character level
j = 0
char = word[j]
pmi_out += char + "@@"
pmi_out += "B#" + str(p) + " "
j += 1
while j < len(word):
pmi_out += word[j] + '@@'
pmi_out += 'I#' + str(p) + " "
j += 1
i += 1
# last_char = words[i]
# p_union = get_proba(model, last_char + " \n")
# p1 = get_proba(model, last_char)
# p2 = get_proba(model, '\n')
# p = pmi(p_union, p1, p2)
# pmi_out += last_char + "@@" + 'B#' + str(p)
res.append(pmi_out.strip())
return res
| 5,340,060
|
def fahrenheit_to_celsius(fahrenheit):
"""Convert a Fahrenheit temperature to Celsius."""
return (fahrenheit - 32.0) / 1.8
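# Quick illustrative check (an added sketch, not part of the original source).
example_boiling = fahrenheit_to_celsius(212)  # 100.0
example_freezing = fahrenheit_to_celsius(32)  # 0.0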
| 5,340,061
|
def test_insert_sort_on_one_item_list():
"""Test insert sort with single item list."""
from insert import insert_sort
assert insert_sort([5]) == [5]
| 5,340,062
|
def sample_blocks(num_layers, num_approx):
"""Generate approx block permutations by sampling w/o replacement. Leave the
first and last blocks as ReLU"""
perms = []
for _ in range(1000):
perms.append(sorted(random.sample(list(range(0,num_layers)), num_approx)))
# Remove duplicates
perms.sort()
return [p for p,_ in itertools.groupby(perms) if len(p) == num_approx]
| 5,340,063
|
def test_installed_no_source():
"""
test wusa.installed without passing source
"""
with pytest.raises(SaltInvocationError) as excinfo:
wusa.installed(name="KB123456", source=None)
assert excinfo.exception.strerror == 'Must specify a "source" file to install'
| 5,340,064
|
def reviewer_actions(org, repo, output_file_prefix, pr_count):
""" Generate metrics for tier reviewer groups, and general contributors
Will collect tier reviewer teams from the github org
Tier reviewer teams will read from settings file, and default to what SatelliteQE uses
"""
for repo_name in repo:
click.echo(f"Collecting metrics for {org}/{repo_name} ...")
t1_metrics, t2_metrics = metrics_calculators.reviewer_actions(
organization=org, repository=repo_name, pr_count=pr_count
)
header = f"Tier1 Reviewer actions by week for [{repo_name}]"
click.echo(f"\n{'-' * len(header)}")
click.echo(header)
click.echo("-" * len(header))
click.echo(tabulate(t1_metrics, headers="keys", tablefmt="github"))
header = f"Tier2 Reviewer actions by week for [{repo_name}]"
click.echo(f"\n{'-' * len(header)}")
click.echo(header)
click.echo("-" * len(header))
click.echo(tabulate(t2_metrics, headers="keys", tablefmt="github"))
tier1_metrics_filename = METRICS_OUTPUT.joinpath(
f"{Path(output_file_prefix).stem}-"
f"{org}-"
f"{repo_name}-"
"tier1_reviewers-"
f"{datetime.now().isoformat(timespec='minutes')}.html"
)
click.echo(f"\nWriting PR metrics as HTML to {tier1_metrics_filename}")
file_io.write_to_output(
tier1_metrics_filename,
tabulate(t1_metrics, headers="keys", tablefmt="html"),
)
tier2_metrics_filename = METRICS_OUTPUT.joinpath(
f"{Path(output_file_prefix).stem}-"
f"{org}-"
f"{repo_name}-"
"tier2_reviewers-"
f"{datetime.now().isoformat(timespec='minutes')}.html"
)
click.echo(f"\nWriting PR metrics as HTML to {tier2_metrics_filename}")
file_io.write_to_output(
tier2_metrics_filename,
tabulate(t2_metrics, headers="keys", tablefmt="html"),
)
| 5,340,065
|
def save_visualization(segmentation, original_image, path_to_output, alpha=0.5):
"""
    Saves the visualization as a PNG at the specified file path.
"""
f = plt.figure()
a = f.add_subplot(131)
a.imshow(original_image, cmap='gray')
a.set_title('image')
a = f.add_subplot(132)
a.imshow(segmentation, cmap='gray')
a.set_title('model segmentation')
a = f.add_subplot(133)
a.imshow(original_image, cmap='gray')
a.imshow(segmentation, alpha=alpha)
a.set_title('visualization')
plt.savefig(path_to_output)
| 5,340,066
|
def adjacent_values(vals, q1, q3):
"""Helper function for violinplot visualisation (courtesy of
https://matplotlib.org/gallery/statistics/customized_violin.html#sphx-glr-gallery-statistics-customized-violin-py)
"""
upper_adjacent_value = q3 + (q3 - q1) * 1.5
upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
lower_adjacent_value = q1 - (q3 - q1) * 1.5
lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
return lower_adjacent_value, upper_adjacent_value
| 5,340,067
|
async def test_get_weather_no_forecast_data(hass, coordinator_config):
"""Test missing forecast data."""
session = Mock()
response = Mock()
response.status = HTTPStatus.OK
mock_response_json = {}
response.json = AsyncMock(return_value=mock_response_json)
session.get = AsyncMock(return_value=response)
with patch.object(
coordinator,
"async_get_clientsession",
return_value=session,
), patch.object(coordinator, "_LOGGER"):
coord = coordinator.WeatherAPIUpdateCoordinator(hass, coordinator_config)
result = await coord.get_weather()
assert result
assert not result[DATA_FORECAST] # No data found
assert len(coordinator._LOGGER.warning.mock_calls) == 2
assert coordinator._LOGGER.warning.mock_calls[0] == call(
"No current data received."
)
assert coordinator._LOGGER.warning.mock_calls[1] == call(
"No forecast data received."
)
| 5,340,068
|
def l2_first_moment(freq, n_trials, weights):
"""Return the first raw moment of the squared l2-norm of a vector (f-p), where `f` is an MLE
estimate
of the `p` parameter of the multinomial distribution with `n_trials`."""
return (np.einsum("aiai,ai->", weights, freq) - np.einsum("aiaj,ai,aj->", weights, freq, freq)) / n_trials
| 5,340,069
|
def sigmoid(x: float, a: float = 1, b: float = 1, shift: float = 0) -> float:
"""
    Sigmoid function represented by b * 1 / (1 + exp(-a * (x - shift)))
Args:
x (float): Input x
a (float, optional): Rate of inflection. Defaults to 1.
b (float, optional): Difference of lowest to highest value. Defaults to 1.
shift (float, optional): Horizontal shift. Defaults to 0.
Returns:
float: sigmoid function at x
"""
result = b * (1 / (1 + np.exp(-a * (x - shift))))
return result
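# Illustrative check (an added sketch, not part of the original source): with the
# defaults a=1, b=1, shift=0, the curve passes through 0.5 at x=0.
example_mid = sigmoid(0.0)
# example_mid == 0.5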
| 5,340,070
|
def db_credentials():
"""Load creds and returns dict of postgres keyword arguments."""
creds = load_json('creds.json')
return {
'host': creds['db_host'],
'user': creds['db_username'],
'password': creds['db_password'],
'database': creds['db_database']
}
| 5,340,071
|
def generate_corpus_output( cfg, docL, tfidfL ):
""" Generate a list of OutputRecords where the number of key words
        is limited to the cfg.corpusKeyWordCount highest scoring terms.
(i.e. cfg.usePerDocWordCount == False)
"""
outL = []
# for the cfg.corpusKeyWordCount highest scoring keywords
for i,(wordL,tfidf,termNL) in enumerate(tfidfL[0:min(cfg.corpusKeyWordCount,len(tfidfL))]):
out_recd = OutputRecord(wordL,tfidf,termNL)
logging.info("%i %f %s",i,tfidf,wordL)
# for each document
for doc in docL:
doc.find_sentences_in_doc(out_recd)
outL.append(out_recd)
return outL
| 5,340,072
|
def test_top_level_overrides_environment_markers(PipenvInstance):
"""Top-level environment markers should take precedence.
"""
with PipenvInstance() as p:
with open(p.pipfile_path, 'w') as f:
contents = """
[packages]
apscheduler = "*"
funcsigs = {version = "*", os_name = "== 'splashwear'"}
""".strip()
f.write(contents)
c = p.pipenv('install')
assert c.returncode == 0
assert "markers" in p.lockfile['default']['funcsigs'], p.lockfile['default']['funcsigs']
assert p.lockfile['default']['funcsigs']['markers'] == "os_name == 'splashwear'", p.lockfile['default']['funcsigs']
| 5,340,073
|
def expand(directory: str) -> str:
"""Apply expanduser and expandvars to directory to expand '~' and env vars."""
temp1 = os.path.expanduser(directory)
return os.path.expandvars(temp1)
| 5,340,074
|
def test_apply_colormap():
"""Basic test of silx.math.colormap.apply_colormap"""
data = numpy.arange(256)
expected_colors = numpy.empty((256, 4), dtype=numpy.uint8)
expected_colors[:, :3] = numpy.arange(256, dtype=numpy.uint8).reshape(256, 1)
expected_colors[:, 3] = 255
colors = colormap.apply_colormap(
data,
colormap="gray",
norm="linear",
autoscale="minmax",
vmin=None,
vmax=None,
gamma=1.0)
assert numpy.array_equal(colors, expected_colors)
| 5,340,075
|
def gather_ensemble_info(nmme_model):
"""Gathers ensemble information based on NMME model."""
# Number of ensembles in the forecast (ens_num)
# Ensemble start index (ens_start)
# Ensemble end index (ens_end)
if nmme_model == "CFSv2":
ens_num=24
ens_start=1
ens_end=24
elif nmme_model == "GEOSv2":
ens_num=10
ens_start=25
ens_end=34
elif nmme_model == "CCM4":
ens_num=10
ens_start=35
ens_end=44
elif nmme_model == "GNEMO":
ens_num=10
ens_start=45
ens_end=54
elif nmme_model == "CCSM4":
ens_num=10
ens_start=55
ens_end=64
elif nmme_model == "GFDL":
ens_num=30
ens_start=65
ens_end=94
else:
print(f"[ERR] Invalid argument for nmme_model! Received {nmme_model}")
sys.exit(1)
return ens_num, ens_start, ens_end
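
# Illustrative usage (not part of the original entry; the values follow the branches above):
ens_num, ens_start, ens_end = gather_ensemble_info("GEOSv2")
assert (ens_num, ens_start, ens_end) == (10, 25, 34)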
| 5,340,076
|
def makedirs(pathname):
"""Create a directory, or make sure that the directory is world-writable"""
    # This is a workaround for a problem caused by the Rayonix software running
# under a different user id on the Rayonix control computer, compared
# to the beamline control computer, so directories created via NFS on the
# control machine might not be writable on the Rayonix computer.
# E.g. user id 10660(xppopr) on "xpp-daq", versus user id 500(hsuser)
# on "con-ics-xpp-rayonix"
from os import makedirs,umask,chmod
from os.path import exists
from sys import stderr
if exists(pathname) and not iswritable(pathname):
try: chmod(pathname,0777)
except Exception,details: stderr.write("chmod: %r: %r" % (pathname,details))
if not exists(pathname):
umask(0000)
try: makedirs(pathname)
except Exception,details: stderr.write("makedirs: %r: %r" % (pathname,details))
| 5,340,077
|
def harmonic_fitter(progressions, J_thres=0.01):
"""
Function that will sequentially fit every progression
with a simple harmonic model defined by B and D. The
"B" value here actually corresponds to B+C for a near-prolate,
or 2B for a prolate top.
There are a number of filters applied in order to minimize
calculations that won't be meaningful - these parameters
may have to be tuned for different test cases.
Because the model is not actually quantized, J is
represented as a float. To our advantage, this will
actually separate real (molecular) progressions from
    fake news; roughly two-thirds of the J values must be
    close to an integer for us to consider fitting.
parameters:
---------------
progressions - iterable containing arrays of progressions
J_thres - optional argument corresponding to how close a
value must be to an integer
returns:
---------------
pandas dataframe containing the fit results; columns
are B, D, fit RMS, and pairs of columns corresponding
to the fitted frequency and approximate J value.
"""
BJ_fit_model = lmfit.models.Model(calc_harmonic_transition)
params = BJ_fit_model.make_params()
data = list()
fit_objs = list()
failed = list()
for index, progression in tqdm(enumerate(progressions)):
# Determine the approximate value of B based on
# the differences between observed transitions
approx_B = np.average(np.diff(progression))
# Calculate the values of J that are assigned based on B
J = (progression / approx_B) / 2.0
        # We want at least two-thirds of the lines to be close to integer values
if len(progression) >= 2:
if np.sum(quant_check(J, J_thres)) >= len(progression) / 1.5:
# Let B vary a bit
params["B"].set(approx_B, min=approx_B * 0.9, max=approx_B * 1.1)
                # Keep D small: constrain it to lie between 0 and 1
params["D"].set(0.001, min=0.0, max=1.0)
fit = BJ_fit_model.fit(
data=progression, J=J, params=params, fit_kws={"maxfev": 100}
)
# Only include progressions that can be fit successfully
if fit.success is True:
# Calculate fit RMS
rms = np.sqrt(np.average(np.square(fit.residual)))
                    # Record the fit RMS so that poor fits can be filtered out downstream
return_dict = dict()
return_dict["RMS"] = rms
return_dict.update(fit.best_values)
# Make columns for frequency and J
for i, frequency in enumerate(progression):
return_dict[i] = frequency
return_dict["J{}".format(i)] = J[i]
data.append(return_dict)
fit_objs.append(fit)
else:
failed.append([index, fit.fit_report()])
else:
failed.append(index)
else:
return_dict = dict()
return_dict["RMS"] = 0.0
return_dict["B"] = approx_B
# reformat the frequencies and approximate J values
for i, frequency in enumerate(progression):
return_dict[i] = frequency
return_dict["J{}".format(i)] = J[i]
data.append(return_dict)
full_df = pd.DataFrame(data=data)
full_df.sort_values(["RMS", "B", "D"], ascending=False, inplace=True)
return full_df, fit_objs
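
# `calc_harmonic_transition` and `quant_check` are imported elsewhere in the original module.
# A plausible stand-in for the integer-closeness test used above, shown only to clarify the
# filtering logic (an assumption; the real implementation may differ):
def _quant_check_sketch(J, J_thres):
    """Boolean array flagging J values that lie within J_thres of an integer."""
    return np.abs(J - np.round(J)) <= J_thres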
| 5,340,078
|
def print_KruskalWallisH(div_calc):
"""
Compute the Kruskal-Wallis H-test for independent samples. A typical rule is that
each group must have at least 5 measurements.
"""
calc = defaultdict(list)
try:
for k1, v1 in div_calc.iteritems():
for k2, v2 in v1.iteritems():
calc[k1].append(v2)
except:
return "Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "\
"significance testing."
h, p = stats.kruskal(*calc.values())
print "\nKruskal-Wallis H-test statistic for {} groups: {}".format(str(len(div_calc)), h)
print "p-value: {}".format(p)
| 5,340,079
|
def test_build_payload_xml_config(mock_pynxos_device_xml):
"""Build payload with list of commands (XML)."""
mock_device = mock_pynxos_device_xml
payload = mock_device.api._build_payload(
["logging history size 200"], method="cli_conf"
)
xml_root = etree.fromstring(payload)
api_method = xml_root.find("./type")
api_cmd = xml_root.find("./input")
assert xml_root.tag == "ins_api"
assert api_cmd.text == "logging history size 200"
assert api_method.text == "cli_conf"
| 5,340,080
|
def visualize_roc_curves(prediction,
target,
output_dir,
report_gt_feature_n_positives=50,
style="seaborn-colorblind",
fig_title="Feature ROC curves",
dpi=500):
"""
Output the ROC curves for each feature predicted by a model
as an SVG.
Parameters
----------
prediction : numpy.ndarray
Value predicted by user model.
target : numpy.ndarray
True value that the user model was trying to predict.
output_dir : str
The path to the directory to output the figures. Directories that
do not currently exist will be automatically created.
report_gt_feature_n_positives : int, optional
Default is 50. Do not visualize an ROC curve for a feature with
less than 50 positive examples in `target`.
style : str, optional
Default is "seaborn-colorblind". Specify a style available in
`matplotlib.pyplot.style.available` to use.
fig_title : str, optional
Default is "Feature ROC curves". Set the figure title.
dpi : int, optional
Default is 500. Specify dots per inch (resolution) of the figure.
Returns
-------
None
Outputs the figure in `output_dir`.
"""
os.makedirs(output_dir, exist_ok=True)
import matplotlib
backend = matplotlib.get_backend()
if "inline" not in backend:
matplotlib.use("SVG")
import matplotlib.pyplot as plt
plt.style.use(style)
plt.figure()
for index, feature_preds in enumerate(prediction.T):
feature_targets = target[:, index]
if len(np.unique(feature_targets)) > 1 and \
np.sum(feature_targets) > report_gt_feature_n_positives:
fpr, tpr, _ = roc_curve(feature_targets, feature_preds)
            plt.plot(fpr, tpr, color="black", alpha=0.3, lw=1)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
if fig_title:
plt.title(fig_title)
plt.savefig(os.path.join(output_dir, "roc_curves.svg"),
format="svg",
dpi=dpi)
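
# Illustrative call (a sketch, not from the original entry): random scores for three features,
# written to "roc_out/roc_curves.svg"; real predictions and targets would come from a model.
# prediction = np.random.rand(1000, 3)
# target = np.random.randint(0, 2, size=(1000, 3))
# visualize_roc_curves(prediction, target, "roc_out")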
| 5,340,081
|
def test_get_last():
"""
    Check that get_last returns a JSON response with the necessary fields
"""
route = GetLast()
route_result = route.get(engine=ENGINE)
assert check_api_result(route_result)
| 5,340,082
|
def display_solution(board):
"""Display the current state of the board (and count a solution)."""
global _num_solutions
_num_solutions += 1
if QUIET:
return
print('Solution {}:'.format(_num_solutions))
for i in range(0, len(board), TOTAL_WIDTH):
print(''.join(c or ' ' for c in board[i:i + TOTAL_WIDTH]))
print('')
| 5,340,083
|
def _random_exptname():
"""Generate randome expt name NNNNNNNN_NNNNNN, where N is any number 0..9"""
r = ''.join(random.choice(string.digits) for _ in range(8))
r = r + '_' + ''.join(random.choice(string.digits) for _ in range(6))
return r
| 5,340,084
|
def remove_store(store_name):
""" Deletes the named data store.
:param store_name:
:return:
"""
return get_data_engine().remove_store(store_name)
| 5,340,085
|
def rules_pyo3_fetch_remote_crates():
"""This function defines a collection of repos and should be called in a WORKSPACE file"""
maybe(
http_archive,
name = "rules_pyo3__cfg_if__1_0_0",
url = "https://crates.io/api/v1/crates/cfg-if/1.0.0/download",
type = "tar.gz",
strip_prefix = "cfg-if-1.0.0",
build_file = Label("//cargo/remote:BUILD.cfg-if-1.0.0.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__ctor__0_1_16",
url = "https://crates.io/api/v1/crates/ctor/0.1.16/download",
type = "tar.gz",
strip_prefix = "ctor-0.1.16",
build_file = Label("//cargo/remote:BUILD.ctor-0.1.16.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__ghost__0_1_2",
url = "https://crates.io/api/v1/crates/ghost/0.1.2/download",
type = "tar.gz",
strip_prefix = "ghost-0.1.2",
build_file = Label("//cargo/remote:BUILD.ghost-0.1.2.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__indoc__1_0_3",
url = "https://crates.io/api/v1/crates/indoc/1.0.3/download",
type = "tar.gz",
strip_prefix = "indoc-1.0.3",
build_file = Label("//cargo/remote:BUILD.indoc-1.0.3.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__instant__0_1_9",
url = "https://crates.io/api/v1/crates/instant/0.1.9/download",
type = "tar.gz",
strip_prefix = "instant-0.1.9",
build_file = Label("//cargo/remote:BUILD.instant-0.1.9.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__inventory__0_1_10",
url = "https://crates.io/api/v1/crates/inventory/0.1.10/download",
type = "tar.gz",
strip_prefix = "inventory-0.1.10",
build_file = Label("//cargo/remote:BUILD.inventory-0.1.10.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__inventory_impl__0_1_10",
url = "https://crates.io/api/v1/crates/inventory-impl/0.1.10/download",
type = "tar.gz",
strip_prefix = "inventory-impl-0.1.10",
build_file = Label("//cargo/remote:BUILD.inventory-impl-0.1.10.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__libc__0_2_81",
url = "https://crates.io/api/v1/crates/libc/0.2.81/download",
type = "tar.gz",
strip_prefix = "libc-0.2.81",
build_file = Label("//cargo/remote:BUILD.libc-0.2.81.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__lock_api__0_4_2",
url = "https://crates.io/api/v1/crates/lock_api/0.4.2/download",
type = "tar.gz",
strip_prefix = "lock_api-0.4.2",
build_file = Label("//cargo/remote:BUILD.lock_api-0.4.2.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__parking_lot__0_11_1",
url = "https://crates.io/api/v1/crates/parking_lot/0.11.1/download",
type = "tar.gz",
strip_prefix = "parking_lot-0.11.1",
build_file = Label("//cargo/remote:BUILD.parking_lot-0.11.1.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__parking_lot_core__0_8_1",
url = "https://crates.io/api/v1/crates/parking_lot_core/0.8.1/download",
type = "tar.gz",
strip_prefix = "parking_lot_core-0.8.1",
build_file = Label("//cargo/remote:BUILD.parking_lot_core-0.8.1.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__paste__1_0_4",
url = "https://crates.io/api/v1/crates/paste/1.0.4/download",
type = "tar.gz",
strip_prefix = "paste-1.0.4",
build_file = Label("//cargo/remote:BUILD.paste-1.0.4.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__proc_macro2__1_0_24",
url = "https://crates.io/api/v1/crates/proc-macro2/1.0.24/download",
type = "tar.gz",
strip_prefix = "proc-macro2-1.0.24",
build_file = Label("//cargo/remote:BUILD.proc-macro2-1.0.24.bazel"),
)
maybe(
new_git_repository,
name = "rules_pyo3__pyo3__0_12_4",
remote = "https://github.com/cecini/pyo3.git",
commit = "cc4ff4b80eb769e8b788973dfb0f7f579b124d33",
build_file = Label("//cargo/remote:BUILD.pyo3-0.12.4.bazel"),
init_submodules = True,
)
maybe(
new_git_repository,
name = "rules_pyo3__pyo3_derive_backend__0_12_4",
remote = "https://github.com/cecini/pyo3.git",
commit = "cc4ff4b80eb769e8b788973dfb0f7f579b124d33",
build_file = Label("//cargo/remote:BUILD.pyo3-derive-backend-0.12.4.bazel"),
init_submodules = True,
)
maybe(
new_git_repository,
name = "rules_pyo3__pyo3cls__0_12_4",
remote = "https://github.com/cecini/pyo3.git",
commit = "cc4ff4b80eb769e8b788973dfb0f7f579b124d33",
build_file = Label("//cargo/remote:BUILD.pyo3cls-0.12.4.bazel"),
init_submodules = True,
)
maybe(
http_archive,
name = "rules_pyo3__quote__1_0_7",
url = "https://crates.io/api/v1/crates/quote/1.0.7/download",
type = "tar.gz",
strip_prefix = "quote-1.0.7",
build_file = Label("//cargo/remote:BUILD.quote-1.0.7.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__redox_syscall__0_1_57",
url = "https://crates.io/api/v1/crates/redox_syscall/0.1.57/download",
type = "tar.gz",
strip_prefix = "redox_syscall-0.1.57",
build_file = Label("//cargo/remote:BUILD.redox_syscall-0.1.57.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__scopeguard__1_1_0",
url = "https://crates.io/api/v1/crates/scopeguard/1.1.0/download",
type = "tar.gz",
strip_prefix = "scopeguard-1.1.0",
build_file = Label("//cargo/remote:BUILD.scopeguard-1.1.0.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__smallvec__1_5_1",
url = "https://crates.io/api/v1/crates/smallvec/1.5.1/download",
type = "tar.gz",
strip_prefix = "smallvec-1.5.1",
build_file = Label("//cargo/remote:BUILD.smallvec-1.5.1.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__syn__1_0_54",
url = "https://crates.io/api/v1/crates/syn/1.0.54/download",
type = "tar.gz",
strip_prefix = "syn-1.0.54",
build_file = Label("//cargo/remote:BUILD.syn-1.0.54.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__unicode_xid__0_2_1",
url = "https://crates.io/api/v1/crates/unicode-xid/0.2.1/download",
type = "tar.gz",
strip_prefix = "unicode-xid-0.2.1",
build_file = Label("//cargo/remote:BUILD.unicode-xid-0.2.1.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__unindent__0_1_7",
url = "https://crates.io/api/v1/crates/unindent/0.1.7/download",
type = "tar.gz",
strip_prefix = "unindent-0.1.7",
build_file = Label("//cargo/remote:BUILD.unindent-0.1.7.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__winapi__0_3_9",
url = "https://crates.io/api/v1/crates/winapi/0.3.9/download",
type = "tar.gz",
strip_prefix = "winapi-0.3.9",
build_file = Label("//cargo/remote:BUILD.winapi-0.3.9.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__winapi_i686_pc_windows_gnu__0_4_0",
url = "https://crates.io/api/v1/crates/winapi-i686-pc-windows-gnu/0.4.0/download",
type = "tar.gz",
strip_prefix = "winapi-i686-pc-windows-gnu-0.4.0",
build_file = Label("//cargo/remote:BUILD.winapi-i686-pc-windows-gnu-0.4.0.bazel"),
)
maybe(
http_archive,
name = "rules_pyo3__winapi_x86_64_pc_windows_gnu__0_4_0",
url = "https://crates.io/api/v1/crates/winapi-x86_64-pc-windows-gnu/0.4.0/download",
type = "tar.gz",
strip_prefix = "winapi-x86_64-pc-windows-gnu-0.4.0",
build_file = Label("//cargo/remote:BUILD.winapi-x86_64-pc-windows-gnu-0.4.0.bazel"),
)
| 5,340,086
|
def is_tracked_upstream(folder: Union[str, Path]) -> bool:
"""
Check if the current checked-out branch is tracked upstream.
"""
try:
command = "git rev-parse --symbolic-full-name --abbrev-ref @{u}"
subprocess.run(
command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
check=True,
cwd=folder,
)
return True
except subprocess.CalledProcessError as exc:
if "HEAD" in exc.stderr:
raise OSError("No branch checked out")
return False
| 5,340,087
|
def limit_checkins_per_user(checkins: list, num_checkins_per_user: int, random_seed=1):
"""
    Limit each user to a maximum number of check-ins by randomly selecting check-ins.
Parameters
----------
checkins: list
list of check-ins
num_checkins_per_user: int
max number of check-ins per user, -1 for unlimited
random_seed: int
a random seed for random check-ins selection
Returns
-------
list
limited check-ins
"""
if num_checkins_per_user < 0:
return checkins
# convert check-in list to dict per user
checkins_per_user = defaultdict(list)
for c in checkins:
checkins_per_user[c.user_id].append(c)
# randomly select check-ins of users
limited_checkins = list()
for user_id, v in checkins_per_user.items():
if len(v) <= num_checkins_per_user:
# there are not enough check-ins, so get them all
limited_checkins.extend(v)
else:
# there are more check-ins than needed, so randomly choose some of them
random.seed(random_seed)
limited_checkins.extend(random.sample(v, k=num_checkins_per_user))
return limited_checkins
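
# Minimal usage sketch (the check-in type below is a hypothetical stand-in; the function only
# needs objects exposing a `user_id` attribute):
from collections import namedtuple
Checkin = namedtuple("Checkin", ["user_id", "place_id"])
sample = [Checkin("u1", p) for p in range(5)] + [Checkin("u2", p) for p in range(2)]
limited = limit_checkins_per_user(sample, num_checkins_per_user=3)
assert len([c for c in limited if c.user_id == "u1"]) == 3  # trimmed to the cap
assert len([c for c in limited if c.user_id == "u2"]) == 2  # kept in full, below the cap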
| 5,340,088
|
def expect_exit_code(step, exit_code):
"""Expect the exit code to be a certain integer"""
assert step.context.exit_code == exit_code, (
"Actual exit code was: {}\n".format(step.context.exit_code)
+ "stdout from radish run: '{}':\n".format(" ".join(step.context.command))
+ step.context.stdout.decode("utf-8")
)
| 5,340,089
|
async def main() -> None:
"""Show example on controlling your LaMetric device."""
    # Create an alert notification with 3 message frames.
    # The first frame is a text, the second is a goal, and the last one
    # shows a chart. Additionally, the WIN notification sound
    # is played.
notification = Notification(
icon_type=NotificationIconType.ALERT,
model=Model(
frames=[
Simple(text="Yeah", icon=18815),
Goal(
icon=7956,
data=GoalData(
current=65,
end=100,
start=0,
unit="%",
),
),
Chart(data=[1, 2, 3, 4, 5, 4, 3, 2, 1]),
],
sound=Sound(id=NotificationSound.WIN),
),
)
async with LaMetricDevice(
"192.168.1.11",
api_key="DEVICE_API_KEY",
) as lametric:
        # Raise the audio volume... so we can hear the notification
await lametric.audio(volume=100)
# Send notification
await lametric.notify(notification=notification)
| 5,340,090
|
def display_overview(ticker: str):
"""Alpha Vantage stock ticker overview
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
"""
df_fa = av_model.get_overview(ticker)
if df_fa.empty:
console.print("No API calls left. Try me later", "\n")
return
if gtff.USE_TABULATE_DF:
print(
tabulate(
df_fa.drop(index=["Description"]), headers=[], tablefmt="fancy_grid"
)
)
else:
console.print(df_fa.drop(index=["Description"]).to_string(header=False))
console.print(f"\nCompany Description:\n\n{df_fa.loc['Description'][0]}")
console.print("")
| 5,340,091
|
def getCharacterFilmography(characterID, charIF, charDF, movieIF, movieKF,
personIF, personKF, limit=None):
"""Build a filmography list for the specified characterID."""
try:
ifptr = open(charIF, 'rb')
except IOError, e:
import warnings
warnings.warn('Unable to access characters information, '
'please run the characters4local.py script: %s' % e)
return None
ifptr.seek(4L*characterID)
piddata = ifptr.read(4)
ifptr.close()
if len(piddata) != 4:
return None
idx = convBin(piddata, 'fulloffset')
try:
dfptr = open(charDF, 'rb')
except IOError, e:
import warnings
warnings.warn('Unable to access characters information, '
'please run the characters4local.py script: %s' % e)
return None
dfptr.seek(idx)
# Check characterID.
chID = dfptr.read(3)
if characterID != convBin(chID, 'characterID'):
dfptr.close()
return None
length = convBin(dfptr.read(2), 'longlength')
# Skip character name.
latin2utf(dfptr.read(length))
nrItems = convBin(dfptr.read(3), 'nrCharacterItems')
if limit is not None and nrItems/2 > limit:
nrItems = limit*2
filmography = []
for i in xrange(nrItems/2):
personID = convBin(dfptr.read(3), 'personID')
name = getLabel(personID, personIF, personKF)
movieID = convBin(dfptr.read(3), 'movieID')
title = getLabel(movieID, movieIF, movieKF)
# XXX: notes are not retrieved: they can be found scanning
        # actors.list and actresses.list, but it will slow down everything.
m = Movie(title=title, movieID=movieID, currentRole=name,
roleID=personID, roleIsPerson=True, accessSystem='local')
filmography.append(m)
dfptr.close()
return filmography
| 5,340,092
|
def sqrt_fixed_full(x, config, is_training=True, causal=True):
"""Full attention matrix with sqrt decomposition."""
bsize = x.shape[0]
query, key, value = attention.get_qkv(x, x, x, hidden_size=config.model_size,
num_heads=config.num_heads,
bias=config.dense_use_bias)
head_dim = config.model_size // config.num_heads
assert config.max_seq_len % config.max_seg_len == 0
num_seg = config.max_seq_len // config.max_seg_len
cur_query = tf.reshape(query, [-1,
num_seg,
config.max_seg_len,
config.num_heads,
head_dim])
with tf.variable_scope('pooling_query'):
merged_query = pooling_summary(cur_query, axis=2,
local_summary=config.local_summary,
keepdims=True)
cur_key = tf.reshape(key, cur_query.shape)
cur_val = tf.reshape(value, cur_query.shape)
span_val = attention.dot_product_attention(merged_query,
cur_key,
cur_val,
is_training=is_training,
attn_axis=1,
dropatt=config.dropatt)
span_val = tf.squeeze(span_val, axis=2)
with tf.variable_scope('pooling_key'):
span_key = pooling_summary(cur_key, axis=2,
local_summary=config.local_summary,
keepdims=False)
local_logits = tf.einsum('bsqhd,bskhd->bsqhk', cur_query, cur_key)
if causal:
local_mask = get_causal_mask(cur_query, axis=2, is_strict=False)
local_mask = tf.expand_dims(local_mask, axis=-2)
local_logits += local_mask
prev_logits = tf.einsum('bqhd,bkhd->bqhk', query, span_key)
if causal:
prev_mask = get_causal_mask(cur_query, axis=1, is_strict=True)
prev_mask = tf.repeat(prev_mask, [config.max_seg_len] * num_seg, axis=0)
prev_logits += tf.expand_dims(prev_mask, axis=1)
joint_logits = tf.concat([tf.reshape(local_logits,
[bsize, config.max_seq_len,
config.num_heads, -1]),
prev_logits], axis=-1)
attn_weights = attention.float32_softmax(joint_logits, axis=-1)
local_att, prev_att = tf.split(attn_weights, [config.max_seg_len, num_seg],
axis=-1)
if is_training:
local_att = tf.nn.dropout(local_att, rate=config.dropatt)
local_att = tf.reshape(local_att, [bsize, num_seg,
config.max_seg_len,
config.num_heads,
config.max_seg_len])
local_merged = tf.einsum('bsqhk,bskhd->bsqhd', local_att, cur_val)
prev_merged = tf.einsum('bqhk,bkhd->bqhd', prev_att, span_val)
joint_merged = prev_merged + tf.reshape(local_merged, prev_merged.shape)
output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2)
return output
| 5,340,093
|
def mcf_from_row(row, gene_to_dcid_list):
"""Generate data mcf from each row of the dataframe"""
gene = row['Gene name']
tissue = get_class_name(row['Tissue'])
cell = get_class_name(row['Cell type'])
expression = EXPRESSION_MAP[row['Level']]
reliability = RELIABILITY_MAP[row['Reliability']]
if gene not in gene_to_dcid_list:
# skip case when there is no gene to dcid mapping
return None
dcid_list = gene_to_dcid_list[gene]
mcf_list = []
for protein_dcid in dcid_list:
mcf_list.append(
generate_mcf(protein_dcid, tissue, cell, expression, reliability))
return '\n\n'.join(mcf_list)
| 5,340,094
|
def add_repo_information( pom, repo_url ):
"""Adds development maven repo to pom file so that the artifacts used are the development artifacts"""
to_insert = """
<repositories>
<repository>
<id>checker-framework-repo</id>
<url>%s</url>
</repository>
</repositories>
<pluginRepositories>
<pluginRepository>
<id>checker-framework-repo</id>
<url>%s</url>
</pluginRepository>
</pluginRepositories>
""" % (repo_url, repo_url)
result_str = execute( 'grep -nm 1 "<build>" %s' % pom, True, True )
line_no_str = result_str.split(":")[0]
line_no = int( line_no_str )
print(" LINE_NO: " + line_no_str )
insert_before_line( to_insert, pom, line_no )
| 5,340,095
|
def loads(json_str, target=None):
"""
Shortcut for instantiating a new :class:`JSONDecoder` and calling the :func:`from_json_str` function.
.. seealso::
For more information you can look at the doc of :func:`JSONDecoder.from_json_str`.
"""
return _decoder.from_json_str(json_str, target)
| 5,340,096
|
def discover(discover_system: bool = True) -> Discovery:
"""
Discover capabilities offered by this extension.
"""
logger.info("Discovering capabilities from aws-az-failure-chaostoolkit")
discovery = initialize_discovery_result(
"aws-az-failure-chaostoolkit", __version__, "aws"
)
discovery["activities"].extend(__load_exported_activities())
return discovery
| 5,340,097
|
def generate_feed(videos, baseurl, root_path):
"""Creates feed from items from db"""
fg = FeedGenerator()
fg.load_extension('podcast')
fg.title('My feed')
fg.link(href=baseurl, rel='alternate')
fg.description('Some description')
fg.author({"name":"makovako", "email":"test@example.com"})
fg.podcast.itunes_owner(name='makovako',email='test@example.com')
fg.podcast.itunes_author("makovako")
# videos = get_all_videos()
for video in videos:
fe = fg.add_entry()
fe.id('download/' + video.youtube_id)
fe.title(video.title)
fe.description(video.description)
fe.podcast.itunes_author(video.uploader)
fe.podcast.itunes_image(video.thumbnail)
fe.enclosure(baseurl+'download/'+video.youtube_id+'.mp3',0,'audio/mpeg')
fg.rss_str(pretty=True)
fg.rss_file(os.path.join(root_path, 'download', 'feed.xml'))
| 5,340,098
|
def getter_nofancy(a, b, asarray=True, lock=None):
""" A simple wrapper around ``getter``.
Used to indicate to the optimization passes that the backend doesn't
support fancy indexing.
"""
return getter(a, b, asarray=asarray, lock=lock)
| 5,340,099
|