| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def int_not_in_range(bounds, inclusive=False):
"""Creates property that must be an int outside bounds[0] and bounds[1].
Parameters:
bounds: Subscriptable with len()==2, where bounds[0] is the lower
bound and bounds[1] is the upper bound.
Requires bounds[1] > bounds[0].
inclusive (bool): If set to False, values falling on the upper and
lower bounds will not be accepted. Can set one bound to be
inclusive and the other exclusive by setting this to a tuple
of 2 bools, e.g. (True,False) makes the lower bound inclusive
while the upper bound is not.
Returns:
property
"""
return not_in_range(bounds, inclusive, type_constraint=int)
|
6890cd827fb741329c958a001e48013466414d11
| 3,640,600
|
from typing import Dict
from typing import List
def plot_concordance_pr(
pr_df: pd.DataFrame,
snv: bool,
colors: Dict[str, str] = None,
size_prop: str = None,
bins_to_label: List[int] = None,
) -> Column:
"""
Generates plots showing Precision/Recall curves for truth samples:
Two tabs:
- One displaying the PR curve with ranking computed on the entire data
- One displaying the PR curve with ranking computed on the truth sample only
Within each tab, a row of n_truth_samples.
The input to this function should come out of the `get_binned_concordance_pd` function, which creates
a DataFrame containing the necessary metrics for PR plotting and is grouped by 'rank_name', 'truth_sample', 'model' and 'snv'.
:param DataFrame pr_df: Input Dataframe
:param bool snv: Whether to plot SNVs or Indels
:param dict of str -> str colors: Optional colors to use (model name -> desired color)
:param str size_prop: Either 'radius' or 'area' can be specified. If either is specified, the points will be sized proportionally to the amount of data in that point.
:param list of int bins_to_label: Bins to label
:return: Bokeh grid of plots
:rtype: Tabs
"""
if colors is None:
# Get a palette automatically
models = sorted(list(set([g[2] for g in pr_df.groups])))
palette = d3['Category10'][max(3, len(models))]
colors = {model: palette[i] for i, model in enumerate(models)}
hover = HoverTool(
tooltips=[
('model', '@model'),
('bin', '@bin'),
('score (min, max)', '(@min_score, @max_score)'),
('n_alleles', '@n_alleles'),
('cum_alleles', '@cum_alleles'),
('data (x,y)', '($x, $y)'),
]
)
tabs = []
for rank in ['truth_sample_rank', 'global_rank']:
plot_row = []
for truth_sample in set([g[1] for g in pr_df.groups]):
p = figure(
title=truth_sample[0].upper() + truth_sample[1:],
x_axis_label='Recall',
y_axis_label='Precision',
tools=[hover] + [tool for tool in TOOLS.split(',') if tool != 'hover'],
)
p.xaxis[0].formatter = NumeralTickFormatter(format='0%')
p.yaxis[0].formatter = NumeralTickFormatter(format='0.0%')
circles = []
for model in set([g[2] for g in pr_df.groups]):
data = pr_df.get_group((rank, truth_sample, model, snv)).copy()
data['model'] = [model] * len(data)
data['size'] = get_point_size_col(data['n_alleles'], size_prop)
source = ColumnDataSource(data)
circles.append(
(
model,
[
p.circle(
'recall',
'precision',
size='size',
color=colors[model],
source=source,
)
],
)
)
if bins_to_label is not None:
label_data = data.loc[data.bin.isin(bins_to_label)].copy()
label_data['x_offset'] = label_data['recall'] + 0.025
label_data['y_offset'] = label_data['precision']
label_data['bin_str'] = [str(int(t)) for t in label_data['bin']]
label_source = ColumnDataSource(label_data)
p.add_layout(
LabelSet(
x='x_offset',
y='precision',
text='bin_str',
text_color=colors[model],
source=label_source,
)
)
p.multi_line(
xs=[[x, x + 0.05] for x in label_data.recall],
ys=[[y, y] for y in label_data.precision],
color=colors[model],
)
legend = Legend(
items=circles,
orientation='horizontal',
location=(0, 0),
click_policy='hide',
)
p.add_layout(legend, 'above')
_set_plots_defaults(p)
plot_row.append(p)
tabs.append(Panel(child=Row(children=plot_row), title=rank))
return Tabs(tabs=tabs)
|
8ad9541605c8f9f274faba03de7e19766341a562
| 3,640,601
|
from enum import Enum
def typehint_metavar(typehint):
"""Generates a metavar for some types."""
metavar = None
if typehint == bool:
metavar = '{true,false}'
elif is_optional(typehint, bool):
metavar = '{true,false,null}'
elif _issubclass(typehint, Enum):
enum = typehint
metavar = '{'+','.join(list(enum.__members__.keys()))+'}'
elif is_optional(typehint, Enum):
enum = typehint.__args__[0]
metavar = '{'+','.join(list(enum.__members__.keys())+['null'])+'}'
return metavar
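Illustrative sketch (the Color enum below is hypothetical, not part of the original code) of the metavar string the Enum branch above produces:

from enum import Enum

class Color(Enum):
    RED = 1
    GREEN = 2

# Same construction as the Enum branch of typehint_metavar.
print('{' + ','.join(Color.__members__.keys()) + '}')  # {RED,GREEN}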
|
31b42c29dd970d561917420e789eea6a7bd84cfa
| 3,640,602
|
from datetime import datetime, timedelta
def generate_signed_url(filename):
"""
Generate a signed url to access publicly
"""
found_blob = find(filename)
expiration = datetime.now() + timedelta(hours=1)
return found_blob.generate_signed_url(expiration)
|
917f78cfa12496baf655a8ea707143b4922f99c0
| 3,640,603
|
def delete_old_layer_versions(client, table, region, package, prefix):
"""
Loops through all layer versions found in DynamoDB and deletes a layer version if it is <maximum_days_older> older than the
latest layer version.
The latest layer version is always kept.
Because lambda functions are created at a maximum rate of once per day, a maximum of 14 layers can exist at one
time.
"""
deleted_arns = []
layer_name = f"{prefix}{package}"
# Get deployed layer versions
deployed_layer_version_arns = list_layer_version_arns(client=client,
layer_name=layer_name)
# Get Live Layer versions (they automatically delete if they're old)
response = table.query(KeyConditionExpression=Key("deployed_region-package").eq(f"{region}.{package}"),
ScanIndexForward=False)
live_layer_version_arns = [item['layer_version_arn'] for item in response['Items']]
# Delete layer versions
for layer_version_arn in deployed_layer_version_arns:
if layer_version_arn not in live_layer_version_arns:
logger.info(f"Found dead layer version {layer_version_arn}...deleting")
layer_version = layer_version_arn.split(":")[-1]
client.delete_layer_version(
LayerName=layer_name,
VersionNumber=layer_version
)
deleted_arns.append(layer_version_arn)
else:
pass
return deleted_arns
|
291afac422a37cad59a8c2128776567b24c5a0c1
| 3,640,604
|
def _run_simulation(sim_desc):
"""Since _run_simulation() is always run in a separate process, its input
and output params must be pickle-friendly. Keep that in mind when
making changes.
This is what each worker executes.
Given a SimulationDescription object, calls the sequence & binning
code, traps any errors that arise and grabs results. Also verifies that
the results meet our criteria (e.g. converts to tuples/lists if necessary,
raises an exception if the ppms, areas and phases arrays are not all the
same length, etc.)
If an exception is raised at any point, it sets _worker_exception.
Returns a result dict. If an exception occurred, the repackaged
exception is in result["exception"].
"""
started = util_time.now()
# I make a copy of dims because I need to return them as part of the
# result dict, and the pulse seq code might alter or even delete what's
# attached to the sim_desc.
dims = sim_desc.dims[:]
exception = False
# Execute the user's sequence code
try:
result = _sequence_function(sim_desc)
except:
exception = _repackage_exception(SEQUENCE_CODE_ALIAS)
if not exception:
# Sequence code completed OK.
if result:
# Sequence code returned the result. There's no need to
# execute the binning code.
pass
else:
# Execute the user's binning code
try:
result = _binning_function(sim_desc)
except:
exception = _repackage_exception(BINNING_CODE_ALIAS)
if exception:
result = EMPTY_RESULT
else:
# Execution completed with no errors. Let's see if what was returned
# meets our criteria. First, the result must be an N-tuple, where
# N == RESULT_LENGTH. As of this writing, RESULT_LENGTH == 3.
result_length = _safe_len(result)
if result_length != RESULT_LENGTH:
result = EMPTY_RESULT
# I force an error here so I can get the exception including a traceback.
try:
raise ValueError("Result returned from your code must be a %d-tuple, but has length %d" % \
(RESULT_LENGTH, result_length))
except ValueError:
exception = _repackage_exception(GENERIC_CODE_ALIAS)
# Our second criterion is that each element of the 3-tuple must be the
# same length.
lengths = [_safe_len(element) for element in result]
for length in lengths:
if length != lengths[0]:
result = EMPTY_RESULT
# I force an error here so I can get the exception including a traceback.
try:
raise ValueError("Result elements differ in length: %s" % lengths)
except ValueError:
exception = _repackage_exception(GENERIC_CODE_ALIAS)
# The user's code is required to return a tuple of iterables. Those
# iterables might be lists, numpy arrays, PyGAMMA.DoubleVectors or any
# number of other things. PyGAMMA objects in particular don't pickle, and
# this function's result needs to be pickleable.
# So here we ensure that the result contains only ordinary Python objects.
result = list(map(_tuplify, result))
# Last but not least, ensure that each value is numeric and a native
# Python type.
f = lambda an_object: isinstance(an_object, (float, int))
# Loop through ppms, areas & phases lists
for result_chunk in result:
# map() allows me to test all the items in the list in one shot.
if not all(map(f, result_chunk)):
# Ooops, at least one of the results in this list isn't a float,
# int, or long.
# I force an error here so I can get the exception including
# a traceback.
try:
raise ValueError("Results must contain only floats, ints or longs")
except ValueError:
exception = _repackage_exception(GENERIC_CODE_ALIAS)
# The result (good or bad) is returned as a dict.
result = dict(list(zip(("ppms", "areas", "phases"), result)))
result["started"] = started
result["completed"] = util_time.now()
result["metabolite"] = dims[0]
result["dims"] = dims[1:]
if exception:
_worker_exception.value = 1
result["exception"] = exception
return result
|
16cacdb3eaf1fadff8769ab6316eb06e89c226eb
| 3,640,605
|
def view_filestorage_file(self, request):
""" Renders the given filestorage file in the browser. """
return getattr(request.app, self.storage).getsyspath(self.path)
|
ad65b83b9462c8b8efec7626d4751685df3aba8b
| 3,640,606
|
def enum_choice_list(data):
""" Creates the argparse choices and type kwargs for a supplied enum type or list of strings """
# transform enum types, otherwise assume list of string choices
if not data:
return {}
try:
choices = [x.value for x in data]
except AttributeError:
choices = data
def _type(value):
return next((x for x in choices if x.lower() == value.lower()), value) if value else value
params = {
'choices': CaseInsensitiveList(choices),
'type': _type
}
return params
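A self-contained sketch of the same idea, using a plain list in place of the CaseInsensitiveList helper (which is not shown here); the Output enum is hypothetical:

import argparse
from enum import Enum

class Output(Enum):
    json = 'json'
    table = 'table'

choices = [x.value for x in Output]
parser = argparse.ArgumentParser()
parser.add_argument(
    '--output',
    choices=choices,  # plain list stands in for CaseInsensitiveList
    type=lambda v: next((c for c in choices if c.lower() == v.lower()), v),
)
print(parser.parse_args(['--output', 'JSON']).output)  # json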
|
4f91c76569a4b42e655ed198a5c4ec2e48d9e839
| 3,640,607
|
def chartset(request):
""" Conjunto de caracteres que determian la pagina
request: respuesta de la url"""
print "--------------- Obteniendo charset -------------------"
try:
charset = request.encoding
except AttributeError as error_atributo:
charset = "NA"
print "charset: " + str(error_atributo)
return charset
|
072ec863bd555706a64bab48d147afb24142fae4
| 3,640,608
|
import uuid
def generate_UUID():
"""
Generate a UUID and return it
"""
return str(uuid.uuid4())
|
feab11861e366ddf60cdc74c12f77f6a6ece2fa3
| 3,640,609
|
def streaming_recall_at_thresholds(predictions, labels, thresholds,
ignore_mask=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds.
`recall[i]` is defined as the number of values in `predictions` above
`thresholds[i]` whose corresponding entry in `labels` is `True`
(`true_positives[i]`) divided by the number of True values in `labels`
(`true_positives[i] + false_negatives[i]`).
If `ignore_mask` is not None then only values whose corresponding value in
`ignore_mask` is `False` are considered.
`recall` is returned along with an `update_op` whose value equals that of
`recall`.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A binary `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
ignore_mask: An optional, binary tensor whose size matches `predictions`.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_op_scope name.
Returns:
recall: A float tensor of shape [len(thresholds)].
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If the shape of `predictions` and `labels` do not match or if
`ignore_mask` is not `None` and its shape doesn't match `predictions`
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_op_scope([predictions, labels], name,
'recall_at_thresholds'):
(true_positives, false_negatives, _, _, true_positives_compute_op,
false_negatives_compute_op, _, _,) = _tp_fn_tn_fp(
predictions, labels, thresholds, ignore_mask)
# avoid division by zero
epsilon = 1e-7
def compute_recall(name):
recall = math_ops.div(true_positives,
epsilon + true_positives + false_negatives,
name='recall_' + name)
return recall
recall = compute_recall('value')
with ops.control_dependencies([true_positives_compute_op,
false_negatives_compute_op]):
update_op = compute_recall('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
|
6cdaa7cf3b0d1c35204764fb78c4f9cefb09b577
| 3,640,610
|
def fib(n):
"""Returns the nth Fibonacci number."""
if n == 0:
return 1
elif n == 1:
return 1
else:
return fib(n - 1) + fib(n - 2)
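Quick illustrative check of the convention noted above:

print([fib(n) for n in range(8)])  # [1, 1, 2, 3, 5, 8, 13, 21]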
|
397d5714f45491dde68c13379fe2a6acafe55002
| 3,640,611
|
import os
import pandas as pd
def load_saved_users(args) -> list:
"""
:param args:
:return: list
"""
data_frame = pd.read_csv(os.path.join(args.data_dir,
args.users_tweets_dir,
args.users_file),
header=None)
return list(data_frame[0])
|
efb4f9bbd055f39c30693fbea23d7e9254760e27
| 3,640,612
|
import logging
def remove_artifacts_from_biom_table(table_filename,
fasta_filename,
ref_fp,
biom_table_dir,
ref_db_fp,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from a biom table using SortMeRNA
Parameters
----------
table_filename : str
name of the biom table file
fasta_filename : str
the fasta file containing all the sequences of the biom table
Returns
-------
tmp_files : list of str
The temp files created during the artifact removal step
"""
logger = logging.getLogger(__name__)
logger.info('getting 16s sequences from the biom table')
# remove artifacts from the fasta file. output is in clean_fp fasta file
clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
working_dir=biom_table_dir,
ref_db_fp=ref_db_fp,
negate=False, threads=threads,
verbose=verbose,
sim_thresh=sim_thresh,
coverage_thresh=coverage_thresh)
if clean_fp is None:
logger.warn("No clean sequences in %s" % fasta_filename)
return tmp_files
logger.debug('removed artifacts from sequences input %s'
' to output %s' % (fasta_filename, clean_fp))
# read the clean fasta file
good_seqs = {s for _, s in sequence_generator(clean_fp)}
logger.debug('loaded %d sequences from cleaned biom table'
' fasta file' % len(good_seqs))
logger.debug('loading biom table %s' % table_filename)
table = load_table(table_filename)
# filter and save the artifact biom table
artifact_table = table.filter(list(good_seqs),
axis='observation', inplace=False,
invert=True)
# remove the samples with 0 reads
filter_minreads_samples_from_table(artifact_table)
output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
write_biom_table(artifact_table, output_nomatch_fp)
logger.info('wrote artifact only filtered biom table to %s'
% output_nomatch_fp)
# and save the reference-non-hit fasta file
output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
# filter and save the only 16s biom table
table.filter(list(good_seqs), axis='observation')
# remove the samples with 0 reads
filter_minreads_samples_from_table(table)
output_fp = join(biom_table_dir, 'reference-hit.biom')
write_biom_table(table, output_fp)
logger.info('wrote 16s filtered biom table to %s' % output_fp)
# and save the reference-non-hit fasta file
output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
fasta_from_biom(table, output_match_fasta_fp)
# we also don't need the cleaned fasta file
tmp_files.append(clean_fp)
return tmp_files
|
89f8be391587598db8fec97c3d6f9a46b4fdff54
| 3,640,613
|
def template_review(context, mapping):
""":phabreview: Object describing the review for this changeset.
Has attributes `url` and `id`.
"""
ctx = context.resource(mapping, b'ctx')
m = _differentialrevisiondescre.search(ctx.description())
if m:
return templateutil.hybriddict({
b'url': m.group(b'url'),
b'id': b"D{}".format(m.group(b'id')),
})
|
f475cf717026329ecc3c1ed1ccaff89089423e50
| 3,640,614
|
import numpy as np
import networkx as nx
def addRandomEdges(graph: nx.Graph, nEdges: int) -> tuple:
""" Adds random edges to a given graph """
nodes = list(graph.nodes)
n = len(nodes)
edges = []
for i in range(nEdges):
newEdge = False
while not newEdge:
i_u, i_v = np.random.randint(0, n), np.random.randint(0, n)  # randint's upper bound is exclusive; use n so every node can be chosen
edge = (nodes[i_u], nodes[i_v])
if edge not in graph.edges(data=False) and edge not in edges:
newEdge = True
edges.append(edge)
g = graph.copy()
g.add_edges_from(edges)
return g, edges
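A small usage sketch (illustrative only); note the helper does not exclude self-loops, so the exact final edge count can vary:

import networkx as nx
g0 = nx.path_graph(5)
g1, new_edges = addRandomEdges(g0, 2)
print(new_edges, g1.number_of_edges())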
|
004723ac17a431a266bae27c91316a66ced507f9
| 3,640,615
|
def get_s3_buckets_for_account(account, region='us-east-1'):
""" Get S3 buckets for a specific account.
:param account: AWS account
:param region: AWS region
"""
session = boto3.session.Session() # create session for Thread Safety
assume = rolesession.assume_crossact_audit_role(
session, account['accountNum'], region)
s3_data = []
if assume:
s3_client = assume.client('s3')
s3_info = s3_client.list_buckets().get('Buckets')
if s3_info:
for bucket in s3_info:
s3_global = is_s3_bucket_global(assume, bucket)
s3_data.append(
dict(BucketName=bucket['Name'],
AccountNum=account['accountNum'],
AccountAlias=account.get('alias'),
GlobalAccess=s3_global))
return s3_data
|
bc2a334bb6c358c43fb97336d3092c27372bd2d0
| 3,640,616
|
import os
def create_uid_email(username=None, hostname=None):
"""Create an email address suitable for a UID on a GnuPG key.
:param str username: The username portion of an email address. If None,
defaults to the username of the running Python
process.
:param str hostname: The FQDN portion of an email address. If None, the
hostname is obtained from gethostname(2).
:rtype: str
:returns: A string formatted as <username>@<hostname>.
"""
if hostname:
hostname = hostname.replace(' ', '_')
if not username:
try: username = os.environ['LOGNAME']
except KeyError: username = os.environ['USERNAME']
if not hostname: hostname = gethostname()
uid = "%s@%s" % (username.replace(' ', '_'), hostname)
else:
username = username.replace(' ', '_')
if (not hostname) and (username.find('@') == 0):
uid = "%s@%s" % (username, gethostname())
elif hostname:
uid = "%s@%s" % (username, hostname)
else:
uid = username
return uid
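Illustrative calls and the values the logic above produces (neither call reaches the gethostname/LOGNAME fallbacks):

print(create_uid_email('alice wonder', 'example.org'))  # alice_wonder@example.org
print(create_uid_email('bob@example.org'))              # bob@example.org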
|
ca8ad0204e188d850f93f94d559a5a2e04a9170c
| 3,640,617
|
def get_users():
""" Alle Benutzer aus der Datenbank laden. """
session = get_cassandra_session()
future = session.execute_async("SELECT user_id, username, email FROM users")
try:
rows = future.result()
except Exception:
log.exception("Failed to load users from Cassandra")
raise
users = []
for row in rows:
users.append({
'user_id': row.user_id,
'username': row.username,
'email': row.email
})
return jsonify({'users': users}), 200
|
c6f7b49447dd187e188e9094af3443fe3e4ed218
| 3,640,618
|
import numpy as np
from scipy import signal
def fastcorrelate(
input1, input2, usefft=True, zeropadding=0, weighting="None", displayplots=False, debug=False,
):
"""Perform a fast correlation between two arrays.
Parameters
----------
input1
input2
usefft
zeropadding
weighting
displayplots
debug
Returns
-------
corr
Notes
-----
From http://stackoverflow.com/questions/12323959/fast-cross-correlation-method-in-python.
"""
len1 = len(input1)
len2 = len(input2)
outlen = len1 + len2 - 1
if zeropadding < 0:
# autopad
newlen1 = len1 * 2
newlen2 = len2 * 2
paddedinput1 = np.zeros((newlen1), dtype=float)
paddedinput2 = np.zeros((newlen2), dtype=float)
paddedinput1[0:len1] = input1
paddedinput2[0:len2] = input2
startpt = (len1 + len2) // 2
elif zeropadding > 0:
# explicit pad
newlen1 = len1 + zeropadding
newlen2 = len2 + zeropadding
paddedinput1 = np.zeros((newlen1), dtype=float)
paddedinput2 = np.zeros((newlen2), dtype=float)
paddedinput1[0:len1] = input1
paddedinput2[0:len2] = input2
startpt = zeropadding
else:
# no pad
paddedinput1 = input1
paddedinput2 = input2
startpt = 0
if debug:
print(f"FASTCORRELATE - padding: {zeropadding}, startpt: {startpt}, outlen: {outlen}")
if usefft:
# Do an array flipped convolution, which is a correlation.
if weighting == "None":
return signal.fftconvolve(paddedinput1, paddedinput2[::-1], mode="full")[
startpt : startpt + outlen
]
else:
return convolve_weighted_fft(
paddedinput1,
paddedinput2[::-1],
mode="full",
weighting=weighting,
displayplots=displayplots,
)[startpt : startpt + outlen]
else:
return np.correlate(paddedinput1, paddedinput2, mode="full")
|
2cb18d598f6071806cd5cd9c6260fa5a58764cd7
| 3,640,619
|
import numpy as np
def vgconv(xinput,yinput,fwhm, ppr=None):
"""convolution with a Gaussian in log lambda scale
for a constant resolving power
Parameters
----------
xinput: numpy float array
wavelengths
yinput: numpy array of floats
fluxes
fwhm: float
FWHM of the Gaussian (km/s)
ppr: float, optional
Points per resolution element to downsample the convolved spectrum
(default None, to keep the original sampling)
Returns
-------
x: numpy float array
wavelengths after convolution, will be a subset of xinput when that is equidistant
in log lambda, otherwise a subset of the resampled version
y: numpy array of floats
fluxes after convolution
"""
#resampling to ln(lambda) if need be
xx = np.diff(np.log(xinput))
if max(xx) - min(xx) > 1.e-7: #input not equidist in loglambda
nel = len(xinput)
minx = np.log(xinput[0])
maxx = np.log(xinput[-1])
x = np.linspace(minx,maxx,nel)
step = x[1] - x[0]
x = np.exp(x)
#y = np.interp( x, xinput, yinput)
y = interp_spl( x, xinput, yinput)
else:
x = xinput
y = yinput
step = np.log(xinput[1])-np.log(xinput[0])
fwhm = fwhm/clight # inverse of the resolving power
sigma=fwhm/2.0/np.sqrt(-2.0*np.log(0.5))
npoints = 2*int(3*fwhm/2./step)+1
half = npoints * step /2.
xx = np.linspace(-half,half,npoints)
kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)
kernel = kernel/np.sum(kernel)
y = np.convolve(y,kernel,'valid')
edge = int(npoints/2)
x = x[edge:-edge]
#print(xinput.size,x.size,y.size)
if ppr != None:
fac = int(fwhm / step / ppr)
print(fwhm,step,ppr,fac)
subset = np.arange(x.size / fac, dtype=int) * fac
x = x[subset]
y = y[subset]
return(x,y)
|
d4722c87881eca27bac45cd47f84269249591cd0
| 3,640,620
|
def attach_component_to_entity(entity_id, component_name):
# type: (azlmbr.entity.EntityId, str) -> azlmbr.entity.EntityComponentIdPair
"""
Adds the component if not added already.
:param entity_id: EntityId of the entity to attach the component to
:param component_name: name of the component
:return: If successful, returns the EntityComponentIdPair, otherwise returns None.
"""
type_ids_list = editor.EditorComponentAPIBus(
bus.Broadcast, 'FindComponentTypeIdsByEntityType', [component_name], 0)
general.log(f"Components found = {len(type_ids_list)}")
if len(type_ids_list) < 1:
general.log(f"ERROR: A component class with name {component_name} doesn't exist")
return None
elif len(type_ids_list) > 1:
general.log(f"ERROR: Found more than one component classes with same name: {component_name}")
return None
# Before adding the component let's check if it is already attached to the entity.
component_outcome = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentOfType', entity_id, type_ids_list[0])
if component_outcome.IsSuccess():
return component_outcome.GetValue() # In this case the value is not a list.
component_outcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', entity_id, type_ids_list)
if component_outcome.IsSuccess():
general.log(f"{component_name} Component added to entity.")
return component_outcome.GetValue()[0]
general.log(f"ERROR: Failed to add component [{component_name}] to entity")
return None
|
f2c29f18ede8eef7accaf19970d18b0a432801ed
| 3,640,621
|
def get_raw_samples(sampling_strategy: sample_entry.Strategy, step: int) -> np.ndarray:
"""
Collects raw samples from the database associated with the sampling strategy. If the raw samples do not exist in
the database, new ones will be created by calling the Sobol function
"""
sampling_strategy.reload()
for raw_sample in sampling_strategy.samples_raw:
if raw_sample.sequence_number == step:
logger.debug(f'Found existing raw sample with sequence #{step}')
return np.array(raw_sample.samples_raw)
logger.debug(f'Creating new raw sample with sequence #{step}')
distribution_dimension = len(sampling_strategy.strategy['distributions'].keys())
samples_raw = sobol(m=sampling_strategy.strategy['settings']['raw sample size'],
dimension=distribution_dimension, sets=1)
samples_raw_id = sampling_interactions.upload_raw_samples(samples_raw, step)
sampling_interactions.add_raw_samples_to_strategy(sampling_strategy, samples_raw_id)
return samples_raw
|
d0b2b4d643767eb1afc65611a78888d2c5b57824
| 3,640,622
|
import yaml
from io import StringIO
def mix_to_dat(probspec,isStringIO=True):
"""
Reads a YAML mix file and generates all of the GMPL dat components associated with
the mix inputs.
Inputs:
probspec - problem spec dict; must contain probspec['reqd_files']['filename_mix'] and may
contain probspec['opt_files']['filename_ttbounds']
isStringIO - if True, return the generated dat components as a single string
Output:
param tt_shiftlen_min_dys_weeks:=
1 6 1 3
1 6 2 5
1 6 3 5
1 6 4 5
...
"""
# Open the mix file and load it into a YAML object
fn_mix = probspec['reqd_files']['filename_mix']
fin = open(fn_mix,"r")
ttspec = yaml.load(fin)
mixout = StringIO()
## print ttspec
## print ttspec['tourtypes']
## print ttspec['tourtypes'][0]
## print ttspec['tourtypes'][0]['min_days_week']
# Get set of shift lengths and order them ascending by length
lenset = set([])
for m in ttspec['tourtypes']:
for s in m['shiftlengths']:
lenset.add(s['numbins'])
lengths = list(lenset)
lengths.sort()
len_param = list_to_param('lengths', lengths)
# Number of shift lengths
n_lengths = size(lengths)
numlen_param = scalar_to_param('n_lengths', n_lengths)
# Number of tour types
n_ttypes = size(ttspec['tourtypes'])
numttypes_param = scalar_to_param('n_tts', n_ttypes)
# Tour type length sets
lenxset = get_length_x_from_mix(ttspec)
lenxset_set = list_to_indexedset('tt_length_x', lenxset)
# Midnight threshold for weekend assignments
midthresholds = [m['midnight_thresh'] for m in ttspec['tourtypes']]
midthresh_param = list_to_param('midnight_thresh', midthresholds)
# Parttime flag and bound
ptflags = [m['is_parttime'] for m in ttspec['tourtypes']]
ptflags_param = list_to_param('tt_parttime', ptflags)
ptfrac = ttspec['max_parttime_frac']
ptfrac_param = scalar_to_param('max_parttime_frac', ptfrac)
# Global start window width
width = ttspec['g_start_window_width']
width_param = scalar_to_param('g_start_window_width', width)
# Lower and upper bounds on number scheduled
if 'opt_files' in probspec and 'filename_ttbounds' in probspec['opt_files']:
fn_ttbnds = probspec['opt_files']['filename_ttbounds']
fin_ttbnds = open(fn_ttbnds,"r")
ttbndsspec = yaml.load(fin_ttbnds)
tt_lb = [m['tt_lb'] for m in ttbndsspec['tourtypes']]
tt_lb_param = list_to_param('tt_lb', tt_lb)
tt_ub = [m['tt_ub'] for m in ttbndsspec['tourtypes']]
tt_ub_param = list_to_param('tt_ub', tt_ub)
else:
tt_lb = [m['tt_lb'] for m in ttspec['tourtypes']]
tt_lb_param = list_to_param('tt_lb', tt_lb)
tt_ub = [m['tt_ub'] for m in ttspec['tourtypes']]
tt_ub_param = list_to_param('tt_ub', tt_ub)
# Cost multiplier
tt_cost_multiplier = [m['tt_cost_multiplier'] for m in ttspec['tourtypes']]
tt_cost_multiplier_param = list_to_param('tt_cost_multiplier',
tt_cost_multiplier)
# Min and max cumulative days and prds worked over the weeks
tt_min_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_min_dys_weeks','min_days_week',
'min_shiftlen_days_week')
tt_max_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_max_dys_weeks','max_days_week',
'max_shiftlen_days_week')
tt_min_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_min_prds_weeks','min_prds_week',
'min_shiftlen_prds_week')
tt_max_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_max_prds_weeks','max_prds_week',
'max_shiftlen_prds_week')
# Min and max days and prds worked over the weeks
# for each shift length workable in the tour type
tt_shiftlen_min_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_min_dys_weeks','min_days_week',
'min_shiftlen_days_week')
tt_shiftlen_max_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_max_dys_weeks','max_days_week',
'max_shiftlen_days_week')
tt_shiftlen_min_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_min_prds_weeks','min_prds_week',
'min_shiftlen_prds_week')
tt_shiftlen_max_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_max_prds_weeks','max_prds_week',
'max_shiftlen_prds_week')
# Min and max days and prds worked each week
tt_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_min_cumul_dys_weeks','min_cumul_days_week',
'min_shiftlen_cumul_days_week')
tt_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_max_cumul_dys_weeks','max_cumul_days_week',
'max_shiftlen_cumul_days_week')
tt_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_min_cumul_prds_weeks','min_cumul_prds_week',
'min_shiftlen_cumul_prds_week')
tt_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_max_cumul_prds_weeks','max_cumul_prds_week',
'max_shiftlen_cumul_prds_week')
# Min and max cumulative days and prds worked over the weeks
# for each shift length workable in the tour type
tt_shiftlen_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_min_cumul_dys_weeks','min_cumul_days_week',
'min_shiftlen_cumul_days_week')
tt_shiftlen_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_max_cumul_dys_weeks','max_cumul_days_week',
'max_shiftlen_cumul_days_week')
tt_shiftlen_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_min_cumul_prds_weeks','min_cumul_prds_week',
'min_shiftlen_cumul_prds_week')
tt_shiftlen_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
'tt_shiftlen_max_cumul_prds_weeks','max_cumul_prds_week',
'max_shiftlen_cumul_prds_week')
# Put the parameter pieces together into a single StringIO object
print(numlen_param, file=mixout)
print(len_param, file=mixout)
print(numttypes_param, file=mixout)
print(lenxset_set, file=mixout)
print(midthresh_param, file=mixout)
print(tt_lb_param, file=mixout)
print(tt_ub_param, file=mixout)
print(tt_cost_multiplier_param, file=mixout)
print(ptflags_param, file=mixout)
print(ptfrac_param, file=mixout)
print(width_param, file=mixout)
print(tt_min_cumul_dys_weeks_param, file=mixout)
print(tt_max_cumul_dys_weeks_param, file=mixout)
print(tt_min_cumul_prds_weeks_param, file=mixout)
print(tt_max_cumul_prds_weeks_param, file=mixout)
print(tt_min_dys_weeks_param, file=mixout)
print(tt_max_dys_weeks_param, file=mixout)
print(tt_min_prds_weeks_param, file=mixout)
print(tt_max_prds_weeks_param, file=mixout)
print(tt_shiftlen_min_dys_weeks_param, file=mixout)
print(tt_shiftlen_max_dys_weeks_param, file=mixout)
print(tt_shiftlen_min_prds_weeks_param, file=mixout)
print(tt_shiftlen_max_prds_weeks_param, file=mixout)
print(tt_shiftlen_min_cumul_dys_weeks_param, file=mixout)
print(tt_shiftlen_max_cumul_dys_weeks_param, file=mixout)
print(tt_shiftlen_min_cumul_prds_weeks_param, file=mixout)
print(tt_shiftlen_max_cumul_prds_weeks_param, file=mixout)
# print mixout.getvalue()
if isStringIO:
return mixout.getvalue()
else:
# rewind before read(); otherwise read() would return an empty string
mixout.seek(0)
smixout = mixout.read()
return smixout
|
972c1118c8d7af6dc8f9ff87908b1ca7184c880e
| 3,640,623
|
from typing import Any
def get_setting(setting_name: str, default: Any=None) -> Any:
"""
Convenience wrapper to get the value of a setting.
"""
configuration = get_configuration()
if not configuration: # pragma: no cover
raise Exception('get_setting() called before configuration was initialised')
return configuration.get_setting_live(setting_name, default)
|
774ee06824a227ed66357cb46a5277c24ba11f09
| 3,640,624
|
import numpy as np
def deceptivemultimodal(x: np.ndarray) -> float:
"""Infinitely many local optima, as we get closer to the optimum."""
assert len(x) >= 2
distance = np.sqrt(x[0]**2 + x[1]**2)
if distance == 0.:
return 0.
angle = np.arctan(x[0] / x[1]) if x[1] != 0. else np.pi / 2.
invdistance = int(1. / distance) if distance > 0. else 0.
if np.abs(np.cos(invdistance) - angle) > 0.1:
return 1.
return float(distance)
|
c08ab425bbc9803fcea9695c46acee71c2455873
| 3,640,625
|
import os
import numpy as np
def get_basins_scores(memory_array, binarized_cluster_dict, basinscore_method="default"):
"""
Args:
- memory_array: i.e. xi matrix, will be N x K (one memory from each cluster)
- binarized_cluster_dict: {k: N x M array for k in 0 ... K-1 (i.e. cluster index)}
- basinscore_method: options for different basin scoring algos
(one based on crawling the basin exactly, other via low temp dynamics)
Returns:
- score_dict: {k: M x 1 array for k in 0 ... K-1 (i.e. cluster index)}
"""
assert basinscore_method in ['crawler', 'trajectories']
num_genes, num_clusters = memory_array.shape
cells_each_cluster = [binarized_cluster_dict[idx].shape[1] for idx in range(num_clusters)]
num_cells = np.sum(cells_each_cluster)
print("num_genes, num_clusters, num_cells:\n%d %d %d" % (num_genes, num_clusters, num_cells))
def basin_score_pairwise(basin_k, memory_vector, data_vector):
# OPTION 1 -- is cell in basin yes/no
# OPTION 2 -- is cell in basin - some scalar value e.g. projection onto that mem
# OPTION 3 -- based on compare if data vec in set of basin states (from aux fn)
hd = hamming(memory_vector, data_vector)
if tuple(data_vector) in basin_k[hd]:
print("data_vector in basin_k[hd]")
return 1.0
else:
print("data_vector NOT in basin_k[hd]")
return 0.0
# 1 is build J_ij from Xi
_, a_inv_arr = memory_corr_matrix_and_inv(memory_array, check_invertible=True)
eta = predictivity_matrix(memory_array, a_inv_arr)
intxn_matrix = interaction_matrix(memory_array, a_inv_arr, "projection")
# 2 is score each cell in each cluster based on method
score_dict = {k: 0 for k in range(num_clusters)}
# setup io
io_dict = run_subdir_setup(run_subfolder=ANALYSIS_SUBDIR)
if basinscore_method == 'crawler':
for k in range(num_clusters):
print("Scoring basin for cluster:", k)
binary_cluster_data = binarized_cluster_dict[k]
memory_k = memory_array[:,k]
basin_k = build_basin_states(intxn_matrix, memory_k)
for cell_data in binary_cluster_data.T: # TODO make sure this gives columns (len is N)
print(len(cell_data), num_genes, cell_data.shape)
score_dict[k] += basin_score_pairwise(basin_k, memory_k, cell_data)
print(score_dict)
else:
assert basinscore_method == 'trajectories'
for k in range(num_clusters):
print("Scoring basin for cluster:", k)
#init_conds = binarized_cluster_dict[k]
print("WARNING: only looking at first 10 cells in each cluster")
init_conds = binarized_cluster_dict[k][:,0:10]
trajectories = basin_projection_timeseries(k, memory_array, intxn_matrix, eta, init_conds, io_dict['plotdir'],
num_steps=3, plot=True, flag_write=False)
print(trajectories)
score_dict[k] = np.mean(trajectories[-1,:])
# save to file
scores = [score_dict[k] for k in range(num_clusters)]
np.savetxt(data_folder + os.sep + "scores.txt", scores)
return score_dict, io_dict
|
9b4bf91ed760767444a595fb575c81593189e486
| 3,640,626
|
from aiida.orm import Dict
from aiida_quantumespresso.utils.resources import get_default_options
def generate_inputs_ph(fixture_sandbox, fixture_localhost, fixture_code, generate_remote_data, generate_kpoints_mesh):
"""Generate default inputs for a `PhCalculation."""
def _generate_inputs_ph():
"""Generate default inputs for a `PhCalculation."""
inputs = {
'code': fixture_code('quantumespresso.matdyn'),
'parent_folder': generate_remote_data(fixture_localhost, fixture_sandbox.abspath, 'quantumespresso.pw'),
'qpoints': generate_kpoints_mesh(2),
'parameters': Dict(dict={'INPUTPH': {}}),
'metadata': {
'options': get_default_options()
}
}
return inputs
return _generate_inputs_ph
|
4ab1f46ff08094fccd4197a19ab56c31dc1ac93c
| 3,640,627
|
from urllib.parse import quote
def escape_url(raw):
"""
Escape urls to prevent code injection craziness. (Hopefully.)
"""
return quote(raw, safe="/#:")
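For illustration: spaces and query characters are percent-encoded, while '/', '#' and ':' are left intact:

print(escape_url("https://example.com/a b?q=1"))  # https://example.com/a%20b%3Fq%3D1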
|
4eee23f244998d2d2f4abd892a867f2e27f502a2
| 3,640,628
|
def split_sample(labels):
"""
Split the 'Sample' column of a DataFrame into a list.
Parameters
----------
labels: DataFrame
The Dataframe should contain a 'Sample' column for splitting.
Returns
-------
DataFrame
Updated DataFrame has 'Sample' column with a list of strings.
"""
sample_names = labels["Sample"].str.split(" ", n=1, expand=False)
labels['Sample'] = sample_names
return labels
|
483f1b78e07a2156aa3e48ae6c1f5ce41f5e60fe
| 3,640,629
|
def pmi_odds(pnx, pn, nnx, nn):
"""
Computes the PMI with odds
Args:
pnx (int): number of POSITIVE news with the term x
pn (int): number of POSITIVE news
nnx (int): number of NEGATIVE news with the term x
nn (int): number of NEGATIVE news
Returns:
float: PMI
"""
#print (pnx, pn, nnx, nn)
return _pmi_odds_(p_p(pnx, pn), p_n(nnx, nn))
|
5d4786f477fb12051a5a56887a7a7573aeab0802
| 3,640,630
|
def berDecodeLength(m, offset=0):
"""
Return a tuple of (length, lengthLength).
m must be at least one byte long.
"""
l = ber2int(m[offset + 0:offset + 1])
ll = 1
if l & 0x80:
ll = 1 + (l & 0x7F)
need(m, offset + ll)
l = ber2int(m[offset + 1:offset + ll], signed=0)
return (l, ll)
|
e93252966e370088274f62bd512d59062e7431b2
| 3,640,631
|
def hasAspect(obj1, obj2, aspList):
""" Returns if there is an aspect between objects
considering a list of possible aspect types.
"""
aspType = aspectType(obj1, obj2, aspList)
return aspType != const.NO_ASPECT
|
71907043900d080f2254557fe0bd2420b9bf9ac3
| 3,640,632
|
def gen_decomposition(denovo_name, basis_names, weights, output_path, project, \
mtype, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction=False, statistics=None, sig_version=None, custom_text=None):
"""
Generate the correct plot based on mtype.
Parameters:
----------
denovo_name: (String) Name of denovo signature
basis_names: (List of Strings) Names of basis signatures
weights: (List of Strings) Percentile contribution for each basis signature
output_path: (String) Path to existing output directory
project: (String) Project name appended to file names
mtype: (String) The context 'mtype_options' has valid values
denovo_plots_dict (Dictionary) Signatures are keys, ByteIO plots are values
basis_plots_dict (Dictionary) Signatures are keys, ByteIO plots are values
reconstruction_plot_dict (Dictionary) Signatures are keys, ByteIO plots are values
reconstruction: (Boolean) True to generate plot w/ reconstruction
statistics: (Pandas Dataframe) Output from calculate_similarities()
"""
if mtype == "6":
print("Need to add support for SBS6 Decomposition")
elif mtype == "24":
print("Need to add support for SBS24 Decomposition")
elif mtype == "96":
byte_plot=spd_96.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "288":
byte_plot=spd_288.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "384":
print("Need to add support for SBS24 Decomposition")
elif mtype == "1536":
byte_plot=spd_1536.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "6144":
print("Need to add support for SBS6144 Decomposition")
elif mtype == "28":
print("Need to add support for ID28 Decomposition")
elif mtype == "83":
byte_plot=spd_83.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "415":
print("Need to add support for ID415 Decomposition")
elif mtype == "78":
byte_plot=spd_78.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
elif mtype == "186":
print("Need to add support for DBS186 Decomposition")
elif mtype == "1248":
print("Need to add support for DBS1248 Decomposition")
elif mtype == "2976":
print("Need to add support for DBS2976 Decomposition")
elif mtype == "48":
byte_plot=cnv_48.gen_decomposition(denovo_name, basis_names, weights, output_path, \
project, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict, \
reconstruction, statistics, sig_version, custom_text)
return byte_plot
|
9bb65728017a3f9f2a64ae94cb1ae7e15268c93b
| 3,640,633
|
from unittest.mock import Mock
def org(gh):
"""Creates an Org instance and adds an spy attribute to check for calls"""
ret = Organization(gh, name=ORG_NAME)
ret._gh = Mock(wraps=ret._gh)
ret.spy = ret._gh
return ret
|
017d044015ff60c91742ea2eb12e2cd7720328c6
| 3,640,634
|
def merge_regions(
out_path: str, sample1_id: int, regions1_file: File, sample2_id: int, regions2_file: File
) -> File:
"""
Merge two sorted region files into one.
"""
def iter_points(regions):
for start, end, depth in regions:
yield (start, "start", depth)
yield (end, "end", -depth)
def iter_regions(points):
first_point = next(points, None)
if first_point is None:
return
start, _, depth = first_point
for pos, kind, delta in points:
if pos > start:
yield (start, pos, depth)
start = pos
depth += delta
regions1 = read_regions(regions1_file)
regions2 = read_regions(regions2_file)
points1 = iter_points(regions1)
points2 = iter_points(regions2)
points = iter_merge(points1, points2)
regions = iter_regions(points)
region_path = f"{out_path}/regions/{sample1_id}_{sample2_id}.regions"
return write_regions(region_path, regions)
|
eeb4b8bf73df45ae9d6af39d0d8c9db04251da41
| 3,640,635
|
import hashlib
def get_text_hexdigest(data):
"""returns md5 hexadecimal checksum of string/unicode data
NOTE
----
The md5 sum of get_text_hexdigest can differ from get_file_hexdigest.
This will occur if the line ending character differs from being read in
'rb' versus 'r' modes.
"""
data_class = data.__class__
# fmt: off
if data_class in ("".__class__, u"".__class__):
data = data.encode("utf-8")
elif data.__class__ != b"".__class__:
raise TypeError("can only checksum string, unicode or bytes data")
# fmt: on
md5 = hashlib.md5()
md5.update(data)
return md5.hexdigest()
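A quick illustrative check that str and bytes inputs with the same content produce the same digest:

print(get_text_hexdigest("hello world"))   # 5eb63bbbe01eeed093cb22bb8f5acdc3
print(get_text_hexdigest(b"hello world"))  # same digest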
|
762115178406c0b49080b3076859a3d1c13ad356
| 3,640,636
|
import json
def recipe(recipe_id):
"""
Display the recipe on-page for each recipe id that was requested
"""
# Update the rating if it's an AJAX call
if request.method == "POST":
# check if user is logged in before proceeding with rating
if not session:
return json.dumps({'status': 'not logged in'})
# check that the recipe id hasn't been changed
if not is_valid(recipe_id):
return json.dumps({'status': 'error'})
# the query for the specific recipe that has to be rated
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
# if the user tries to rate their own recipe, return denied
if recipe["created_by"] == session["user"]:
return json.dumps({'status': 'denied'})
# check that the user didn't alter the form value
new_rating = request.form.get("stars")
if int(new_rating) > 0 and int(new_rating) <= 5:
# update the recipe rating
rating = update_recipe_rating(mongo, new_rating, recipe)
return json.dumps({'status': 'success', 'rating': rating})
return json.dumps({'status': 'error'})
# check that the recipe id hasn't been changed
if not is_valid(recipe_id):
return redirect(url_for('error', code=404))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# the query for the specific recipe that the user wants to access
recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
# added in case the owner decides to delete the recipe while
# other users are still on this recipe page, which would cause an error
# after refreshing the page, since we access recipe["recipe_name"] for page_set
# and would end up accessing None["recipe_name"]
if not recipe:
return redirect(url_for('error', code=404))
# set up the page_set object
page_set = {
"title": recipe["recipe_name"].title(),
"type": "recipe"
}
return render_template("pages/recipe.html",
recipe=recipe,
page_set=page_set,
nav_categories=nav_categories)
|
60db178d071d1880410e4e752ec484c4b59b0f96
| 3,640,637
|
def api_program_ordering(request, program):
"""Returns program-wide RF-aware ordering (used after indicator deletion on program page)"""
try:
data = ProgramPageIndicatorUpdateSerializer.load_for_pk(program).data
except Program.DoesNotExist:
logger.warning('attempt to access program page ordering for bad pk {}'.format(program))
return JsonResponse({'success': False, 'msg': 'bad Program PK'})
return JsonResponse(data)
|
d4966689b0ea65885456ad7b52cf5dfd845ac822
| 3,640,638
|
def isinteger(x):
"""
determine if a string can be converted to an integer
"""
try:
a = int(x)
except ValueError:
return False
else:
return True
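Quick illustration:

print(isinteger("42"), isinteger("4.2"), isinteger("abc"))  # True False False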
|
180cea2f61733ada26b20ff046ae26deffa5d396
| 3,640,639
|
from typing import Tuple
from typing import List
def unmarshal_tools_pcr_values(
buf: bytes, selections: TPML_PCR_SELECTION
) -> Tuple[int, List[bytes]]:
"""Unmarshal PCR digests from tpm2_quote using the values format.
Args:
buf (bytes): content of tpm2_quote PCR output.
selections (TPML_PCR_SELECTION): The selected PCRs.
Returns:
A tuple of the number of bytes consumed from buf and a list of digests.
"""
trs = list()
for sel in selections:
digsize = _get_digest_size(sel.hash)
pb = bytes(reversed(bytes(sel.pcrSelect)))
pi = int.from_bytes(pb, "big")
for i in range(0, sel.sizeofSelect * 8):
if pi & (1 << i):
trs.append(digsize)
n = 0
digs = list()
for s in trs:
dig = buf[:s]
n += s
digs.append(dig)
buf = buf[s:]
return n, digs
|
3a5b9dd36ca787026bb9bade4b5e5cc175add9e9
| 3,640,640
|
def new_topic(request):
"""添加新主题"""
if request.method != 'POST':
# No data submitted; create a blank form
form = TopicForm()
else:
# Data submitted via POST; process it
form = TopicForm(request.POST)
if form.is_valid():
new_topic = form.save(commit = False)
new_topic.owner = request.user
new_topic.save()
form.save()
return HttpResponseRedirect(reverse('learning_logs:topics'))
context = {'form':form}
return render(request,'learning_logs/new_topic.html',context)
|
8d41cb1926d809742e89ec7e79ef7bd1ed14443c
| 3,640,641
|
from typing import Union
from typing import List
from typing import Any
from typing import Optional
def address(lst: Union[List[Any], str], dim: Optional[int] = None) -> Address:
"""
Similar to :meth:`Address.fromList`, except the name is shorter, and
the dimension is inferred if possible. Otherwise, an exception is thrown.
Here are some examples:
>>> address('*')
Address(*, 0)
>>> address([['*'], [], ['*', '*']])
Address([[*][][**]], 2)
"""
def dimension(k: Any) -> Optional[int]:
"""
Tries to infer the dimension.
"""
if k == []:
return None
elif k == '*':
return 0
elif isinstance(k, list):
i = None # type: Optional[int]
for a in k:
j = dimension(a)
if i is None:
i = j
elif j is not None and i != j: # Contradictory dim inferences
return None
if i is None:
return None
else:
return i + 1
else:
raise NotImplementedError("[Address from list] Incompatible type: "
"a list representation of an address "
"(LA) for short, is either the string "
"'*', or a list of LA")
if isinstance(lst, str):
if lst == '*':
return Address.epsilon(0)
else:
raise DerivationError(
"Address from list",
"The following expression does not represent an address: "
"{lst}",
lst=lst)
elif dim is not None:
return Address.fromList(lst, dim)
d = dimension(lst)
if d is None:
raise DerivationError("Address from list",
"Cannot infer dimension of list {lst}",
lst=lst)
else:
return Address.fromList(lst, d)
|
ef9e21bb3ef98b12c8ad02c75ccf2cbf6552fd44
| 3,640,642
|
from .views.entrance_exam import remove_entrance_exam_milestone_reference
from .views.entrance_exam import add_entrance_exam_milestone
from path import Path as path  # path.py package; sys.path would not be callable as used below
import base64
import os
import shutil
import tarfile
def import_olx(self, user_id, course_key_string, archive_path, archive_name, language):
"""
Import a course or library from a provided OLX .tar.gz archive.
"""
current_step = 'Unpacking'
courselike_key = CourseKey.from_string(course_key_string)
set_code_owner_attribute_from_module(__name__)
set_custom_attributes_for_course_key(courselike_key)
log_prefix = f'Course import {courselike_key}'
self.status.set_state(current_step)
data_root = path(settings.GITHUB_REPO_ROOT)
subdir = base64.urlsafe_b64encode(repr(courselike_key).encode('utf-8')).decode('utf-8')
course_dir = data_root / subdir
def validate_user():
"""Validate if the user exists otherwise log error. """
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist as exc:
with translation_language(language):
self.status.fail(UserErrors.USER_PERMISSION_DENIED)
LOGGER.error(f'{log_prefix}: Unknown User: {user_id}')
monitor_import_failure(courselike_key, current_step, exception=exc)
return
def user_has_access(user):
"""Return True if user has studio write access to the given course."""
has_access = has_course_author_access(user, courselike_key)
if not has_access:
message = f'User permission denied: {user.username}'
with translation_language(language):
self.status.fail(UserErrors.COURSE_PERMISSION_DENIED)
LOGGER.error(f'{log_prefix}: {message}')
monitor_import_failure(courselike_key, current_step, message=message)
return has_access
def file_is_supported():
"""Check if it is a supported file."""
file_is_valid = archive_name.endswith('.tar.gz')
if not file_is_valid:
message = f'Unsupported file {archive_name}'
with translation_language(language):
self.status.fail(UserErrors.INVALID_FILE_TYPE)
LOGGER.error(f'{log_prefix}: {message}')
monitor_import_failure(courselike_key, current_step, message=message)
return file_is_valid
def file_exists_in_storage():
"""Verify archive path exists in storage."""
archive_path_exists = course_import_export_storage.exists(archive_path)
if not archive_path_exists:
message = f'Uploaded file {archive_path} not found'
with translation_language(language):
self.status.fail(UserErrors.FILE_NOT_FOUND)
LOGGER.error(f'{log_prefix}: {message}')
monitor_import_failure(courselike_key, current_step, message=message)
return archive_path_exists
def verify_root_name_exists(course_dir, root_name):
"""Verify root xml file exists."""
def get_all_files(directory):
"""
For each file in the directory, yield a 2-tuple of (file-name,
directory-path)
"""
for directory_path, _dirnames, filenames in os.walk(directory):
for filename in filenames:
yield (filename, directory_path)
def get_dir_for_filename(directory, filename):
"""
Returns the directory path for the first file found in the directory
with the given name. If there is no file in the directory with
the specified name, return None.
"""
for name, directory_path in get_all_files(directory):
if name == filename:
return directory_path
return None
dirpath = get_dir_for_filename(course_dir, root_name)
if not dirpath:
message = UserErrors.FILE_MISSING.format(root_name)
with translation_language(language):
self.status.fail(message)
LOGGER.error(f'{log_prefix}: {message}')
monitor_import_failure(courselike_key, current_step, message=message)
return
return dirpath
user = validate_user()
if not user:
return
if not user_has_access(user):
return
if not file_is_supported():
return
is_library = isinstance(courselike_key, LibraryLocator)
is_course = not is_library
if is_library:
root_name = LIBRARY_ROOT
courselike_module = modulestore().get_library(courselike_key)
import_func = import_library_from_xml
else:
root_name = COURSE_ROOT
courselike_module = modulestore().get_course(courselike_key)
import_func = import_course_from_xml
# Locate the uploaded OLX archive (and download it from S3 if necessary)
# Do everything in a try-except block to make sure everything is properly cleaned up.
try:
LOGGER.info(f'{log_prefix}: unpacking step started')
temp_filepath = course_dir / get_valid_filename(archive_name)
if not course_dir.isdir():
os.mkdir(course_dir)
LOGGER.info(f'{log_prefix}: importing course to {temp_filepath}')
# Copy the OLX archive from where it was uploaded to (S3, Swift, file system, etc.)
if not file_exists_in_storage():
return
with course_import_export_storage.open(archive_path, 'rb') as source:
with open(temp_filepath, 'wb') as destination:
def read_chunk():
"""
Read and return a sequence of bytes from the source file.
"""
return source.read(FILE_READ_CHUNK)
for chunk in iter(read_chunk, b''):
destination.write(chunk)
LOGGER.info(f'{log_prefix}: Download from storage complete')
# Delete from source location
course_import_export_storage.delete(archive_path)
# If the course has an entrance exam then remove it and its corresponding milestone.
# current course state before import.
if is_course:
if courselike_module.entrance_exam_enabled:
fake_request = RequestFactory().get('/')
fake_request.user = user
# TODO: Is this really ok? Seems dangerous for a live course
remove_entrance_exam_milestone_reference(fake_request, courselike_key)
LOGGER.info(f'{log_prefix}: entrance exam milestone content reference has been removed')
# Send errors to client with stage at which error occurred.
except Exception as exception: # pylint: disable=broad-except
if course_dir.isdir():
shutil.rmtree(course_dir)
LOGGER.info(f'{log_prefix}: Temp data cleared')
self.status.fail(UserErrors.UNKNOWN_ERROR_IN_UNPACKING)
LOGGER.exception(f'{log_prefix}: Unknown error while unpacking', exc_info=True)
monitor_import_failure(courselike_key, current_step, exception=exception)
return
# try-finally block for proper clean up after receiving file.
try:
tar_file = tarfile.open(temp_filepath) # lint-amnesty, pylint: disable=consider-using-with
try:
safetar_extractall(tar_file, (course_dir + '/'))
except SuspiciousOperation as exc:
with translation_language(language):
self.status.fail(UserErrors.UNSAFE_TAR_FILE)
LOGGER.error(f'{log_prefix}: Unsafe tar file')
monitor_import_failure(courselike_key, current_step, exception=exc)
return
finally:
tar_file.close()
current_step = 'Verifying'
self.status.set_state(current_step)
self.status.increment_completed_steps()
LOGGER.info(f'{log_prefix}: Uploaded file extracted. Verification step started')
dirpath = verify_root_name_exists(course_dir, root_name)
if not dirpath:
return
if not validate_course_olx(courselike_key, dirpath, self.status):
return
dirpath = os.path.relpath(dirpath, data_root)
current_step = 'Updating'
self.status.set_state(current_step)
self.status.increment_completed_steps()
LOGGER.info(f'{log_prefix}: Extracted file verified. Updating course started')
courselike_items = import_func(
modulestore(), user.id,
settings.GITHUB_REPO_ROOT, [dirpath],
load_error_modules=False,
static_content_store=contentstore(),
target_id=courselike_key,
verbose=True,
)
new_location = courselike_items[0].location
LOGGER.debug('new course at %s', new_location)
LOGGER.info(f'{log_prefix}: Course import successful')
set_custom_attribute('course_import_completed', True)
except (CourseImportException, InvalidProctoringProvider, DuplicateCourseError) as known_exe:
handle_course_import_exception(courselike_key, known_exe, self.status)
except Exception as exception: # pylint: disable=broad-except
handle_course_import_exception(courselike_key, exception, self.status, known=False)
finally:
if course_dir.isdir():
shutil.rmtree(course_dir)
LOGGER.info(f'{log_prefix}: Temp data cleared')
if self.status.state == 'Updating' and is_course:
# Reload the course so we have the latest state
course = modulestore().get_course(courselike_key)
if course.entrance_exam_enabled:
entrance_exam_chapter = modulestore().get_items(
course.id,
qualifiers={'category': 'chapter'},
settings={'is_entrance_exam': True}
)[0]
metadata = {'entrance_exam_id': str(entrance_exam_chapter.location)}
CourseMetadata.update_from_dict(metadata, course, user)
add_entrance_exam_milestone(course.id, entrance_exam_chapter)
LOGGER.info(f'Course import {course.id}: Entrance exam imported')
|
39a83c292e82e5b9c86f793195f140f6249dc57e
| 3,640,643
|
import pandas as pd
def create_scenario_dataframes_geco(scenario):
"""
Reads GECO dataset and creates a dataframe of the given scenario
"""
    # `io` is assumed to be a module-level configuration dict mapping names to file paths.
    df_sc = pd.read_csv(io["scenario_geco_path"])
df_sc_europe = df_sc.loc[df_sc["Country"] == "EU28"]
df_scenario = df_sc_europe.loc[df_sc_europe["Scenario"] == scenario]
return df_scenario
|
8a17d452feeb506bc2c2bf61a91e8473f2097649
| 3,640,644
|
def escape_env_var(varname):
"""
Convert a string to a form suitable for use as an environment variable.
The result will be all uppercase, and will have all invalid characters
replaced by an underscore.
The result will match the following regex: [a-zA-Z_][a-zA-Z0-9_]*
Example:
"my.private.registry/cat/image" will become
"MY_PRIVATE_REGISTRY_CAT_IMAGE"
"""
varname = list(varname.upper())
if not varname[0].isalpha():
varname[0] = "_"
for i, c in enumerate(varname):
if not c.isalnum() and c != "_":
varname[i] = "_"
return "".join(varname)
|
c1e57ff3b9648e93a540202f00d0325f91bccde1
| 3,640,645
|
import numpy as np
def rms(signal):
"""
rms(signal)
Measures root mean square of a signal
Parameters
----------
signal : 1D numpy array
"""
return np.sqrt(np.mean(np.square(signal)))
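# Quick sanity-check sketch: the RMS of a unit-amplitude sine over a full period
# is close to 1/sqrt(2).
import numpy as np
t = np.linspace(0, 1, 10_000, endpoint=False)
assert np.isclose(rms(np.sin(2 * np.pi * t)), 1 / np.sqrt(2), atol=1e-3)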
|
6643ad464b4048ad71c7fb115e97b42d58a84a9c
| 3,640,646
|
import os
def get_template_page(page):
"""method used to get the a page based on the theme
it will check the options.theme defined theme for the page first and if it isnt found
it will fallback to options.theme_dir/cling for the template page
"""
templates = ['%s.html' % os.path.join(options.theme_dir, 'cling', page)]
page_template = templates[0]
if options.theme is not None:
templates.append('%s.html' % os.path.join(options.theme_dir, options.theme, page))
for pt in reversed(templates):
if os.path.isfile(pt):
page_template = pt
break
return page_template
|
b8e642f9513300f984b5e701b1d18d07d1ab6554
| 3,640,647
|
def get_config(
config_path, trained: bool = False, runner="d2go.runner.GeneralizedRCNNRunner"
):
"""
Returns a config object for a model in model zoo.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights.
If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used
instead; this will typically (though not always) initialize a subset of weights using
an ImageNet pre-trained model, while randomly initializing the other weights.
Returns:
CfgNode: a config object
"""
cfg_file = get_config_file(config_path)
runner = create_runner(runner)
cfg = runner.get_default_cfg()
cfg.merge_from_file(cfg_file)
if trained:
cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
return cfg
|
e6d2b57bcadd833d625bd0a291fbb8de9d333624
| 3,640,648
|
from typing import Optional
def make_game(
width: int = defaults.WIDTH,
height: int = defaults.HEIGHT,
max_rooms: int = defaults.MAX_ROOMS,
seed: Optional[int] = defaults.SEED,
slippery_coefficient: float = defaults.SLIPPERY_COEFFICIENT,
default_reward: float = defaults.DEFAULT_REWARD,
goal_reward: float = defaults.GOAL_REWARD,
catastrophe_reward: float = defaults.CATASTROPHE_REWARD,
) -> Engine:
"""Builds a gridworld `pycolab` game.
    Args:
        width: Width of the generated maze.
        height: Height of the generated maze.
        max_rooms: Maximum number of rooms in the maze.
        seed: Random seed used for maze generation.
        slippery_coefficient: Coefficient controlling how slippery the agent's movement is.
        default_reward: Reward given on a regular step.
        goal_reward: Reward given for reaching the goal tile.
        catastrophe_reward: Reward given for reaching the catastrophe tile.
Returns:
A `pycolab` game.
"""
maze = labmaze.RandomMaze(
width=width,
height=height,
max_rooms=max_rooms,
random_seed=seed,
spawns_per_room=1,
spawn_token="P",
objects_per_room=1,
object_token="G",
)
# Keep only one agent position.
agent_positions = np.asarray(np.where(maze.entity_layer == "P"))
I_p = np.random.choice(agent_positions.shape[-1])
maze.entity_layer[maze.entity_layer == "P"] = " "
maze.entity_layer[tuple(agent_positions[:, I_p])] = "P"
# Keep only one goal.
goal_positions = np.asarray(np.where(maze.entity_layer == "G"))
I_g, I_c = np.random.choice(goal_positions.shape[-1], size=2, replace=False)
maze.entity_layer[maze.entity_layer == "G"] = " "
maze.entity_layer[tuple(goal_positions[:, I_g])] = "G"
maze.entity_layer[tuple(goal_positions[:, I_c])] = "C"
art = str(maze.entity_layer).split("\n")[:-1]
sprites = {
"P":
ascii_art.Partial(
AgentSprite,
default_reward=default_reward,
slippery_coefficient=slippery_coefficient,
seed=seed,
)
}
drapes = {
"G":
ascii_art.Partial(
BoxDrape,
reward=goal_reward,
terminal=True,
),
"C":
ascii_art.Partial(
BoxDrape,
reward=catastrophe_reward,
terminal=True,
)
}
return ascii_art.ascii_art_to_game(
art,
what_lies_beneath=" ",
sprites=sprites,
drapes=drapes,
)
|
908c772cdc3af5a891bce8c169d744e335da6e61
| 3,640,649
|
import numpy as np
def weighted_var(x, weights=None):
"""Unbiased weighted variance (sample variance) for the components of x.
The weights are assumed to be non random (reliability weights).
Parameters
----------
x : np.ndarray
1d or 2d with observations in rows
weights : np.ndarray or None
1d array of weights. None defaults to standard variance.
Returns
-------
s2 : np.array
1d vector of component variances
References
----------
[1] https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
"""
if weights is None:
weights = np.ones(len(x))
V_1 = np.sum(weights)
V_2 = np.sum(weights ** 2)
xbar = np.average(x, weights=weights, axis=0)
numerator = weights.dot((x - xbar) ** 2)
s2 = numerator / (V_1 - (V_2 / V_1))
return s2
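# Sanity-check sketch: with unit weights the result should equal the unbiased
# sample variance (ddof=1) from numpy; equal reliability weights give the same answer.
x = np.array([[1.0, 2.0], [3.0, 5.0], [4.0, 7.0]])
assert np.allclose(weighted_var(x), np.var(x, axis=0, ddof=1))
assert np.allclose(weighted_var(x, weights=np.array([2.0, 2.0, 2.0])), np.var(x, axis=0, ddof=1))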
|
2166b214351da22117bf395fda950f1c79ccf0d1
| 3,640,650
|
def start_detailed_result_worker_route():
"""
Add detailed result worker if not exist
:return: JSON
"""
# check if worker already exist
if check_worker_result(RABBITMQ_DETAILED_RESULT_QUEUE_NAME) == env.HTML_STATUS.OK.value:
return jsonify(status=env.HTML_STATUS.OK.value)
if 'db_name' in request.json:
db_name = request.json["db_name"]
else:
        return jsonify(status=env.HTML_STATUS.ERROR.value, message="No database selected")
Process(target=start_result_worker, args=(RABBITMQ_DETAILED_RESULT_QUEUE_NAME,
DB_DETAILED_RESULT_COLLECTION_NAME, db_name)).start()
return jsonify(status=env.HTML_STATUS.OK.value, detailed_result_worker=check_worker_result(RABBITMQ_DETAILED_RESULT_QUEUE_NAME))
|
567595574a09ce99f3feb11988bed0ba403b04cd
| 3,640,651
|
def _find_next_pickup_item(not_visited_neighbors, array_of_edges_from_node):
"""
Args:
not_visited_neighbors:
array_of_edges_from_node:
Returns:
"""
# last node in visited_nodes is where the traveling salesman is.
cheapest_path = np.argmin(
array_of_edges_from_node[not_visited_neighbors])
return not_visited_neighbors[cheapest_path]
|
06dc80e09d5b87cbc94558bee53677c887844b4b
| 3,640,652
|
def deformable_conv(input,
offset,
mask,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
deformable_groups=None,
im2col_step=None,
param_attr=None,
bias_attr=None,
modulated=True,
name=None):
"""
:api_attr: Static Graph
**Deformable Convolution op**
Compute 2-D deformable convolution on 4-D input.
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:
Deformable Convolution v2:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
Deformable Convolution v1:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
Which :math:`\Delta m_k` is one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
          Offset shape: :math:`(N, 2 * deformable\_groups * H_f * W_f, H_{in}, W_{in})`
          Mask shape: :math:`(N, deformable\_groups * H_f * W_f, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
input (Variable): The input image with [N, C, H, W] format. A Tensor with type
float32, float64.
offset (Variable): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
        mask (Variable, Optional): The input mask of deformable convolution layer.
A Tensor with type float32, float64. It should be None when you use
deformable convolution v1.
num_filters(int): The number of filter. It is as same as the output
image channel.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int|tuple): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: stride = 1.
padding (int|tuple): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: padding = 0.
dilation (int|tuple): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: dilation = 1.
groups (int): The groups number of the deformable conv layer. According to
grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
deformable_groups (int): The number of deformable group partitions.
Default: deformable_groups = 1.
im2col_step (int): Maximum number of images per im2col computation;
            The total batch size should be divisible by this value or smaller
than this value; if you face out of memory problem, you can try
to use a smaller value here.
Default: im2col_step = 64.
param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
of deformable conv. If it is set to None or one attribute of ParamAttr,
deformable conv will create ParamAttr as param_attr.
If the Initializer of the param_attr is not set, the parameter is
initialized with :math:`Normal(0.0, std)`, and the
:math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
deformable conv layer. If it is set to False, no bias will be added
to the output units. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
        modulated (bool): Select which version to use: v2 when True, v1 when
            False. Default: True.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The tensor variable storing the deformable convolution \
result. A Tensor with type float32, float64.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
#deformable conv v2:
import paddle.fluid as fluid
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask,
num_filters=2, filter_size=filter_size, padding=1, modulated=True)
#deformable conv v1:
import paddle.fluid as fluid
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None,
num_filters=2, filter_size=filter_size, padding=1, modulated=False)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'deformable_conv')
check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
'deformable_conv')
check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')
num_channels = input.shape[1]
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper('deformable_conv', **locals())
dtype = helper.input_dtype()
if not isinstance(input, Variable):
raise TypeError("Input of deformable_conv must be Variable")
if not isinstance(offset, Variable):
raise TypeError("Input Offset of deformable_conv must be Variable")
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride')
padding = utils.convert_to_list(padding, 2, 'padding')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
input_shape = input.shape
filter_shape = [num_filters, int(num_filter_channels)] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer())
pre_bias = helper.create_variable_for_type_inference(dtype)
if modulated:
helper.append_op(
type='deformable_conv',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
'Mask': mask,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
else:
helper.append_op(
type='deformable_conv_v1',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
})
output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
return output
|
0dce5c2333a0a3dcaa568a85f3a6dec1536d2cfb
| 3,640,653
|
def ieee():
"""IEEE fixture."""
return t.EUI64.deserialize(b"ieeeaddr")[0]
|
b00b13bb16c74bc96e52ad067dc0c523f5b5a249
| 3,640,654
|
def is_in(a_list):
"""Returns a *function* that checks if its argument is in list.
Avoids recalculation of list at every comparison."""
def check(arg): return arg in a_list
return check
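# Usage sketch: the returned checker is a plain function, so it composes with
# filter(), sorted(key=...), and similar higher-order helpers.
vowel = is_in("aeiou")
assert vowel("e") and not vowel("z")
assert list(filter(is_in({2, 4, 8}), range(10))) == [2, 4, 8]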
|
34afbc269c164f0e095b1cbbf4e9576bafc7a9e1
| 3,640,655
|
def get_log_record_extra_fields(record):
"""Taken from `common` repo logging module"""
# The list contains all the attributes listed in
# http://docs.python.org/library/logging.html#logrecord-attributes
skip_list = (
'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
        'msecs', 'message', 'msg', 'name', 'pathname', 'process',
'processName', 'relativeCreated', 'thread', 'threadName', 'extra',
'stack_info', 'exc_type', 'exc_msg')
easy_types = (str, bool, dict, float, int, list, type(None))
fields = {}
for key, value in record.__dict__.items():
if key not in skip_list:
if isinstance(value, easy_types):
fields[key] = value
else:
fields[key] = repr(value)
return fields
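# Hedged usage sketch: attributes attached via logging's `extra=` mechanism become plain
# LogRecord attributes, which is what this helper extracts.
import logging
record = logging.LogRecord(
    name="demo", level=logging.INFO, pathname="demo.py", lineno=1,
    msg="hello %s", args=("world",), exc_info=None,
)
record.request_id = "abc-123"  # what logger.info(..., extra={"request_id": ...}) would set
print(get_log_record_extra_fields(record))  # expected to include {'request_id': 'abc-123'}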
|
95fe6a74cd169c14ac32728f0bb1d16a2aa9e874
| 3,640,656
|
def ldap_is_intromember(member):
"""
:param member: A CSHMember instance
"""
return _ldap_is_member_of_group(member, 'intromembers')
|
d858afac4870cacc18be79a0b2d6d7d51dd33e07
| 3,640,657
|
def details(request, slug):
"""
Show product set
"""
productset = get_object_or_404(models.ProductSet, slug=slug)
context = {}
response = []
variant_instances = productset.variant_instances()
signals.product_view.send(
sender=type(productset), instances=variant_instances,
request=request, response=response, extra_context=context)
if len(response) == 1:
return response[0]
elif len(response) > 1:
        raise ValueError("Multiple responses returned.")
context['variants'] = variant_instances
context['productset'] = productset
return direct_to_template(request,
'satchless/productset/details.html',
context)
|
9ac9b3f975a6501cfb94dd5d545c29c63a47a125
| 3,640,658
|
def applies(platform_string, to='current'):
""" Returns True if the given platform string applies to the platform
specified by 'to'."""
def _parse_component(component):
component = component.strip()
parts = component.split("-")
if len(parts) == 1:
if parts[0] in VALID_PLATFORMS_FILTER:
return parts[0], None
elif parts[0] in _ARCHBITS_TO_ARCH:
return "all", parts[0]
else:
raise ValueError(
"Invalid filter string: '{}'".format(component)
)
elif len(parts) == 2:
if (
parts[0] not in VALID_PLATFORMS_FILTER
or parts[1] not in _ARCHBITS_TO_ARCH
):
raise ValueError(
"Invalid filter string: '{}'".format(component)
)
return parts[0], parts[1]
else:
raise ValueError(
"Invalid filter string: '{}'".format(component)
)
def _are_compatible(short_left, short_right):
return short_left == short_right or \
short_left == "rh" and short_right.startswith("rh") \
or short_right == "rh" and short_left.startswith("rh") \
or short_left == "all"
if isinstance(to, str):
if to == 'current':
full = EPDPlatform.from_running_system()
to_platform = full.platform_name
to_arch_bits = full.arch_bits
elif '-' in to:
full = EPDPlatform.from_epd_string(to)
to_platform = full.platform_name
to_arch_bits = full.arch_bits
else:
if not (to in PLATFORM_NAMES or to == 'rh'):
raise ValueError("Invalid 'to' argument: {0!r}".format(to))
to_platform = to
to_arch_bits = None
else:
to_platform = to.platform_name
to_arch_bits = to.arch_bits
conditions = []
platform_string = platform_string.strip()
if platform_string.startswith("!"):
invert = True
platform_string = platform_string[1:]
else:
invert = False
platform_strings = [s for s in platform_string.split(",")]
for platform_string in platform_strings:
short, bits = _parse_component(platform_string)
if _are_compatible(short, to_platform):
if bits is None:
conditions.append(True)
else:
conditions.append(bits == to_arch_bits or to_arch_bits is None)
else:
conditions.append(False)
if invert:
return not any(conditions)
else:
return any(conditions)
|
4692fb0d302948e07a1b2586f614dfcfa5618503
| 3,640,659
|
import threading
def _GetClassLock(cls):
"""Returns the lock associated with the class."""
with _CLASS_LOCKS_LOCK:
if cls not in _CLASS_LOCKS:
_CLASS_LOCKS[cls] = threading.Lock()
return _CLASS_LOCKS[cls]
|
98b034a0984431a752801407bfbc5e5694ad44ae
| 3,640,660
|
from apysc._expression import event_handler_scope
def _get_expression_table_name() -> TableName:
"""
    Get an expression table name. The returned value depends on whether the
    current scope is an event handler's scope or not.
Returns
-------
table_name : str
Target expression table name.
"""
event_handler_scope_count: int = \
event_handler_scope.get_current_event_handler_scope_count()
if event_handler_scope_count == 0:
return TableName.EXPRESSION_NORMAL
return TableName.EXPRESSION_HANDLER
|
61d3207d51264e876a472cbd2eea43de730508ac
| 3,640,661
|
def measure_option(mode,
number=1,
repeat=1,
timeout=60,
parallel_num=1,
pack_size=1,
check_correctness=False,
build_option=None,
replay_db=None,
save_to_replay_db=True,
rpc_device_key=None,
rpc_priority=1,
rpc_timeout=60,
rpc_tracker_addr=None,
use_ndk=False,
custom_measure_batch=None):
"""Configure how to do measurement
Parameters
----------
mode: str
'local': use the local device for measurement. In this mode,
the tuner starts a tracker and a RPC server silently for the user.
'rpc': request devices for measurement from rpc tracker. In this mode,
you should start a rpc tracker in a separate processing.
'custom': use custom measure function
'local-nofork': use local device for measure but does not use multiprocessing.
This mode is suitable for debug, but does not support timeout and parallel.
number : int, optional
Number of times to do the measurement for average
repeat : int, optional
Number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up. The returned result contains `repeat` costs,
each of which is the average of `number` test run.
timeout: int, optional
Timeout for a whole batch. TimeoutError will be returned as the result if a
        task times out.
parallel_num: int, optional
        The number of measurement tasks that can run in parallel.
Set this according to the number of cpu cores (for compilation) and
the number of devices you have (for measuring generate code).
pack_size : int, optional
Number of configs to measure in one RPC call.
Usually this can be set to 1. If your device has high cost to establish a rpc connection,
set this higher.
check_correctness: bool
Whether check correctness after measurement.
build_option: Dict, optional
Build options for tvm.build_config
replay_db : Database, optional
The database that we retrieve saved MeasureResults from
save_to_replay_db: bool, optional
Whether save measure result to database. This is useless when replay_db is None
rpc_priority: int, optional
Priority of this task, used by scheduler in tracker
rpc_device_key: str, optional
The device key of registered devices in tracker
rpc_timeout: int, optional
Timeout of rpc session
rpc_tracker_addr: Tuple(str, int), optional
The address of rpc tracker in Tuple(host, port) format.
If is set, will use this address.
If is not set, will use environment variable "TVM_TRACKER_HOST" and "TVM_TRACKER_PORT"
    use_ndk: bool, optional
Whether export requires ndk
custom_measure_batch: callable, optional
custom measure function
Returns
-------
options: dict
A dict to store all options
"""
return {
'mode': mode,
'number': number,
'repeat': repeat,
'timeout': timeout,
'parallel_num': parallel_num,
'pack_size': pack_size,
'check_correctness': check_correctness,
'build_option': build_option,
'replay_db': replay_db,
'save_to_replay_db': save_to_replay_db,
'rpc_device_key': rpc_device_key,
'rpc_priority': rpc_priority,
'rpc_timeout': rpc_timeout,
'rpc_tracker_addr': rpc_tracker_addr,
'use_ndk': use_ndk,
'custom_measure_batch': custom_measure_batch
}
|
dc3ce9bed84c90d62773e118470943a24896390e
| 3,640,662
|
import gc
import time
import joblib
import numpy as np
def make_inference(input_data, model):
"""
input_data is assumed to be a pandas dataframe, and model uses standard sklearn API with .predict
"""
    # `m` is assumed to be a project module providing calc_NIR_V() and a `features` list.
    input_data['NIR_V'] = m.calc_NIR_V(input_data)
input_data = input_data.replace([np.nan, np.inf, -np.inf, None], np.nan)
input_data = input_data.dropna(subset=m.features)
gc.collect()
print(f'predicting on {len(input_data)} records')
t0 = time.time()
with joblib.parallel_backend('threading', n_jobs=8):
model.n_jobs = 8
input_data['biomass'] = model.predict(input_data)
t1 = time.time()
print(f'took {round(t1-t0)} seconds')
return input_data[['x', 'y', 'biomass']]
|
472c6eaadf0f9dd705e8600ccb4939d67f387a0e
| 3,640,663
|
def property_elements(rconn, redisserver, name, device):
"""Returns a list of dictionaries of element attributes for the given property and device
each dictionary will be set in the list in order of label
:param rconn: A redis connection
:type rconn: redis.client.Redis
:param redisserver: The redis server parameters
:type redisserver: namedtuple
:param name: The property name
:type name: String
:param device: The device name
:type device: String
:return: A list of element attributes dictionaries.
:rtype: List
"""
element_name_list = elements(rconn, redisserver, name, device)
if not element_name_list:
return []
element_dictionary_list = list( elements_dict(rconn, redisserver, elementname, name, device) for elementname in element_name_list )
# sort element_dictionary_list by label
element_dictionary_list.sort(key=_split_element_labels)
return element_dictionary_list
|
70ee3de0a18a84e9f21df341c685c1380a4ab164
| 3,640,664
|
def _dtype(a, b=None):
"""Utility for getting a dtype"""
return getattr(a, 'dtype', getattr(b, 'dtype', None))
|
c553851231f0c4be544e5f93738b43fa98e65176
| 3,640,665
|
from lxml import etree  # assumed; xml.etree.ElementTree may also work here
from pandas import DataFrame, to_datetime
from numpy import nan
def parse_garmin_tcx(filename):
""" Parses tcx activity file from Garmin Connect to Pandas DataFrame object
Args: filename (str) - tcx file
    Returns: a tuple of id (datetime.date) and data (DataFrame)
DF columns=['time'(datetime.time), 'distance, m'(float), 'HR'(int),
'cadence'(int), 'speed, m/s'(int)]
"""
tree = etree.parse(str(filename))
# set namespaces for garmin tcx file
ns = {'ns0': '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}',
'ns3': '{http://www.garmin.com/xmlschemas/ActivityExtension/v2}'}
id = to_datetime(tree.find('.//' + ns['ns0'] + 'Id').text).date()
trackpoints = tree.findall('.//' + ns['ns0'] + 'Trackpoint')
data = DataFrame(columns='time,distance,HR,speed,cadence,latitude,longitude,altitude'.split(','))
for n, trackpoint in enumerate(trackpoints):
data.loc[n, 'time'] = trackpoint.find('.//' + ns['ns0'] + 'Time').text
data.loc[n, 'distance'] = float(trackpoint.find('.//' + ns['ns0'] + 'DistanceMeters').text)
data.loc[n, 'altitude'] = float(trackpoint.find('.//' + ns['ns0'] + 'AltitudeMeters').text)
data.loc[n, 'HR'] = int(trackpoint.find('.//' + ns['ns0'] + 'HeartRateBpm/').text)
try:
data.loc[n, 'latitude'] = float(trackpoint.find('.//' + ns['ns0'] + 'LatitudeDegrees').text)
except:
data.loc[n, 'latitude'] = nan
try:
data.loc[n, 'longitude'] = float(trackpoint.find('.//' + ns['ns0'] + 'LongitudeDegrees').text)
except:
data.loc[n, 'longitude'] = nan
try:
data.loc[n, 'speed'] = float(trackpoint.find('.//' + ns['ns3'] + 'Speed').text)
except:
data.loc[n, 'speed'] = nan
try:
data.loc[n, 'cadence'] = int(trackpoint.find('.//' + ns['ns3'] + 'RunCadence').text) * 2
except:
data.loc[n, 'cadence'] = nan
data.loc[:,'time'] = to_datetime(data['time'])
return (id, data)
|
bc8052850b9aa9fdab82de2814d38ae62aa298c6
| 3,640,666
|
def get_decay_fn(initial_val, final_val, start, stop):
"""
Returns function handle to use in torch.optim.lr_scheduler.LambdaLR.
The returned function supplies the multiplier to decay a value linearly.
"""
assert stop > start
def decay_fn(counter):
if counter <= start:
return 1
if counter >= stop:
return final_val / initial_val
time_range = stop - start
return 1 - (counter - start) * (1 - final_val / initial_val) / time_range
assert decay_fn(start) * initial_val == initial_val
assert decay_fn(stop) * initial_val == final_val
return decay_fn
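# Hedged wiring sketch: the decay function plugs into torch.optim.lr_scheduler.LambdaLR
# as a multiplicative factor; values are chosen so the internal asserts hold exactly.
import torch
model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
decay = get_decay_fn(initial_val=1.0, final_val=0.25, start=5, stop=20)
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=decay)
for epoch in range(25):
    opt.step()    # the actual training step would go here
    sched.step()  # multiplier stays 1.0 until epoch 5, then decays linearly to 0.25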
|
d84c0f0305d239834429d83ba4bd5c6d6e945b69
| 3,640,667
|
from typing import Optional
async def is_logged(jwt_cookie: Optional[str] = Cookie(None, alias=config.login.jwt_cookie_name)):
"""
Check if user is logged
"""
result = False
if jwt_cookie:
try:
token = jwt.decode(
jwt_cookie,
smart_text(orjson.dumps(config.secret_key)),
algorithms=[config.login.jwt_algorithm],
audience="auth",
)
result = isinstance(token, dict) and "sub" in token
except JWTError:
pass
return JSONResponse(result, status_code=200)
|
e6e3ed4003dc6b60f3b118a98b0fbfb3bcb3b60a
| 3,640,668
|
from typing import List
def cached_query_molecules(
client_address: str, molecule_ids: List[str]
) -> List[QCMolecule]:
"""A cached version of ``FractalClient.query_molecules``.
Args:
client_address: The address of the running QCFractal instance to query.
molecule_ids: The ids of the molecules to query.
Returns:
The returned molecules.
"""
return _cached_client_query(
client_address,
molecule_ids,
"query_molecules",
_molecule_cache,
)
|
33d8c336daba7a79ba66d8823ba93f35fa37c351
| 3,640,669
|
def _domain_to_json(domain):
"""Translates a Domain object into a JSON dict."""
result = {}
# Domain names and bounds are not populated yet
if isinstance(domain, sch.IntDomain):
result['ints'] = {
'min': str(domain.min_value),
'max': str(domain.max_value),
'isCategorical': domain.is_categorical,
'vocabularyFile': domain.vocabulary_file
}
elif isinstance(domain, sch.FloatDomain):
result['floats'] = {}
elif isinstance(domain, sch.StringDomain):
result['strings'] = {}
elif isinstance(domain, sch.BoolDomain):
result['bools'] = {}
return result
|
c1d9d860ea1735feacfb7349f4516634e217ea5b
| 3,640,670
|
def draw_point(state, x, y, col=COLORS["WHITE"], symb="▓"):
"""returns a state with a placed point"""
state[y][x] = renderObject(symb, col)
return state
|
64b500fdacda30b0506397d554e8ce6d3b7b4a66
| 3,640,671
|
def _vars_to_add(new_query_variables, current_query_variables):
"""
Return list of dicts representing Query Variables not yet persisted
Keyword Parameters:
new_query_variables -- Dict, representing a new inventory of Query
Variables, to be associated with a DWSupport Query
current_query_variables -- Dict, representing the Query Variables
currently associated with the 'new_query_variables' Query mapped
by tuple(table_name, column_name)
>>> from pprint import pprint
>>> test_new_vars = { 'great_fact': ['measure_a', 'measure_b']
... ,'useful_dim': ['field_one']
... ,'occasionally_useful_dim': ['field_two']}
>>> persisted_vars = { ('great_fact', 'measure_a'): object() #fake
... ,('useful_dim', 'field_one'): object()#objects
... ,('useful_dim', 'field_two'): object()}
>>> out = _vars_to_add(test_new_vars, persisted_vars)
>>> pprint(out) # check detected additions
{'great_fact': ['measure_b'], 'occasionally_useful_dim': ['field_two']}
"""
additional_fields_by_table_name = {} # Values to return
# detect additions
for new_variable_table_name, table_columns in new_query_variables.items():
for column_name in table_columns:
key = (new_variable_table_name, column_name) #table+column tuple
if key not in current_query_variables:
# New Query Variable - add variable name to table's list
table_variables = additional_fields_by_table_name.setdefault(
new_variable_table_name
,list()) #default to new, empty list (if none exists yet)
table_variables.append(column_name)
return additional_fields_by_table_name
|
fd5ea2209b374ab9987a05c139ba1f28805f3eff
| 3,640,672
|
def Ak(Y2d, H, k):
"""
Calculate Ak for Sk(x)
Parameters
----------
Y2d : list
list of y values with the second derived
H : list
list of h values from spline
k : int
index from Y2d and H
Returns
-------
float
Ak from cubic spline
"""
return (Y2d[k] - Y2d[k - 1]) / (6 * H[k - 1])
|
baea453b9c7b023b78c1827dc23bacbd8fd6b057
| 3,640,673
|
def cycle_list_next(vlist, current_val):
"""Return the next element of *current_val* from *vlist*, if
    approaching the list boundary, starts from the beginning.
"""
return vlist[(vlist.index(current_val) + 1) % len(vlist)]
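# Quick usage sketch: wraps around when the current value is the last element.
assert cycle_list_next(["a", "b", "c"], "b") == "c"
assert cycle_list_next(["a", "b", "c"], "c") == "a"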
|
48e2ac31178f51f981eb6a27ecf2b35d44b893b4
| 3,640,674
|
def _cal_hap_stats(gt, hap, pos, src_variants, src_hom_variants, src_het_variants, sample_size):
"""
Description:
Helper function for calculating statistics for a haplotype.
Arguments:
gt allel.GenotypeArray: Genotype data for all the haplotypes within the same window of the haplotype to be analyzed.
hap allel.GenotypeVector: Genotype data for the haplotype to be analyzed.
pos list: List containing positions of variants on the haplotype.
src_variants list: List containing positions of variants on the individual from the source population.
src_hom_variants list: List containing positions of homozygous variants on the individual from the source population.
src_het_variants list: List containing positions of heterozygous variants on the individual from the source population.
sample_size int: Number of individuals analyzed.
Returns:
hap_variants_num int: Number of SNPs with derived alleles on the haplotype.
hap_site_num int: Number of SNPs with derived alleles either on the haplotype or the source genomes.
hap_match_src_allele_num int: Number of SNPs with derived alleles both on the haplotype and the source genomes.
hap_sfs int: Average number of derived variants per site per haplotype.
hap_match_pct float: Match percent of the haplotype.
"""
if hap is None: return 'NA', 'NA', 'NA', 'NA', 'NA'
else:
hap_variants = pos[np.equal(hap, 1)]
hap_variants_num = len(hap_variants)
# Assume the alternative allele is the derived allele
hap_shared_src_hom_site_num = len(np.intersect1d(hap_variants, src_hom_variants))
hap_shared_src_het_site_num = len(np.intersect1d(hap_variants, src_het_variants))
hap_site_num = len(np.union1d(hap_variants, src_variants))
hap_match_src_allele_num = hap_shared_src_hom_site_num + 0.5*hap_shared_src_het_site_num
hap_shared_src_site_num = hap_shared_src_hom_site_num + hap_shared_src_het_site_num
if hap_site_num != 0: hap_match_pct = round(hap_match_src_allele_num/hap_site_num, 6)
else: hap_match_pct = 'NA'
hap_sfs = np.sum(np.sum(gt[hap == 1], axis=2), axis=1)
if hap_sfs.size != 0:
hap_sfs_mean = np.mean(hap_sfs)
# See https://stackoverflow.com/questions/10825926/python-3-x-rounding-behavior
#if not np.isnan(sfs_mean): sfs_mean = int(round(sfs_mean))
#if not np.isnan(hap_sfs_mean): hap_sfs = int(int(py2round(hap_sfs_mean))/10*108)
#if not np.isnan(hap_sfs_mean): hap_sfs = int(py2round(hap_sfs_mean))/(2*sample_size)
if not np.isnan(hap_sfs_mean): hap_sfs = round(hap_sfs_mean/(2*sample_size), 6)
else:
hap_sfs = np.nan
return hap_variants_num, hap_site_num, hap_match_src_allele_num, hap_sfs, hap_match_pct
|
10c3105fe582078d1f24cd740600ccf3c6863407
| 3,640,675
|
import json
from os.path import isfile
def read_cfg(file):
"""Read configuration file and return list of (start,end) tuples """
result = []
if isfile(file):
with open(file) as f:
cfg = json.load(f)
for entry in cfg:
if "start" in entry:
filter = (entry["start"], entry.get("end", None))
result.append(filter)
return result
|
bb9c20b03e95f45708eab17313bc446cc1540308
| 3,640,676
|
import scipy.optimize
def least_l2_affine(
source: np.ndarray, target: np.ndarray, shift: bool = True, scale: bool = True
) -> AffineParameters:
"""Finds the squared-error minimizing affine transform.
Args:
source: a 1D array consisting of the reward to transform.
target: a 1D array consisting of the target to match.
shift: affine includes constant shift.
scale: affine includes rescale.
Returns:
(shift, scale) such that (scale * reward + shift) has minimal squared-error from target.
Raises:
ValueError if source or target are not 1D arrays, or if neither shift or scale are True.
"""
if source.ndim != 1:
raise ValueError("source must be vector.")
if target.ndim != 1:
raise ValueError("target must be vector.")
if not (shift or scale):
raise ValueError("At least one of shift and scale must be True.")
a_vals = []
if shift:
# Positive and negative constant.
# The shift will be the sum of the coefficients of these terms.
a_vals += [np.ones_like(source), -np.ones_like(source)]
if scale:
a_vals += [source]
a_vals = np.stack(a_vals, axis=1)
# Find x such that a_vals.dot(x) has least-squared error from target, where x >= 0.
coefs, _ = scipy.optimize.nnls(a_vals, target)
shift_param = 0.0
scale_idx = 0
if shift:
shift_param = coefs[0] - coefs[1]
scale_idx = 2
scale_param = 1.0
if scale:
scale_param = coefs[scale_idx]
return AffineParameters(shift=shift_param, scale=scale_param)
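# Hedged usage sketch: AffineParameters is assumed to be a (shift, scale) namedtuple
# defined elsewhere in the source module. For target = 2 * source + 1 the recovered
# parameters should be approximately scale=2, shift=1.
import numpy as np
source = np.linspace(0.0, 1.0, 50)
target = 2.0 * source + 1.0
params = least_l2_affine(source, target)
assert np.isclose(params.scale, 2.0) and np.isclose(params.shift, 1.0)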
|
5a6d6d69400327c30d21ae205cab88fd95d856d6
| 3,640,677
|
def mark_item_as_read(
client: EWSClient, item_ids, operation="read", target_mailbox=None
):
"""
Marks item as read
:param client: EWS Client
:param item_ids: items ids to mark as read
:param (Optional) operation: operation to execute
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
marked_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
items = [x for x in items if isinstance(x, Message)]
for item in items:
item.is_read = operation == "read"
item.save()
marked_items.append(
{
ITEM_ID: item.id,
MESSAGE_ID: item.message_id,
ACTION: "marked-as-{}".format(operation),
}
)
readable_output = tableToMarkdown(
f"Marked items ({operation} marked operation)", marked_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: marked_items}
return readable_output, output, marked_items
|
80b3fc0b47a9a0044538a2862433a50d5ad36edb
| 3,640,678
|
import math
import numpy as np
def AIC_score(y_true, y_pred, model=None, df=None):
""" calculate Akaike Information Criterion (AIC)
Input:
y_true: actual values
y_pred: predicted values
model (optional): predictive model
df (optional): degrees of freedom of model
        One of model or df is required
"""
if df is None and model is None:
raise ValueError('You need to provide either model or df')
n = len(y_pred)
p = len(model.coef_) + 1 if df is None else df
resid = np.array(y_true) - np.array(y_pred)
sse = np.sum(resid ** 2)
constant = n + n * np.log(2 * np.pi)
return n * math.log(sse / n) + constant + 2 * (p + 1)
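# Hedged usage sketch: passing `df` directly (here 3 degrees of freedom) avoids needing
# a fitted model object with a coef_ attribute.
import numpy as np
rng = np.random.default_rng(0)
y_true = rng.normal(size=100)
y_pred = y_true + rng.normal(scale=0.1, size=100)
print(AIC_score(y_true, y_pred, df=3))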
|
6b59dea007f414b0bdc6b972434cb1c2def40bb2
| 3,640,679
|
import cv2
import numpy as np
import xarray as xr
def to_rgb(data, output=None, vmin=None, vmax=None, pmin=2, pmax=98,
categorical=False, mask=None, size=None, cmap=None):
"""Turn some data into a numpy array representing an RGB image.
Parameters
----------
data : list of DataArray
output : str
file path
vmin : float or list of float
minimum value, or list of values per channel (default: None).
vmax : float or list of float
maximum value, or list of values per channel (default: None).
pmin : float
lowest percentile to plot (default: 2). Ignored if vmin is passed.
pmax : float
        highest percentile to plot (default: 98). Ignored if vmax is passed.
    categorical : bool
        if True, treat the data as categorical labels and colorize them (default: False).
    mask : np.ndarray, optional
        boolean mask; pixels where the mask is False are set to 0.
    size : tuple, optional
        output size as (height, width); either entry may be None to keep the aspect ratio.
    cmap : str, optional
        colormap applied to single-channel data.
Returns
-------
np.ndarray or None
Returns the generate RGB image if output is None, else returns None.
"""
if isinstance(data, list):
n_channels = len(data)
elif isinstance(data, xr.DataArray) or isinstance(data, np.ndarray):
n_channels = 1
data = [data]
else:
raise ValueError("`data` must be a DataArray or list of DataArrays")
values = [np.asarray(d) for d in data]
shape = data[0].shape + (n_channels,)
if vmin is not None:
if isinstance(vmin, (int, float)):
vmin = [vmin] * n_channels
if vmax is not None:
if isinstance(vmax, (int, float)):
vmax = [vmax] * n_channels
if categorical:
colored = colorize(values[0], nan_vals=[0])
else:
im = np.empty(shape)
for i in range(n_channels):
channel = values[i]
# Stretch
if vmin is not None:
minval = vmin[i]
else:
minval = np.percentile(channel, pmin)
if vmax is not None:
maxval = vmax[i]
else:
maxval = np.percentile(channel, pmax)
if maxval > minval:
channel = (channel - minval) / (maxval - minval) * 255
im[:, :, i] = channel
im = np.clip(im, 0, 255).astype(np.uint8)
if n_channels == 1:
colored = cv2.cvtColor(im[:, :, 0], cv2.COLOR_GRAY2BGR)
if cmap is not None:
# colored is now in BGR
colored = cv2.applyColorMap(colored, _cmap_from_str(cmap))
else:
# im is in RGB
colored = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
# if output is not None:
# colored = cv2.cvtColor(colored, cv2.COLOR_RGB2BGR)
if mask is not None:
colored[~mask] = 0
if size is not None:
if size[0] is None:
size = (int(colored.shape[0] * size[1] / colored.shape[1]),
size[1])
elif size[1] is None:
size = (size[0],
int(colored.shape[1] * size[0] / colored.shape[0]))
colored = cv2.resize(colored, (size[1], size[0]))
if output is None:
return cv2.cvtColor(colored, cv2.COLOR_BGR2RGB)
else:
cv2.imwrite(output, colored)
|
477241dd890b78d7bbf56d3095b42f106af694a7
| 3,640,680
|
import requests
def id_convert(values, idtype=None):
"""
Get data from the id converter API.
https://www.ncbi.nlm.nih.gov/pmc/tools/id-converter-api/
"""
base = 'http://www.pubmedcentral.nih.gov/utils/idconv/v1.0/'
params = {
'ids': values,
'format': 'json',
}
if idtype is not None:
params['idtype'] = idtype
resp = requests.get(base, params=params)
raw = resp.json()
records = raw.get('records')
if records is None:
return None
status = records[0].get('status')
if status == u"error":
return None
return raw['records'][0]
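# Hedged usage sketch (requires network access): look up a single PubMed ID and print
# whatever identifiers the converter returns; the ID below is only an illustration.
record = id_convert("23193287", idtype="pmid")
if record is not None:
    print(record.get("pmcid"), record.get("doi"))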
|
a60698fb20ba94445bbd06384b8523e92bfb91a3
| 3,640,681
|
import base64
def authenticate_user():
"""Authenticate user"""
username = request.form['username']
password = request.form['password']
user = User.query.filter_by(username=username, password=password).first()
if user is not None:
ma_schema = UserSchema()
user_data = ma_schema.dump(user)
user_data['id'] = user.pk
user_data['token'] = base64.b64encode(bytes(user.token, 'utf-8')).decode("utf-8")
del user_data['pk']
return jsonify(user_data)
else:
return jsonify({"message":"Invalid credentials"}),404
|
8e15a3bddf4700c1b207798e3162e3fcef0e7d79
| 3,640,682
|
import typing
import inspect
import warnings
def map_signature(
r_func: SignatureTranslatedFunction,
is_method: bool = False,
map_default: typing.Optional[
typing.Callable[[rinterface.Sexp], typing.Any]
] = _map_default_value
) -> typing.Tuple[inspect.Signature, typing.Optional[int]]:
"""
    Map the signature of an R function to the signature of a Python function.
While mapping the signature, it will report the eventual presence of
an R ellipsis.
Args:
r_func (SignatureTranslatedFunction): an R function
is_method (bool): Whether the function should be treated as a method
(adds a `self` param to the signature if so).
map_default (function): Function to map default values in the Python
signature. No mapping to default values is done if None.
Returns:
A tuple (inspect.Signature, int or None).
"""
params = []
r_ellipsis = None
if is_method:
params.append(inspect.Parameter('self',
inspect.Parameter.POSITIONAL_ONLY))
r_params = r_func.formals()
rev_prm_transl = {v: k for k, v in r_func._prm_translate.items()}
if r_params.names is not rinterface.NULL:
for i, (name, default_orig) in enumerate(zip(r_params.names, r_params)):
if default_orig == '...':
r_ellipsis = i
                warnings.warn('The R ellipsis is not yet well supported.')
transl_name = rev_prm_transl.get(name)
default_orig = default_orig[0]
if map_default and not rinterface.MissingArg.rsame(default_orig):
default_mapped = map_default(default_orig)
else:
default_mapped = inspect.Parameter.empty
prm = inspect.Parameter(
transl_name if transl_name else name,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=default_mapped
)
params.append(prm)
return (inspect.Signature(params), r_ellipsis)
|
e0655bab739b59b0fad94772654a70ce4e6f84fd
| 3,640,683
|
import numpy as np
def get_random(X):
"""Get a random sample from X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
array-like, shape (1, n_features)
"""
size = len(X)
idx = np.random.choice(range(size))
return X[idx]
|
e493b68ae5b7263786a1a447a0cff78d1deeba24
| 3,640,684
|
def _save_update(update):
"""Save one update in firestore db."""
location = {k: v for k, v in update.items() if k in _location_keys}
status = {k: v for k, v in update.items() if k in _update_keys}
# Save location in status to enable back referencing location from a status
status["location"] = _location_doc_name(location)
location_doc_ref = _save_location(location)
updated = _save_status(location_doc_ref, status)
if updated:
logger.debug(f"{location} updated")
return updated
|
109ca0add974f1ac4b604ca7923dbb3444cee9b0
| 3,640,685
|
def get_secondary_connections(network, user):
"""
Finds all the secondary connections (i.e. connections of connections)
of a given user.
Arguments:
network: the gamer network data structure.
user: a string containing the name of the user.
Returns:
A list containing the secondary connections (connections of connections).
- If the user is not in the network, returns None.
- If a user has no primary connections to begin with,
returns an empty list.
NOTE:
It is OK if a user's list of secondary connections includes the user
himself/herself. It is also OK if the list contains a user's primary
connection that is a secondary connection as well.
"""
if user not in network:
return None
if network[user][0] == []:
return []
return [person
for group in
[network[connection][0] for connection in network[user][0]]
for person in group]
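# Usage sketch with a tiny network in the implied format, where each user maps to
# [list_of_connections, list_of_games] and only the first element is used here.
net = {
    "alice": [["bob"], []],
    "bob": [["carol", "alice"], []],
    "carol": [[], []],
}
assert get_secondary_connections(net, "alice") == ["carol", "alice"]
assert get_secondary_connections(net, "carol") == []
assert get_secondary_connections(net, "dave") is None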
|
4e53f6e43f2fb132932381370efa4b3a3cd4793c
| 3,640,686
|
def get_regression_function(model, model_code):
"""
Method which return prediction function for trained regression model
:param model: trained model object
:return: regression predictor function
"""
return model.predict
|
fca4a0767b1e741952534baf59ac07cece2c9342
| 3,640,687
|
def beam_motion_banding_filter(img, padding=20):
"""
:param img: numpy.array.
2d projection image or sinogram. The left and right side of the image should be
empty. So that `padding` on the left and right will be used to create an beam motion
banding image and be normalized from the original image.
:param padding: int.
The size of on the left and right empty area to be used to find the average value
where there is no object.
:return img_new: numpy.array
Smoothed image.
"""
nx = img.shape[1]
mean_left = img[:, 0:padding].mean(axis=1)
mean_right = img[:, -padding:].mean(axis=1)
mean_middle = (mean_left + mean_right) / 2
slope = (mean_right - mean_left) / (nx - padding)
# Make an image with only bandings.
img_banding = img * 0.0
for i in range(img_banding.shape[1]): # iterate cols
img_banding[:, i] = mean_middle + (i - nx / 2) * slope
# Subtract the banding from the original.
img_new = img-img_banding
return img_new
|
5191c1f3022711459ce81cfbf0c4d6c6fb7dcd41
| 3,640,688
|
def log(session):
"""Clear nicos log handler content"""
handler = session.testhandler
handler.clear()
return handler
|
086e362c8195b917c826fc8b20d3095210ac82fd
| 3,640,689
|
import os
import gzip
import numpy as np
def load_mnist(path, kind='train'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)
images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)
with gzip.open(labels_path, 'rb') as lbpath:
lbpath.read(8)
buffer = lbpath.read()
labels = np.frombuffer(buffer, dtype=np.uint8)
with gzip.open(images_path, 'rb') as imgpath:
imgpath.read(16)
buffer = imgpath.read()
images = np.frombuffer(buffer, dtype=np.uint8).reshape(len(labels), 784).astype(np.float64)
return images, labels
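# Hedged usage sketch: 'data/fashion' is a placeholder directory assumed to contain the
# four gzipped IDX files (e.g. train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz).
X_train, y_train = load_mnist("data/fashion", kind="train")
X_test, y_test = load_mnist("data/fashion", kind="t10k")
print(X_train.shape, y_train.shape)  # typically (60000, 784) (60000,)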
|
94a123ea29017bd666deebe95f6926e32edf23d8
| 3,640,690
|
import numpy as np
import SimpleITK as sitk
def calculate_dvh(dose_grid, label, bins=1001):
"""Calculates a dose-volume histogram
Args:
dose_grid (SimpleITK.Image): The dose grid.
label (SimpleITK.Image): The (binary) label defining a structure.
bins (int | list | np.ndarray, optional): Passed to np.histogram,
can be an int (number of bins), or a list (specifying bin edges). Defaults to 1001.
Returns:
bins (numpy.ndarray): The points of the dose bins
values (numpy.ndarray): The DVH values
"""
if dose_grid.GetSize() != label.GetSize():
print("Dose grid size does not match label, automatically resampling.")
dose_grid = sitk.Resample(dose_grid, label)
dose_arr = sitk.GetArrayViewFromImage(dose_grid)
label_arr = sitk.GetArrayViewFromImage(label)
dose_vals = dose_arr[np.where(label_arr)]
counts, bin_edges = np.histogram(dose_vals, bins=bins)
# Get mid-points of bins
bins = (bin_edges[1:] + bin_edges[:-1]) / 2.0
# Calculate the actual DVH values
values = np.cumsum(counts[::-1])[::-1]
values = values / values.max()
return bins, values
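# Hedged usage sketch with tiny synthetic volumes: a uniform 10 Gy dose inside a cubic
# label yields a step-shaped DVH (100% of the structure receives every dose up to 10 Gy).
import numpy as np
import SimpleITK as sitk
dose_img = sitk.GetImageFromArray(np.full((10, 10, 10), 10.0))
label_arr = np.zeros((10, 10, 10), dtype=np.uint8)
label_arr[3:7, 3:7, 3:7] = 1
label_img = sitk.GetImageFromArray(label_arr)
dvh_bins, dvh_values = calculate_dvh(dose_img, label_img, bins=50)
print(dvh_bins[0], dvh_values[0])  # the lowest bin still covers 100% of the structure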
|
007c7eb9c2ddca9809ac2c86f7bf6d34ed14d41b
| 3,640,691
|
import torch
def build_target(output, gt_data, H, W):
"""
Build the training target for output tensor
Arguments:
    output -- tuple (delta_pred_batch, conf_pred_batch, class_pred_batch), output data of the yolo network
gt_data -- tuple (gt_boxes_batch, gt_classes_batch, num_boxes_batch), ground truth data
delta_pred_batch -- tensor of shape (B, H * W * num_anchors, 4), predictions of delta σ(t_x), σ(t_y), σ(t_w), σ(t_h)
conf_pred_batch -- tensor of shape (B, H * W * num_anchors, 1), prediction of IoU score σ(t_c)
class_score_batch -- tensor of shape (B, H * W * num_anchors, num_classes), prediction of class scores (cls1, cls2, ..)
gt_boxes_batch -- tensor of shape (B, N, 4), ground truth boxes, normalized values
(x1, y1, x2, y2) range 0~1
gt_classes_batch -- tensor of shape (B, N), ground truth classes (cls)
    num_boxes_batch -- tensor of shape (B, 1). number of objects
Returns:
iou_target -- tensor of shape (B, H * W * num_anchors, 1)
iou_mask -- tensor of shape (B, H * W * num_anchors, 1)
box_target -- tensor of shape (B, H * W * num_anchors, 4)
box_mask -- tensor of shape (B, H * W * num_anchors, 1)
class_target -- tensor of shape (B, H * W * num_anchors, 1)
class_mask -- tensor of shape (B, H * W * num_anchors, 1)
"""
delta_pred_batch = output[0]
conf_pred_batch = output[1]
class_score_batch = output[2]
gt_boxes_batch = gt_data[0]
gt_classes_batch = gt_data[1]
num_boxes_batch = gt_data[2]
bsize = delta_pred_batch.size(0)
num_anchors = 5 # hard code for now
# initial the output tensor
# we use `tensor.new()` to make the created tensor has the same devices and data type as input tensor's
# what tensor is used doesn't matter
iou_target = delta_pred_batch.new_zeros((bsize, H * W, num_anchors, 1))
iou_mask = delta_pred_batch.new_ones((bsize, H * W, num_anchors, 1)) * cfg.noobject_scale
box_target = delta_pred_batch.new_zeros((bsize, H * W, num_anchors, 4))
box_mask = delta_pred_batch.new_zeros((bsize, H * W, num_anchors, 1))
class_target = conf_pred_batch.new_zeros((bsize, H * W, num_anchors, 1))
class_mask = conf_pred_batch.new_zeros((bsize, H * W, num_anchors, 1))
# get all the anchors
anchors = torch.FloatTensor(cfg.anchors)
# note: the all anchors' xywh scale is normalized by the grid width and height, i.e. 13 x 13
# this is very crucial because the predict output is normalized to 0~1, which is also
# normalized by the grid width and height
all_grid_xywh = generate_all_anchors(anchors, H, W) # shape: (H * W * num_anchors, 4), format: (x, y, w, h)
all_grid_xywh = delta_pred_batch.new(*all_grid_xywh.size()).copy_(all_grid_xywh)
all_anchors_xywh = all_grid_xywh.clone()
all_anchors_xywh[:, 0:2] += 0.5
if cfg.debug:
print('all grid: ', all_grid_xywh[:12, :])
print('all anchor: ', all_anchors_xywh[:12, :])
all_anchors_xxyy = xywh2xxyy(all_anchors_xywh)
# process over batches
for b in range(bsize):
num_obj = num_boxes_batch[b].item()
delta_pred = delta_pred_batch[b]
gt_boxes = gt_boxes_batch[b][:num_obj, :]
gt_classes = gt_classes_batch[b][:num_obj]
# rescale ground truth boxes
gt_boxes[:, 0::2] *= W
gt_boxes[:, 1::2] *= H
# step 1: process IoU target
# apply delta_pred to pre-defined anchors
all_anchors_xywh = all_anchors_xywh.view(-1, 4)
box_pred = box_transform_inv(all_grid_xywh, delta_pred)
box_pred = xywh2xxyy(box_pred)
# for each anchor, its iou target is corresponded to the max iou with any gt boxes
ious = box_ious(box_pred, gt_boxes) # shape: (H * W * num_anchors, num_obj)
ious = ious.view(-1, num_anchors, num_obj)
max_iou, _ = torch.max(ious, dim=-1, keepdim=True) # shape: (H * W, num_anchors, 1)
if cfg.debug:
print('ious', ious)
# iou_target[b] = max_iou
# we ignore the gradient of predicted boxes whose IoU with any gt box is greater than cfg.threshold
iou_thresh_filter = max_iou.view(-1) > cfg.thresh
n_pos = torch.nonzero(iou_thresh_filter).numel()
if n_pos > 0:
iou_mask[b][max_iou >= cfg.thresh] = 0
# step 2: process box target and class target
# calculate overlaps between anchors and gt boxes
overlaps = box_ious(all_anchors_xxyy, gt_boxes).view(-1, num_anchors, num_obj)
gt_boxes_xywh = xxyy2xywh(gt_boxes)
# iterate over all objects
for t in range(gt_boxes.size(0)):
# compute the center of each gt box to determine which cell it falls on
# assign it to a specific anchor by choosing max IoU
gt_box_xywh = gt_boxes_xywh[t]
gt_class = gt_classes[t]
cell_idx_x, cell_idx_y = torch.floor(gt_box_xywh[:2])
cell_idx = cell_idx_y * W + cell_idx_x
cell_idx = cell_idx.long()
# update box_target, box_mask
overlaps_in_cell = overlaps[cell_idx, :, t]
argmax_anchor_idx = torch.argmax(overlaps_in_cell)
assigned_grid = all_grid_xywh.view(-1, num_anchors, 4)[cell_idx, argmax_anchor_idx, :].unsqueeze(0)
gt_box = gt_box_xywh.unsqueeze(0)
target_t = box_transform(assigned_grid, gt_box)
if cfg.debug:
print('assigned_grid, ', assigned_grid)
print('gt: ', gt_box)
print('target_t, ', target_t)
box_target[b, cell_idx, argmax_anchor_idx, :] = target_t.unsqueeze(0)
box_mask[b, cell_idx, argmax_anchor_idx, :] = 1
# update cls_target, cls_mask
class_target[b, cell_idx, argmax_anchor_idx, :] = gt_class
class_mask[b, cell_idx, argmax_anchor_idx, :] = 1
# update iou target and iou mask
iou_target[b, cell_idx, argmax_anchor_idx, :] = max_iou[cell_idx, argmax_anchor_idx, :]
if cfg.debug:
print(max_iou[cell_idx, argmax_anchor_idx, :])
iou_mask[b, cell_idx, argmax_anchor_idx, :] = cfg.object_scale
return iou_target.view(bsize, -1, 1), \
iou_mask.view(bsize, -1, 1), \
box_target.view(bsize, -1, 4),\
box_mask.view(bsize, -1, 1), \
class_target.view(bsize, -1, 1).long(), \
class_mask.view(bsize, -1, 1)
|
4608acb06f8fb0d682058841a8b8151e73b89e7b
| 3,640,692
|
import numpy as np
import pandas as pd
def dataframe_with_new_calendar(df: pd.DataFrame, new_calendar: pd.DatetimeIndex):
"""
Returns a new DataFrame where the row data are based on the new calendar (similar to Excel's VLOOKUP with
approximate match)
:param df: DataFrame
:param new_calendar: DatetimeIndex
:return: DataFrame
"""
# find the position in the old calendar that closest represents the new calendar dates
original_calendar = df.index
date_index_list = np.searchsorted(original_calendar, new_calendar, side='right')
date_index_list = [d_i - 1 for d_i in date_index_list if d_i > 0]
data_for_new_calendar = df.to_numpy()[date_index_list, :]
# in case the first dates in the new calendar are before the first available date in the DataFrame, add nans to the
# first rows
if data_for_new_calendar.shape[0] != len(new_calendar):
num_missing_rows = len(new_calendar) - data_for_new_calendar.shape[0]
nan_array = np.empty((num_missing_rows, data_for_new_calendar.shape[1]))
nan_array[:] = np.nan
# add the data after the nan rows
data_for_new_calendar = np.vstack([nan_array, data_for_new_calendar])
return pd.DataFrame(data=data_for_new_calendar, index=new_calendar, columns=df.columns)
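# Hedged usage sketch: month-end observations re-indexed onto a daily calendar; each day
# picks up the latest available row (VLOOKUP-style), with NaN before the first date.
import pandas as pd
monthly = pd.DataFrame(
    {"price": [100.0, 101.0, 103.0]},
    index=pd.DatetimeIndex(["2021-01-31", "2021-02-28", "2021-03-31"]),
)
daily = pd.date_range("2021-01-15", "2021-03-05", freq="D")
out = dataframe_with_new_calendar(monthly, daily)
print(out.loc["2021-01-15", "price"], out.loc["2021-03-01", "price"])  # nan 101.0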
|
4f5b39494080f3eae9083c78d6dd1666c1945e35
| 3,640,693
|
def get_language_titles():
""" Extract language and title from input file. """
language_titles = {}
    with open("resources/events/%s.tsv" % args.event) as f:
        input_file = f.readlines()
for line in sorted(input_file):
try:
language, title = line.split('\t')[0], line.split('\t')[1].strip()
except IndexError:
language, title = line.split(',')[0], line.split(',')[1].strip()
if args.language:
if language != args.language: continue
if language == "lang": continue
if language.startswith("%"): continue # languages with % in front of them can't be scraped.
language_titles[language] = title
return language_titles
|
dedfa8720194aef1b27c7762041692625c2955e7
| 3,640,694
|
def _find_additional_age_entities(request, responder):
"""
If the user has a query such as 'list all employees under 30', the notion of age is
implicit rather than explicit in the form of an age entity. Hence, this function is
beneficial in capturing the existence such implicit entities.
    Returns True or False depending on whether a combination of numerical entities
    and comparators exists, indicating the presence or absence of an implicit age
    entity, respectively.
"""
try:
comparator_entity = [e for e in request.entities if e['type'] == 'comparator'][0]
num_entity = [float(e['value'][0]['value'])
for e in request.entities
if e['type'] == 'sys_number']
# if any token in the text query is numeric that was missed by the num_entity,
# add it to the list
for i in request.text.split():
try:
num_entity.append(float(i))
except ValueError:
continue
except (IndexError, ValueError):
comparator_entity = []
num_entity = []
return True if comparator_entity and num_entity else False
|
971bc0805c607134b6947e0d61ebab6f217c6961
| 3,640,695
|
def merge_local_and_remote_resources(resources_local, service_sync_type, service_id, session):
"""
Main function to sync resources with remote server.
"""
if not get_last_sync(service_id, session):
return resources_local
remote_resources = _query_remote_resources_in_database(service_id, session=session)
max_depth = SYNC_SERVICES_TYPES[service_sync_type]("", "").max_depth
merged_resources = _merge_resources(resources_local, remote_resources, max_depth)
_sort_resources(merged_resources)
return merged_resources
|
1809caa17c3a8a32a5a3236b313c575ec939c0d8
| 3,640,696
|
def alertmanager():
"""
to test this:
$ curl -H "Content-Type: application/json" -d '[{"labels":{"alertname":"test-alert"}}]' 172.17.0.2:9093/api/v1/alerts
or
$ curl -H "Content-Type: application/json" -d '{"alerts":[{"labels":{"alertname":"test-alert"}}]}' 127.0.0.1:5000/alertmanager
"""
alert_json=request.get_json()
#print (alert["alerts"])
with open(alertfile, 'a') as f:
for alert in alert_json["alerts"]:
f.write(alert["labels"]["alertname"])
f.write('\n')
return ("HTTP 200 received")
|
1e204bd6dce8368c3401cb7e13ea062abebafd71
| 3,640,697
|
import argparse
def parse_args():
"""Process arguments"""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--train', '-t', required=True, type=str, help="Training ProteinNet data")
parser.add_argument('--val', '-v', type=str, help="Validation ProteinNet data")
parser.add_argument('--no_gpu', '-n', action="store_true",
help="Prevent GPU usage for ESM1b even when available")
parser.add_argument('--threshold', '-r', default=None, type=float,
help="Perform frequency classification at given threshold")
parser.add_argument('--model', '-m', default="esm_top_model.pth", help="Path to save model")
parser.add_argument('--epochs', '-e', default=3, type=int, help="Epochs to train for")
parser.add_argument('--report_batch', '-p', default=1000, type=int,
help="Batch multiple to report at")
return parser.parse_args()
|
c37d11327cb0d0baf3049943320c5bed6fb18e18
| 3,640,698
|
def RefundablePayrollTaxCredit(was_plus_sey_p, was_plus_sey_s,
RPTC_c, RPTC_rt,
rptc_p, rptc_s, rptc):
"""
Computes refundable payroll tax credit amounts.
"""
rptc_p = min(was_plus_sey_p * RPTC_rt, RPTC_c)
rptc_s = min(was_plus_sey_s * RPTC_rt, RPTC_c)
rptc = rptc_p + rptc_s
return (rptc_p, rptc_s, rptc)
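# Worked example (a sketch): with a 25% credit rate capped at 500, primary wages of
# 1,600 yield 400 and spouse wages of 8,000 hit the 500 cap, for a 900 total.
p, s, total = RefundablePayrollTaxCredit(1600.0, 8000.0, 500.0, 0.25, 0.0, 0.0, 0.0)
assert (p, s, total) == (400.0, 500.0, 900.0)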
|
e282139921045fe8e286abbde6bb4ae44151a50d
| 3,640,699
|