content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def add_data(
    dates=None,
    product="AOD15",
    *,
    inv_type=None,
    latlonbox=None,
    siteid=None,
    daily=False,
    lunar=False,
    #
    # post-proc
    freq=None,
    detect_dust=False,
    interp_to_aod_values=None,
    #
    # joblib
    n_procs=1,
    verbose=10,
):
    """Load AERONET data from the AERONET Web Service.

    Parameters
    ----------
    dates : array-like of datetime-like
        Expressing the desired min and max dates to retrieve.
        If unset, the current day will be fetched.
    product : str
    inv_type : str
        Inversion product type.
    latlonbox : array-like of float
        ``[lat1, lon1, lat2, lon2]``,
        where ``lat1, lon1`` is the lower-left corner
        and ``lat2, lon2`` is the upper-right corner.
    siteid : str
        Site identifier string.
        See https://aeronet.gsfc.nasa.gov/aeronet_locations_v3.txt for all valid site IDs.

        .. warning::
           Whether you will obtain data depends on the sites active
           during the `dates` time period.

        .. note::
           `siteid` takes precendence over `latlonbox`
           if both are specified.
    daily : bool
        Load daily averaged data.
    lunar : bool
        Load provisional lunar "Direct Moon" data instead of the default "Direct Sun".
        Only for non-inversion products.
    freq : str
        Frequency used to resample the DataFrame.
    detect_dust : bool
    interp_to_aod_values : array-like of float
        Values to interpolate AOD values to.
        Currently requires pytspack.
    n_procs : int
        For joblib.
    verbose : int
        For joblib.

    Returns
    -------
    pandas.DataFrame
    """
    a = AERONET()
    if interp_to_aod_values is not None:
        # Normalize to ndarray once so downstream interpolation sees a consistent type.
        interp_to_aod_values = np.asarray(interp_to_aod_values)
    # Options shared by both the serial and the parallel code paths below.
    kwargs = dict(
        product=product,
        inv_type=inv_type,
        latlonbox=latlonbox,
        siteid=siteid,
        daily=daily,
        lunar=lunar,
        detect_dust=detect_dust,
        interp_to_aod_values=interp_to_aod_values,
    )
    # n_procs == -1 follows the joblib convention of "use all cores".
    requested_parallel = n_procs > 1 or n_procs == -1
    if has_joblib and requested_parallel:
        # Split up by day
        min_date = dates.min()
        max_date = dates.max()
        days = pd.date_range(start=min_date, end=max_date, freq="D")  # TODO: subtract 1?
        days1 = days + pd.Timedelta(days=1)
        # Each worker fetches one raw day (freq=None); resampling is applied
        # only after the per-day frames are concatenated.
        dfs = Parallel(n_jobs=n_procs, verbose=verbose)(
            delayed(_parallel_aeronet_call)(pd.DatetimeIndex([d1, d2]), **kwargs, freq=None)
            for d1, d2 in zip(days, days1)
        )
        # Consecutive day windows overlap at the boundary, so drop duplicates.
        df = pd.concat(dfs, ignore_index=True).drop_duplicates()
        if freq is not None:
            df.index = df.time
            df = df.groupby("siteid").resample(freq).mean().reset_index()
        return df.reset_index(drop=True)
    else:
        if not has_joblib and requested_parallel:
            print(
                "Please install joblib to use the parallel feature of monetio.aeronet. "
                "Proceeding in serial mode..."
            )
        df = a.add_data(
            dates=dates,
            **kwargs,
            freq=freq,
        )
    return df
def _func(*args, **kwargs):
"""Test function used in some tests."""
return args, kwargs | 7fb2aa947806578e5378e66ce7dc1b4f3f593dbe | 23,601 |
def combine_parallel_circuits(IVprev_cols, pvconst):
    """
    Combine crosstied circuits in a substring.

    :param IVprev_cols: lists of IV curves of crosstied and series circuits
    :param pvconst: PV constants object providing ``calcParallel`` and ``calcSeries``
    :return: combined series IV curve from ``pvconst.calcSeries``
    """
    row_currents, row_voltages = [], []
    row_isc, row_imax = [], []
    # Each zipped element is one "row" of crosstied IV columns.
    for iv_columns in zip(*IVprev_cols):
        i_par, v_par = (np.asarray(arr) for arr in zip(*iv_columns))
        i_row, v_row = pvconst.calcParallel(
            i_par, v_par, v_par.max(), v_par.min()
        )
        row_currents.append(i_row)
        row_voltages.append(v_row)
        # Short-circuit current of the row: interpolate its IV curve at V = 0.
        row_isc.append(np.interp(np.float64(0), v_row, i_row))
        row_imax.append(i_row.max())
    return pvconst.calcSeries(
        np.asarray(row_currents), np.asarray(row_voltages),
        np.asarray(row_isc).mean(), np.asarray(row_imax).max()
    )
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
                     module_file: str, serving_model_dir: str,
                     metadata_path: str) -> tfx.dsl.Pipeline:
    """Creates a three component penguin pipeline with TFX.

    Args:
        pipeline_name: Name registered for the pipeline.
        pipeline_root: Root directory for pipeline outputs/artifacts.
        data_root: Directory containing the CSV input data.
        module_file: Path to the user module with the training code.
        serving_model_dir: Export destination for blessed models.
        metadata_path: Path of the sqlite MLMD database file.

    Returns:
        A configured ``tfx.dsl.Pipeline``.
    """
    # Brings data into the pipeline.
    example_gen = tfx.components.CsvExampleGen(input_base=data_root)
    # Uses user-provided Python function that trains a model.
    trainer = tfx.components.Trainer(
        module_file=module_file,
        examples=example_gen.outputs['examples'],
        train_args=tfx.proto.TrainArgs(num_steps=100),
        eval_args=tfx.proto.EvalArgs(num_steps=5))
    # NEW: Get the latest blessed model for Evaluator.
    model_resolver = tfx.dsl.Resolver(
        strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
        model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
        model_blessing=tfx.dsl.Channel(
            type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
                'latest_blessed_model_resolver')
    # NEW: Uses TFMA to compute evaluation statistics over features of a model and
    # perform quality validation of a candidate model (compared to a baseline).
    eval_config = tfma.EvalConfig(
        model_specs=[tfma.ModelSpec(label_key='species')],
        slicing_specs=[
            # An empty slice spec means the overall slice, i.e. the whole dataset.
            tfma.SlicingSpec(),
            # Calculate metrics for each penguin species.
            tfma.SlicingSpec(feature_keys=['species']),
        ],
        metrics_specs=[
            tfma.MetricsSpec(per_slice_thresholds={
                'sparse_categorical_accuracy':
                    tfma.config.PerSliceMetricThresholds(thresholds=[
                        tfma.PerSliceMetricThreshold(
                            slicing_specs=[tfma.SlicingSpec()],
                            threshold=tfma.MetricThreshold(
                                # Candidate must reach at least 0.6 accuracy.
                                value_threshold=tfma.GenericValueThreshold(
                                    lower_bound={'value': 0.6}),
                                # Change threshold will be ignored if there is no
                                # baseline model resolved from MLMD (first run).
                                change_threshold=tfma.GenericChangeThreshold(
                                    direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                                    absolute={'value': -1e-10}))
                        )]),
            })],
    )
    evaluator = tfx.components.Evaluator(
        examples=example_gen.outputs['examples'],
        model=trainer.outputs['model'],
        baseline_model=model_resolver.outputs['model'],
        eval_config=eval_config)
    # Checks whether the model passed the validation steps and pushes the model
    # to a file destination if check passed.
    pusher = tfx.components.Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'],  # Pass an evaluation result.
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir)))
    components = [
        example_gen,
        trainer,
        # Following two components were added to the pipeline.
        model_resolver,
        evaluator,
        pusher,
    ]
    return tfx.dsl.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        metadata_connection_config=tfx.orchestration.metadata
        .sqlite_metadata_connection_config(metadata_path),
        components=components)
def create_global_step() -> tf.Variable:
    """Creates a `tf.Variable` suitable for use as a global step counter.

    Creating and managing a global step variable may be necessary for
    `AbstractTrainer` subclasses that perform multiple parameter updates per
    `Controller` "step", or use different optimizers on different steps.

    In these cases, an `optimizer.iterations` property generally can't be used
    directly, since it would correspond to parameter updates instead of iterations
    in the `Controller`'s training loop. Such use cases should simply call
    `step.assign_add(1)` at the end of each step.

    Returns:
        A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
        first replica's value retained when synchronizing across replicas in
        a distributed setting.
    """
    # ONLY_FIRST_REPLICA: every replica increments in lockstep, so keeping
    # replica 0's value avoids multiplying the count by the replica count.
    return tf.Variable(
        0,
        dtype=tf.int64,
        trainable=False,
        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
def supported_coins_balance(balance, tickers):
    """
    Return a copy of *balance* restricted to supported coins.

    A coin is supported if it is BTC itself, or if a ``{coin}/BTC`` market
    exists in *tickers*. Unsupported coins are silently dropped.

    :param balance: mapping of coin symbol -> amount
    :param tickers: container of market symbols such as ``"ETH/BTC"``
    :return: dict mapping each supported coin to its amount
    """
    # NOTE: the original wrapped the BTC assignment in try/except KeyError,
    # but `coin` always comes from `balance` itself so the lookup cannot fail;
    # that dead handler (and the local shadowing the function name) is removed.
    result = {}
    for coin, amount in balance.items():
        if coin == "BTC" or f"{coin}/BTC" in tickers:
            result[coin] = amount
    return result
def _identity_map(size):
"""Function returning list of lambdas mapping vector to itself."""
return [lambda x, id: x[id] for _ in range(size)] | 6236d42d359fdc9b006bffcc597fccbc161eb53d | 23,606 |
def With(prop, val):
    """The 'with <property> <value>' specifier.

    Specifies the given property, with no dependencies.

    :param prop: property name to set
    :param val: value assigned to the property
    :return: a ``Specifier`` wrapping ``(prop, val)``
    """
    return Specifier(prop, val)
def pad_node_id(node_id: np.uint64) -> str:
    """Zero-pad a (non-negative) node id to a fixed width of 20 decimal digits.

    :param node_id: int
    :return: str
    """
    return f"{node_id:020d}"
def ballcurve(x: ArrayLike, xi: float) -> ArrayLike:
    """
    Curve generator for the nested structure, controlled by shape parameter xi.
    With ``xi == 1`` the mapping is the identity (linear).

    input:
    ----------
    x: 1D array, [0,1]
        points at which the curve is evaluated
    xi: number, >=1
        shape parameter of how stylised the curve is

    output:
    ----------
    y: 1D array, [0,1]
        evaluated function
    """
    inner = 1 - x ** (1 / xi)
    return 1 - inner ** xi
def center_vertices(vertices, faces, flip_y=True):
    """
    Translate vertices so their centroid sits at the origin.

    Args:
        vertices (V x 3): Vertices.
        faces (F x 3): Faces.
        flip_y (bool): If True, negates y coordinates (image-coordinate
            convention) and reverses face winding to preserve orientation.

    Returns:
        vertices, faces
    """
    centroid = vertices.mean(dim=0, keepdim=True)
    centered = vertices - centroid
    if flip_y:
        centered[:, 1] *= -1
        # Reversing the vertex order keeps the face normals consistent
        # after mirroring the y axis.
        faces = faces[:, [2, 1, 0]]
    return centered, faces
def fingerprint_atompair(fpSize=2048, count=False):
    """Atom pair fingerprint (list of int).

    Args:
        fpSize: Size of the generated fingerprint (defaults to 2048).
        count: The default value of False will generate fingerprint bits
            (0 or 1) whereas a value of True will generate the count of each
            fingerprint value.

    Returns:
        A callable that computes the fingerprint for a molecule; its
        ``__name__`` is set to a self-describing string.
    """
    generator = rdFingerprintGenerator.GetAtomPairGenerator(fpSize=fpSize)
    if count:
        fingerprint_fn = _fingerprint_fn_count(generator)
    else:
        fingerprint_fn = _fingerprint_fn_bits(generator)
    # Name the returned closure after its configuration (useful in logs/reports).
    fingerprint_fn.__name__ = 'fingerprint_atompair(' + \
        f'fpSize={fpSize},count={count})'
    return fingerprint_fn
def bend_euler_s(**kwargs) -> Component:
    """Sbend made of euler bends.

    Keyword arguments are forwarded unchanged to :func:`bend_euler`.
    """
    c = Component()
    b = bend_euler(**kwargs)
    b1 = c.add_ref(b)
    b2 = c.add_ref(b)
    # Mirror the second bend and chain it onto the first to form the S shape.
    b2.mirror()
    b2.connect("o1", b1.ports["o2"])
    # Expose only the two outer ports of the S-bend.
    c.add_port("o1", port=b1.ports["o1"])
    c.add_port("o2", port=b2.ports["o2"])
    return c
def phot_error(star_ADU,n_pix,n_b,sky_ADU,dark,read,gain=1.0):
    """
    Photometric error (the "CCD equation"), returned in ADU.

    INPUT:
        star_ADU - stellar flux in ADU (total ADU counts within aperture)
        n_pix    - number of pixels in aperture
        n_b      - number of background pixels
        sky_ADU  - in ADU/pix
        dark     - in e/pix
        read     - in e^2/pix
        gain     - gain in e/ADU
    OUTPUT:
        Photometric error N in ADUs
    NOTES:
        This is not the normalized error. To normalize, have to do sigma_rel = N / star_ADU
        This does not include scintillation
    """
    star_e = gain * star_ADU
    # Per-pixel background variance in electrons; (gain*0.289)**2 is the
    # ADC quantization-noise term.
    per_pixel_var = gain * sky_ADU + dark + read ** 2. + (gain * 0.289) ** 2.
    background_e = n_pix * (1. + n_pix / n_b) * per_pixel_var
    return np.sqrt(star_e + background_e) / gain
def tan(x):
    """
    tan(x) -> number

    Return the tangent of x; x in radians.
    Falls back to the complex (mpc) implementation when x is not
    representable as a real mpfr value.
    """
    try:
        # Real-valued path: round-to-nearest mpfr tangent.
        res, x = _init_check_mpfr(x)
        gmp.mpfr_tan(res, x, gmp.MPFR_RNDN)
        return mpfr._from_c_mpfr(res)
    except TypeError:
        # _init_check_mpfr rejects complex inputs with TypeError; retry as mpc.
        res, x = _init_check_mpc(x)
        gmp.mpc_tan(res, x, gmp.MPC_RNDNN)
        return mpc._from_c_mpc(res)
def test_wrapped_func():
    """
    Test uncertainty-aware functions obtained through wrapping.
    """
    ########################################
    # Function which can automatically handle numbers with
    # uncertainties:
    def f_auto_unc(angle, *list_var):
        return umath.cos(angle) + sum(list_var)
    def f(angle, *list_var):
        # We make sure that this function is only ever called with
        # numbers with no uncertainty (since it is wrapped):
        assert not isinstance(angle, uncert_core.UFloat)
        assert not any(isinstance(arg, uncert_core.UFloat)
                       for arg in list_var)
        return f_auto_unc(angle, *list_var)
    f_wrapped = uncert_core.wrap(f)
    my_list = [1, 2, 3]
    ########################################
    # Test of a wrapped function that only calls the original
    # function: it should obtain the exact same result:
    assert f_wrapped(0, *my_list) == f(0, *my_list)
    # 1 == 1 +/- 0, so the type must be checked too:
    assert type(f_wrapped(0, *my_list)) == type(f(0, *my_list))
    ########################################
    # Call with uncertainties:
    angle = uncert_core.ufloat(1, 0.1)
    list_value = uncert_core.ufloat(3, 0.2)
    # The random variables must be the same (full correlation):
    assert ufloats_close(f_wrapped(angle, *[1, angle]),
                         f_auto_unc(angle, *[1, angle]))
    assert ufloats_close(f_wrapped(angle, *[list_value, angle]),
                         f_auto_unc(angle, *[list_value, angle]))
    ########################################
    # Non-numerical arguments, and explicit and implicit derivatives:
    def f(x, y, z, t, u):
        return x+2*z+3*t+4*u
    # Explicit derivatives for x and z; None lets wrap() compute the others
    # numerically. No derivative is supplied for the trailing u argument.
    f_wrapped = uncert_core.wrap(
        f, [lambda *args: 1, None, lambda *args: 2, None])  # No deriv. for u
    assert f_wrapped(10, 'string argument', 1, 0, 0) == 12
    x = uncert_core.ufloat(10, 1)
    # All four numeric args share the same random variable, so their
    # std devs add linearly with the coefficients 1+2+3+4.
    assert numbers_close(f_wrapped(x, 'string argument', x, x, x).std_dev,
                         (1+2+3+4)*x.std_dev)
def T2str_mag_simplified(K, TE, T2str, N):
    """Signal model for a T2*-weighted UTE GRE magnitude image.

    S = K * [ exp(-TE/T2*) ] + N

    parameters:
        K     :: constant (proportional to proton density)
        TE    :: sequence echo time
        T2str :: relaxation due to spin-spin effects and dephasing
        N     :: constant offset "noise" term
    @return expected (magnitude) signal
    """
    decay = np.exp(-TE / T2str)
    return K * decay + N
def tune(runner, kernel_options, device_options, tuning_options):
    """ Find the best performing kernel configuration in the parameter space
        using a genetic algorithm.

    :params runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner

    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: kernel_tuner.interface.Options

    :param device_options: A dictionary with all options for the device
        on which the kernel should be tuned.
    :type device_options: kernel_tuner.interface.Options

    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: kernel_tuner.interface.Options

    :returns: A list of dictionaries for executed kernel configurations and their
        execution times. And a dictionary that contains a information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()
    """
    # GA hyperparameters: one gene per tunable parameter.
    dna_size = len(tuning_options.tune_params.keys())
    pop_size = 20
    generations = 100
    tuning_options["scaling"] = False
    tune_params = tuning_options.tune_params
    population = random_population(dna_size, pop_size, tune_params)
    best_time = 1e20
    all_results = []
    # cache avoids re-benchmarking configurations seen in earlier generations
    cache = {}
    for generation in range(generations):
        if tuning_options.verbose:
            print("Generation %d, best_time %f" % (generation, best_time))
        #determine fitness of population members
        weighted_population = []
        for dna in population:
            time = _cost_func(dna, kernel_options, tuning_options, runner, all_results, cache)
            weighted_population.append((dna, time))
        population = []
        #'best_time' is used only for printing
        if tuning_options.verbose and all_results:
            best_time = min(all_results, key=lambda x: x["time"])["time"]
        #population is sorted such that better configs have higher chance of reproducing
        weighted_population.sort(key=lambda x: x[1])
        #crossover and mutate
        for _ in range(pop_size//2):
            ind1 = weighted_choice(weighted_population)
            ind2 = weighted_choice(weighted_population)
            ind1, ind2 = crossover(ind1, ind2)
            population.append(mutate(ind1, dna_size, tune_params))
            population.append(mutate(ind2, dna_size, tune_params))
    return all_results, runner.dev.get_environment()
def getTimeString(t, centi=True):
    """
    category: General Utility Functions

    Given a value in milliseconds, returns a Lstr with:
    (hours if > 0):minutes:seconds:centiseconds.

    WARNING: this Lstr value is somewhat large so don't use this to
    repeatedly update node values in a timer/etc. For that purpose you
    should use timeDisplay nodes and attribute connections.
    """
    # NOTE(review): the arithmetic below relies on integer division (`/` on
    # ints, Python 2 semantics); under Python 3 `t/1000` yields floats and
    # str(h)/str(m) would render e.g. "1.0" -- confirm the target runtime.
    if type(t) is not int: t = int(t)
    bits = []
    subs = []
    h = (t/1000)/(60*60)
    if h != 0:
        bits.append('${H}')
        subs.append(('${H}', bs.Lstr(resource='timeSuffixHoursText',
                                     subs=[('${COUNT}', str(h))])))
    m = ((t/1000)/60)%60
    if m != 0:
        bits.append('${M}')
        subs.append(('${M}', bs.Lstr(resource='timeSuffixMinutesText',
                                     subs=[('${COUNT}', str(m))])))
    # we add seconds if its non-zero *or* we havn't added anything else
    if centi:
        # centisecond precision: show fractional seconds with 2 decimals
        s = (t/1000.0 % 60.0)
        if s >= 0.005 or not bits:
            bits.append('${S}')
            subs.append(('${S}', bs.Lstr(resource='timeSuffixSecondsText',
                                         subs=[('${COUNT}', ('%.2f' % s))])))
    else:
        s = (t/1000 % 60)
        if s != 0 or not bits:
            bits.append('${S}')
            subs.append(('${S}', bs.Lstr(resource='timeSuffixSecondsText',
                                         subs=[('${COUNT}', str(s))])))
    return bs.Lstr(value=' '.join(bits), subs=subs)
def validation_supervised(model, input_tensor, y_true, loss_fn, multiclass =False, n_classes= 1):
    """
    Return the average loss (and accuracy where possible) for one batch of
    data with a supervised model.

    Args:
        model: callable returning predictions for a float tensor batch.
        input_tensor: batch of inputs; cast to float before the forward pass.
        y_true: ground-truth targets.
        loss_fn: criterion comparing predictions with targets.
        multiclass: if True, targets are class indices and accuracy is
            computed from the argmax of the predictions.
        n_classes: number of output columns targets are reshaped to when
            ``multiclass`` is False.

    Returns:
        Tuple ``(mean_loss, acc)``; ``acc`` is ``None`` when accuracy cannot
        be computed.
    """
    y_pred = model(input_tensor.float())
    if multiclass:
        loss = loss_fn(y_pred, y_true)
        y_hat = y_pred.argmax(dim = 1)
        acc = accuracy(y_hat, y_true)
    else:
        y_true = y_true.view(-1, n_classes).float()
        loss = loss_fn(y_pred, y_true)
        try:
            # BUG FIX: the original referenced an undefined name ``n_out``
            # here, so the bare except always swallowed the NameError and
            # accuracy was silently reported as None in regression mode.
            acc = accuracy(y_pred, y_true)
        except Exception:
            acc = None
    return loss.mean().item(), acc
import logging
import sys
def configure_logger(app):
    """
    Configure the Flask app's logger based on the app's ``HANDLER`` setting.

    ``StreamHandler``: INFO-and-below (non-error) records to stdout, ERROR
    records to stderr. Any other value: a rotating file handler writing
    ``access.log``.

    :param app: Flask application (reads ``app.config['HANDLER']`` and ``app.debug``)
    :return: the configured logger
    """
    #
    # support stream and rotating handlers
    logger = app.logger
    logger.setLevel(logging.INFO)
    if app.config['HANDLER'] == "StreamHandler":
        class InfoFilter(logging.Filter):
            def filter(self, rec):
                # for stdout, every below and including warning should be shown
                return rec.levelno < logging.ERROR
        h1 = logging.StreamHandler(sys.stdout)
        # this is minimal level
        h1.setLevel(logging.DEBUG if app.debug else logging.INFO)
        h1.setFormatter(RequestFormatter())
        h1.addFilter(InfoFilter())
        logger.addHandler(h1)
        # only errors to stderr
        h2 = logging.StreamHandler(sys.stderr)
        h2.setLevel(logging.ERROR)
        h2.setFormatter(RequestFormatter())
        logger.addHandler(h2)
    else:  # elif config.HANDLER == "RotatingFileHandler":
        # NOTE(review): RotatingFileHandler and RequestFormatter are not
        # imported in this snippet -- confirm they are in scope at module level.
        handler = RotatingFileHandler(
            'access.log', maxBytes=10000, backupCount=1)
        handler.setFormatter(RequestFormatter())
        handler.setLevel(logging.DEBUG if app.debug else logging.INFO)
        logger.addHandler(handler)
    return logger
def timing(func=None, *, name=None, is_stage=None):
    """
    Decorator to measure the time taken by the function to execute

    :param func: Function
    :param name: Display Name of the function for which the time is being calculated
    :param is_stage: Identifier for mining stage

    Examples:
        >>>
        >>> @timing(name="foo")
        >>> def func():
        >>>     ...
        >>>
        >>> @timing
        >>> def func():
        >>>     ...
        >>>
    """
    # Bare @timing usage: called with the function directly, name/is_stage
    # keep their defaults. NOTE(review): in that case `name` stays None and
    # is logged/accumulated as None -- consider defaulting to func.__name__.
    if func is None:
        # @timing(name=...) usage: return a decorator with the options bound.
        return partial(timing, name=name, is_stage=is_stage)
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = timer()
        result = func(*args, **kwargs)
        end = timer()
        total_time = end - start
        logger.info(f"Time taken to execute `{name}`: {total_time} sec")
        # Accumulate into the module-level tally appropriate for this kind
        # of measurement (per-function vs per-mining-stage).
        if not is_stage:
            if name in ELAPSED_TIME_ON_FUNCTIONS:
                ELAPSED_TIME_ON_FUNCTIONS[name] += total_time
            else:
                ELAPSED_TIME_ON_FUNCTIONS[name] = total_time
        else:
            if name in STAGE_WISE_TIME:
                STAGE_WISE_TIME[name] += total_time
            else:
                STAGE_WISE_TIME[name] = total_time
        return result
    return wrapper
def is_skip_file(filename):
    """ Should the given file be skipped over for testing

    :param filename: The file's name
    :type filename: String
    :return: True if the given file should be skipped, false otherwise
    :rtype: Boolean
    """
    name_len = len(filename)
    for suffix in SKIP_FILES:
        suffix_len = len(suffix)
        # A file is skipped when its name ends with a configured suffix.
        if suffix_len <= name_len and filename[-suffix_len:] == suffix:
            return True
    return False
def lowercase_or_notify(x):
    """ Lowercases the input if it is valid, otherwise logs the error and sets a default value

        Args:
            String to lowercase
        Returns:
            Lowercased string if possible, else unmodified string or default value.
    """
    try:
        return x.lower()
    except Exception:
        pass
    # .lower() failed: x is not a string. Keep truthy non-NaN values as-is,
    # otherwise fall back to the default placeholder.
    if x and not np.isnan(x):
        logger.info('Program activity of {} was unable to be lowercased. Entered as-is.'.format(x))
        return x
    logger.info('Null value found for program activity name. Entered default value.')  # should not happen
    return '(not provided)'
def is_not_null(node, eval_type, given_variables):
    """Process the is_not_null operator.

    :param node: Formula node
    :param eval_type: Type of evaluation
    :param given_variables: Dictionary of var/values
    :return: Boolean result, SQL query, or text result
    """
    if eval_type == EVAL_EXP:
        # Python evaluation
        return not value_is_null(get_value(node, given_variables))
    if eval_type == EVAL_SQL:
        # SQL evaluation: parameterized via psycopg2 sql composition,
        # returned together with an (empty) parameter list.
        query = sql.SQL('({0} is not null)').format(
            OnTaskDBIdentifier(node['field']),
        )
        return query, []
    # Text evaluation
    return '{0} is not null'.format(node['field'])
def search_for_rooms(filters, allow_admin=False, availability=None):
    """Search for a room, using the provided filters.

    :param filters: The filters, provided as a dictionary
    :param allow_admin: A boolean specifying whether admins have override privileges
    :param availability: A boolean specifying whether (un)available rooms should be provided,
                         or `None` in case all rooms should be returned.
    """
    # Favorited rooms sort first (outer join keeps non-favorites with NULL user_id).
    query = (Room.query
             .outerjoin(favorite_room_table, db.and_(favorite_room_table.c.user_id == session.user.id,
                                                     favorite_room_table.c.room_id == Room.id))
             .reset_joinpoint()  # otherwise filter_by() would apply to the favorite table
             .options(joinedload('owner').load_only('id'))
             .filter(~Room.is_deleted)
             .order_by(favorite_room_table.c.user_id.is_(None), db.func.indico.natsort(Room.full_name)))
    criteria = {}
    if 'capacity' in filters:
        query = query.filter(Room.capacity >= filters['capacity'])
    if 'building' in filters:
        criteria['building'] = filters['building']
    if 'division' in filters:
        criteria['division'] = filters['division']
    query = query.filter_by(**criteria)
    if 'text' in filters:
        text = ' '.join(filters['text'].strip().split())
        # "#123" searches by exact room id; anything else is a text search.
        if text.startswith('#') and text[1:].isdigit():
            query = query.filter(Room.id == int(text[1:]))
        else:
            query = query.filter(_make_room_text_filter(text))
    if filters.get('equipment'):
        # A room matches only if it has ALL requested equipment types
        # (correlated count must equal the number of requested types).
        subquery = (db.session.query(RoomEquipmentAssociation)
                    .with_entities(db.func.count(RoomEquipmentAssociation.c.room_id))
                    .filter(RoomEquipmentAssociation.c.room_id == Room.id,
                            EquipmentType.name.in_(filters['equipment']))
                    .join(EquipmentType, RoomEquipmentAssociation.c.equipment_id == EquipmentType.id)
                    .correlate(Room)
                    .as_scalar())
        query = query.filter(subquery == len(filters['equipment']))
    if filters.get('features'):
        for feature in filters['features']:
            query = query.filter(Room.available_equipment.any(EquipmentType.features.any(RoomFeature.name == feature)))
    if filters.get('favorite'):
        query = query.filter(favorite_room_table.c.user_id.isnot(None))
    if filters.get('mine'):
        ids = get_managed_room_ids(session.user)
        query = query.filter(Room.id.in_(ids))
    query = _filter_coordinates(query, filters)
    if availability is None:
        return query
    start_dt, end_dt = filters['start_dt'], filters['end_dt']
    repeatability = (filters['repeat_frequency'], filters['repeat_interval'])
    availability_filters = [Room.filter_available(start_dt, end_dt, repeatability, include_blockings=False,
                                                  include_pre_bookings=False)]
    if not (allow_admin and rb_is_admin(session.user)):
        # Non-admins are additionally restricted by bookable hours,
        # nonbookable periods and the per-room/global booking length limit.
        selected_period_days = (filters['end_dt'] - filters['start_dt']).days
        booking_limit_days = db.func.coalesce(Room.booking_limit_days, rb_settings.get('booking_limit'))
        criterion = db.and_(Room.filter_bookable_hours(start_dt.time(), end_dt.time()),
                            Room.filter_nonbookable_periods(start_dt, end_dt),
                            db.or_(booking_limit_days.is_(None),
                                   selected_period_days <= booking_limit_days))
        unbookable_ids = [room.id
                          for room in query.filter(db.and_(*availability_filters), ~criterion)
                          if not room.can_override(session.user, allow_admin=False)]
        availability_filters.append(~Room.id.in_(unbookable_ids))
    availability_criterion = db.and_(*availability_filters)
    if availability is False:
        # Caller asked for the UNavailable rooms: invert the criterion.
        availability_criterion = ~availability_criterion
    return query.filter(availability_criterion)
def get_scalar(obj):
    """obj can either be a value, or a type

    Returns the Stella type for the given object.

    :raises exc.TypeError: if no Stella scalar type exists for the object.
    """
    type_ = type(obj)
    if type_ == type(int):
        # obj is itself a class (type(int) is `type`), so use it directly.
        type_ = obj
    elif type_ == PyWrapper:
        # Unwrap the Python type held by the wrapper.
        type_ = obj.py
    # HACK {
    if type_ == type(None):  # noqa
        return None_
    elif type_ == str:
        return Str
    # } HACK
    try:
        return _pyscalars[type_]
    except KeyError:
        raise exc.TypeError("Invalid scalar type `{0}'".format(type_))
def get_importable_subclasses(base_class, used_in_automl=True):
    """Get importable subclasses of a base class. Used to list all of our estimators, transformers, components and pipelines dynamically.

    Args:
        base_class (abc.ABCMeta): Base class to find all of the subclasses for.
        used_in_automl: Not all components/pipelines/estimators are used in automl search. If True,
            only include those subclasses that are used in the search. This would mean excluding classes related to
            ExtraTrees, ElasticNet, and Baseline estimators.

    Returns:
        List of subclasses.
    """
    all_classes = _get_subclasses(base_class)
    classes = []
    for cls in all_classes:
        # Only consider classes defined inside the blocktorch pipelines package.
        if "blocktorch.pipelines" not in cls.__module__:
            continue
        try:
            # Importability check: a class counts only if it can be
            # instantiated with no arguments.
            cls()
            classes.append(cls)
        except (ImportError, MissingComponentError, TypeError):
            logger.debug(
                f"Could not import class {cls.__name__} in get_importable_subclasses"
            )
        except EnsembleMissingPipelinesError:
            # Ensembles legitimately require pipelines at init time; still include.
            classes.append(cls)
    if used_in_automl:
        classes = [cls for cls in classes if cls.__name__ not in _not_used_in_automl]
    return classes
import math
def sin(x, deg=None, **kwargs):
    """Return the sine of x, in degrees or radians.

    Degrees are used when ``deg`` is truthy, or when ``deg`` is None and the
    module-level ``trigDeg`` flag is set; otherwise x is taken in radians.
    """
    x = float(x)
    use_degrees = deg or (trigDeg and deg is None)
    if use_degrees:
        x = math.radians(x)
    return math.sin(x)
def Debug(message,
          print_init_shape=True,
          print_forward_shape=False,
          print_inverse_shape=False,
          compare_vals=False,
          name='unnamed'):
    # language=rst
    """
    Help debug shapes

    :param print_init_shape: Print the shapes
    :param print_forward_shape: Print the shapes
    :param print_inverse_shape: Print the shapes
    :param compare_vals: Print the difference between the value of the forward pass and the reconstructed

    Returns the usual flow triple ``(init_fun, forward, inverse)``; the layer
    itself is an identity and only prints diagnostics.
    """
    # Captured by forward/inverse via closure when compare_vals is on.
    saved_val = None
    def init_fun(key, input_shape, condition_shape):
        if(print_init_shape):
            print(message, 'input_shape', input_shape)
        return name, input_shape, (), ()
    def forward(params, state, log_px, x, condition, **kwargs):
        if(print_forward_shape):
            if(isinstance(x, tuple) or isinstance(x, list)):
                print(message, 'x shapes', [_x.shape for _x in x], 'log_px shapes', [_x.shape for _x in log_px])
            else:
                print(message, 'x.shape', x.shape, 'log_px.shape', log_px.shape)
        if(compare_vals):
            # Stash the forward value so inverse can report reconstruction error.
            nonlocal saved_val
            saved_val = x
        return log_px, x, state
    def inverse(params, state, log_pz, z, condition, **kwargs):
        if(print_inverse_shape):
            if(isinstance(z, tuple) or isinstance(z, list)):
                print(message, 'z shapes', [_z.shape for _z in z], 'log_pz shapes', [_z.shape for _z in log_pz])
            else:
                print(message, 'z.shape', z.shape, 'log_pz.shape', log_pz.shape)
        if(compare_vals):
            # Difference between the reconstructed value and the stashed
            # forward value; zero norms mean a perfect inversion.
            if(isinstance(z, tuple) or isinstance(z, list)):
                print(message, 'jnp.linalg.norm(z - saved_val)', [jnp.linalg.norm(_z - _x) for _x, _z in zip(saved_val, z)])
            else:
                print(message, 'jnp.linalg.norm(z - saved_val)', jnp.linalg.norm(z - saved_val))
        return log_pz, z, state
    return init_fun, forward, inverse
def bloated_nested_block(block_dets, *, repeat=False, **_kwargs):
    """
    Look for long indented blocks under conditionals, inside loops etc that are
    candidates for separating into functions to simplify the narrative of the
    main code.

    :param block_dets: parsed block details; ``.element`` is queried with the
        configured XPaths.
    :param repeat: when True, suppress the explanatory/demo material that was
        already shown for an earlier occurrence.
    :return: dict keyed by conf.Level with the advice text, or None when no
        long nested block is found.
    """
    # Collect which outer constructs (if/for/while/...) contain a long block.
    bloated_outer_types = set()
    included_if = False
    for lbl, outer_xpath in OUTER_XPATHS.items():
        if has_long_block(block_dets.element, outer_xpath):
            bloated_outer_types.add(lbl)
            if lbl == 'if':
                included_if = True
    if not bloated_outer_types:
        return None
    title = layout("""\
        ### Possibility of avoiding excessively long nested blocks
        """)
    summary_bits = []
    for bloated_outer_type in bloated_outer_types:
        summary_bits.append(layout(f"""\
            The code has at least one long nested block under
            `{bloated_outer_type}:`
            """))
    summary = ''.join(summary_bits)
    # Pre-build the two advice sections; they are included below depending
    # on `repeat` and on whether an `if` block was among the offenders.
    short_circuit_msg = layout("""\
        #### Short-circuit and exit early

        It may be possible to unnest the indented code block by exiting early if the
        condition in the `if` expression is not met.
        """)
    short_circuit_demo_msg = (
        layout("""
            For example, instead of:
            """)
        +
        layout("""\
            if tall_enough:
                ## add to basketball team
                line 1
                line 2
                line 3
                ...
                line 30
            logging.info("Finished!")
            """, is_code=True)
        +
        layout("""\
            we could possibly write:
            """)
        +
        layout('''\
            if not tall_enough:
                return
            ## add to basketball team
            line 1
            line 2
            line 3
            ...
            line 30
            logging.info("Finished!")
            ''', is_code=True)
    )
    move_to_func_msg = layout("""\
        #### Shift to function

        It may be possible to pull most of the nested code block into a function
        which can be called instead.
        """)
    move_to_func_demo_msg = (
        layout("""
            For example, instead of:
            """)
        +
        layout("""\
            for name in names:
                ## contact name
                line 1
                line 2
                line 3
                ...
                line 30
            logging.info("Finished!")
            """, is_code=True)
        +
        layout("""\
            we could possibly write:
            """)
        +
        layout('''\
            def contact(name):
                """
                Contact person ...
                """
                line 1
                line 2
                line 3
                ...
                line 30
            for name in names:
                contact(name)
            logging.info("Finished!")
            ''', is_code=True)
    )
    if not repeat:
        brief_strategy = layout("""\
            You might want to consider applying a strategy for avoiding
            excessively long indented blocks:
            """)
        # Early-exit advice only makes sense when an `if` block was flagged.
        if included_if:
            short_circuit = short_circuit_msg
            short_circuit_demo = short_circuit_demo_msg
        else:
            short_circuit = ''
            short_circuit_demo = ''
        move_to_func = move_to_func_msg
        move_to_func_demo = move_to_func_demo_msg
        human = layout("""\
            Computers can handle lots of nesting without malfunctioning. Human
            brains are not so fortunate. As it says in The Zen of Python:

            > "Flat is better than nested."
            """)
    else:
        # On repeat occurrences, show only the bare title/summary.
        brief_strategy = ''
        short_circuit = ''
        short_circuit_demo = ''
        move_to_func = ''
        move_to_func_demo = ''
        human = ''
    message = {
        conf.Level.BRIEF: (title + summary + brief_strategy + short_circuit
                           + move_to_func),
        conf.Level.MAIN: (title + summary + brief_strategy + short_circuit
                          + short_circuit_demo + move_to_func + move_to_func_demo),
        conf.Level.EXTRA: human,
    }
    return message
from typing import Any
def _to_bytes(value: Any, type_str: str = "bytes32") -> bytes:
    """Convert a value to bytes.

    :param value: bytes, str or int to convert (bools are rejected explicitly,
        since bool is a subclass of int).
    :param type_str: solidity-style type: ``"bytes"`` (variable length),
        ``"byte"`` (alias for ``bytes1``) or ``"bytesN"`` with 1 <= N <= 32.
    :return: big-endian bytes of the requested size.
    :raises TypeError: for unconvertible value types.
    :raises ValueError: for an invalid ``bytesN`` size.
    :raises OverflowError: when the value does not fit in N bytes.
    """
    if isinstance(value, bool) or not isinstance(value, (bytes, str, int)):
        raise TypeError(f"Cannot convert {type(value).__name__} '{value}' to {type_str}")
    # Normalize to a hex string first so all input kinds share one code path.
    value = _to_hex(value)
    if type_str == "bytes":
        return eth_utils.to_bytes(hexstr=value)
    if type_str == "byte":
        type_str = "bytes1"
    # strip("bytes") removes the leading letters, leaving only the digit(s).
    size = int(type_str.strip("bytes"))
    if size < 1 or size > 32:
        raise ValueError(f"Invalid type: {type_str}")
    try:
        return int(value, 16).to_bytes(size, "big")
    except OverflowError:
        raise OverflowError(f"'{value}' exceeds maximum length for {type_str}")
def _map_sbs_sigs_back(df: pd.DataFrame) -> pd.Series:
    """
    Map Back Single-Base Substitution Signatures.
    -----------------------
    Args:
        * df: pandas.core.frame.DataFrame with index to be mapped

    Returns:
        * pandas.core.series.Series with matching indices to context96
    """
    def _check_to_flip(x, ref):
        # Keep the context if it is already a canonical key; otherwise use
        # its reverse complement.
        if x in ref:
            return x
        else:
            return compl(x)
    if df.index.name is None: df.index.name = 'index'
    df_idx = df.index.name
    if ">" in df.index[0]:
        # Already in arrow format
        context_s = df.reset_index()[df_idx].apply(sbs_annotation_converter)
    else:
        # Already in word format
        context_s = df.reset_index()[df_idx]
    return context_s.apply(lambda x: _check_to_flip(x, context96.keys()))
def rmsd(
    coords1: np.ndarray,
    coords2: np.ndarray,
    atomicn1: np.ndarray,
    atomicn2: np.ndarray,
    center: bool = False,
    minimize: bool = False,
    atol: float = 1e-9,
) -> float:
    """
    Compute the RMSD between two molecules.

    Parameters
    ----------
    coords1, coords2: np.ndarray
        Coordinates of molecules 1 and 2 (same shape).
    atomicn1, atomicn2: np.ndarray
        Atomic numbers of molecules 1 and 2 (must match element-wise).
    center: bool
        Center molecules at the origin before comparing.
    minimize: bool
        Compute the minimum RMSD via the QCP method (implies centering).
    atol: float
        Absolute tolerance for the QCP method (see :func:`qcp_rmsd`).

    Returns
    -------
    float
        RMSD.

    Notes
    -----
    When `minimize=True`, the QCP method is used. [1]_ The molecules are
    centred at the origin according to the center of geometry and superimposed
    in order to minimize the RMSD.

    .. [1] D. L. Theobald, *Rapid calculation of RMSDs using a quaternion-based
       characteristic polynomial*, Acta Crys. A **61**, 478-480 (2005).
    """
    assert np.all(atomicn1 == atomicn2)
    assert coords1.shape == coords2.shape

    # QCP requires centred coordinates; plain centring is also opt-in.
    if center or minimize:
        c1, c2 = utils.center(coords1), utils.center(coords2)
    else:
        c1, c2 = coords1, coords2

    if minimize:
        return qcp.qcp_rmsd(c1, c2, atol)

    diff = c1 - c2
    return np.sqrt(np.sum(diff ** 2) / coords1.shape[0])
def get_bridge_interfaces(yaml):
    """Return a flat list of all interfaces that are bridgedomain members.

    `yaml` is the parsed configuration mapping; returns [] when no
    bridgedomains are configured. Order follows the bridgedomain ordering
    in the config.
    """
    ret = []
    # `not in` instead of `not ... in` (PEP 8 / E713).
    if "bridgedomains" not in yaml:
        return ret
    for _ifname, iface in yaml["bridgedomains"].items():
        if "interfaces" in iface:
            ret.extend(iface["interfaces"])
    return ret
import os
import sys
def get_library_dirs():
    """
    Return a list of directories likely to contain Arrow C++ libraries for
    linking C or Cython extensions using pyarrow.
    """
    package_cwd = os.path.dirname(__file__)
    dirs = [package_cwd]
    if sys.platform == 'win32':
        # TODO(wesm): Is this necessary, or does setuptools within a conda
        # installation add Library\lib to the linker path for MSVC?
        # conda layout: ...\Lib\site-packages\pyarrow -> ...\Library\lib
        site_packages = os.path.dirname(package_cwd)
        python_base_install = os.path.dirname(site_packages)
        dirs.append(os.path.join(python_base_install, 'Library', 'lib'))
    return dirs
from typing import List
import tqdm
def get_entity_matched_docs(doc_id_map: List[str], data: List[dict]):
    """Gets the documents where the document name is contained inside the claim.

    Args:
        doc_id_map (List[str]): A list of document names
        data (List[dict]): One of the FEVEROUS datasets

    Returns:
        List[List[str]]: A list of lists of the related documents
    """
    # BUG FIX: the module does `import tqdm`, so `tqdm(claims)` called the
    # module object itself (TypeError: 'module' object is not callable).
    # Bind the progress-bar callable explicitly, independent of how the
    # module-level import is spelled.
    from tqdm import tqdm as _tqdm

    related_docs = []
    for claim in _tqdm([d["claim"] for d in data]):
        # Keep doc ids that occur verbatim in the claim; ids of length <= 3
        # are dropped as too ambiguous to be a meaningful match.
        matches = [doc_id for doc_id in doc_id_map
                   if len(doc_id) > 3 and doc_id in claim]
        related_docs.append(matches)
    return related_docs
import requests
def getTemplateKeys(k):
    """
    Print template keys for license or gitignore templates from the GitHub API.

    Params: k (str) -- "license" or "git" (case-insensitive)
    Return: code (int) -- 0 on success, 1 on HTTP failure, 2 on bad argument
    """
    kind = k.lower()
    if kind == "license":
        r = requests.get(GITHUB_LICENSE_API)
        # Don't parse the payload when the request failed; the original fell
        # through and printed fields from the error response.
        if r.status_code != 200:
            return 1
        print("Github LICENSE template keys: ")
        for item in r.json():
            print(item["key"])
        return 0
    if kind == "git":
        r = requests.get(GITHUB_GITIGNORE_API)
        if r.status_code != 200:
            return 1
        print("Github .gitignore template keys: ")
        for item in r.json():
            print(item)
        return 0
    print("Invalid argument for --get-template-keys! : options [git, license]")
    return 2
def get_num_conv2d_layers(model, exclude_downsample=True, include_linear=True):
    """Count the Conv2D (and optionally Linear) layers in `model`,
    optionally skipping modules whose name contains "downsample"."""
    total = 0
    for name, module in model.named_modules():
        if exclude_downsample and "downsample" in name:
            continue
        if is_conv2d(module) or (include_linear and isinstance(module, nn.Linear)):
            total += 1
    return total
def im_list_to_blob(ims, RGB, NIR, DEPTH):
    """Convert a list of images into a network input.

    Assumes images are already prepared (means subtracted, BGR order, ...).
    Images may differ in size; each is placed in the top-left corner of a
    zero-padded blob sized to the largest height/width in the list.
    """
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    num_images = len(ims)
    # Channel count follows the enabled modalities: all three -> 5,
    # any two -> 4, otherwise 3 (plain RGB-shaped input).
    if RGB & NIR & DEPTH:
        num_channels = 5
    elif (RGB & NIR) | (RGB & DEPTH) | (NIR & DEPTH):
        num_channels = 4
    else:
        num_channels = 3
    blob = np.zeros((num_images, max_shape[0], max_shape[1], num_channels),
                    dtype=np.float32)
    # BUG FIX: `xrange` does not exist on Python 3; `range` is equivalent
    # here (and also valid on Python 2).
    for i in range(num_images):
        im = ims[i]
        blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
    return blob
async def async_setup_entry(hass, config_entry, async_add_devices):
    """Set up Loxone switch entities for a config entry."""
    miniserver = get_miniserver_from_config_entry(hass, config_entry)
    loxconfig = miniserver.lox_config.json

    def _resolve_names(cfg, source):
        # Translate room/cat UUIDs (taken from `source`) into display names.
        cfg["room"] = get_room_name_from_room_uuid(loxconfig, source.get("room", ""))
        cfg["cat"] = get_cat_name_from_cat_uuid(loxconfig, source.get("cat", ""))

    entities = []
    for cfg in get_all_switch_entities(loxconfig):
        ctrl_type = cfg["type"]
        if ctrl_type in ("Pushbutton", "Switch"):
            _resolve_names(cfg, cfg)
            entities.append(LoxoneSwitch(**cfg))
        elif ctrl_type == "TimedSwitch":
            _resolve_names(cfg, cfg)
            entities.append(LoxoneTimedSwitch(**cfg))
        elif ctrl_type == "Intercom" and "subControls" in cfg:
            # Intercoms expose their switches as sub-controls; name them
            # "<parent> - <sub>" and inherit room/cat from the parent entry.
            for sub in cfg["subControls"].values():
                sub["name"] = "{} - {}".format(cfg["name"], sub["name"])
                _resolve_names(sub, cfg)
                entities.append(LoxoneIntercomSubControl(**sub))
    async_add_devices(entities, True)
    return True
import torch
def gradU_from_momenta(x, p, y, sigma):
    """
    Strain F'(x) for momenta p defined at control points y.

    A method "convolve_gradient" does a similar job but only computes
    (gradF . z).

    x (M, D), p (N, D), y (N, D)
    return: gradU (M, D, D)
    """
    kern = deformetrica.support.kernels.factory("torch", gpu_mode=False, kernel_width=sigma)
    # Move tensors with respect to gpu_mode (CPU here).
    tx = torch.tensor(x, device="cpu")
    ty = torch.tensor(y, device="cpu")
    tp = torch.tensor(p, device="cpu")
    # Gaussian kernel A = exp(-(x_i - y_j)^2 / sigma^2), shape (M, N).
    gauss = torch.exp(-kern._squared_distances(tx, ty) / (sigma ** 2))
    # Kernel gradient B = -2/sigma^2 * (x_i - y_j) * A, shape (D, M, N).
    grad_kernel = (-2 / (sigma ** 2)) * kern._differences(tx, ty) * gauss
    grad_u = torch.matmul(grad_kernel, tp)  # (D, M, D)
    return np.array(grad_u.transpose(0, 1))
def process(seed, K):
    """
    K is the model order / number of zeros.
    """
    print(K, end=" ")
    # Draw K dirac locations uniformly over one period.
    rng = np.random.RandomState(seed)
    tk = np.sort(rng.rand(K) * period)
    # True zeros on the unit circle.
    uk = np.exp(-1j * 2 * np.pi * tk / period)
    coef_poly = poly.polyfromroots(uk)  # more accurate than np.poly
    # Estimated zeros recovered from the polynomial coefficients.
    uk_hat = np.roots(np.flipud(coef_poly))
    # Same estimates projected back onto the unit circle.
    uk_hat_unit = uk_hat / np.abs(uk_hat)
    # Errors in dB relative to the true-zero norm.
    ref_norm = np.linalg.norm(uk)
    err_roots = 20 * np.log10(ref_norm / distance(uk, uk_hat)[0])
    err_unit = 20 * np.log10(ref_norm / distance(uk, uk_hat_unit)[0])
    return err_roots, err_unit
def _gen_sieve_array(M, factor_base):
"""Sieve Stage of the Quadratic Sieve. For every prime in the factor_base
that doesn't divide the coefficient `a` we add log_p over the sieve_array
such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where `i`
is an integer. When p = 2 then log_p is only added using
``-M <= soln1 + i*p <= M``.
Parameters:
===========
M : sieve interval
factor_base : factor_base primes
"""
sieve_array = [0]*(2*M + 1)
for factor in factor_base:
if factor.soln1 is None: #The prime does not divides a
continue
for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime):
sieve_array[idx] += factor.log_p
if factor.prime == 2:
continue
#if prime is 2 then sieve only with soln_1_p
for idx in range((M + factor.soln2) % factor.prime, 2*M, factor.prime):
sieve_array[idx] += factor.log_p
return sieve_array | 98a8e5bedaa56dbe53aa8a152c20a015d7b3556d | 23,643 |
def yolo_eval_weighted_nms(yolo_outputs,
                           anchors,
                           num_classes,
                           image_shape,
                           score_threshold=.6):
    """YOLO evaluation (weighted-NMS variant).

    Args:
        yolo_outputs: per-scale feature maps, e.g. [batch, 13, 13, 3*85]
        anchors: [9, 2] anchor boxes
        num_classes: number of classes
        image_shape: shape of the original image
        score_threshold: an anchor is positive when score > threshold

    Returns:
        boxes_, scores_, classes_ (lists indexed by class)
    """
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32

    # Decode every output scale, then merge all candidates.
    per_layer_boxes = []
    per_layer_scores = []
    for layer, feats in enumerate(yolo_outputs):
        layer_boxes, layer_scores = yolo_boxes_and_scores(feats,
                                                          anchors[anchor_mask[layer]],
                                                          num_classes,
                                                          input_shape,
                                                          image_shape,
                                                          layer)
        per_layer_boxes.append(layer_boxes)
        per_layer_scores.append(layer_scores)
    boxes = K.concatenate(per_layer_boxes, axis=0)
    box_scores = K.concatenate(per_layer_scores, axis=0)

    positive = box_scores >= score_threshold
    boxes_, scores_, classes_ = [], [], []
    for c in range(num_classes):
        # Select the positive anchors of class c via the score mask.
        class_boxes = tf.boolean_mask(boxes, positive[:, c])
        class_scores = tf.boolean_mask(box_scores[:, c], positive[:, c])
        boxes_.append(class_boxes)
        scores_.append(class_scores)
        classes_.append(K.ones_like(class_scores, 'int32') * c)
    return boxes_, scores_, classes_
def next_coach_id():
    """
    Generates the next id for newly added coaches, since their slugs (which
    combine the id and name fields) are added post-commit.
    """
    max_id = Coach.objects.aggregate(Max("id"))['id__max']
    # BUG FIX: aggregate Max returns None when the table is empty, which made
    # `None + 1` raise TypeError; the first coach gets id 1.
    return 1 if max_id is None else max_id + 1
def get_unsigned_short(data, index):
    """Read two bytes from data at `index` as a little-endian unsigned 16-bit value."""
    low = data[index]
    high = data[index + 1]
    return low + (high << 8)
def getObjDetRoI(imgSize, imgPatchSize, objx1, objy1, objx2, objy2):
    """
    Get region of interest (ROI) for a given object detection with respect to
    image and image patch boundaries.

    :param imgSize: size of the image of interest as (height, width), e.g. [1080, 1920].
    :param imgPatchSize: patch size of the image patch of interest (e.g., 192).
    :param objx1: Upper left x coordinate of the object detection.
    :param objy1: Upper left y coordinate of the object detection.
    :param objx2: Lower right x coordinate of the object detection.
    :param objy2: Lower right y coordinate of the object detection.
    :return: (startX, startY, endX, endY) with end coordinates grown to patch
        multiples and clamped to the image boundaries.
    """
    # Work in float for the modulo arithmetic below.
    startX = float(objx1)
    startY = float(objy1)
    endX = float(objx2)
    endY = float(objy2)
    # Grow the box so width/height become multiples of imgPatchSize.
    # NOTE(review): when a side is already an exact multiple this still adds a
    # full extra patch (add == imgPatchSize) — preserved as-is; confirm intent.
    endX += imgPatchSize - ((endX - startX) % imgPatchSize)
    endY += imgPatchSize - ((endY - startY) % imgPatchSize)
    # Clamp to image boundaries; imgSize is (height, width).
    if endX > imgSize[1]:
        endX = imgSize[1]
    if endY > imgSize[0]:
        endY = imgSize[0]
    return startX, startY, endX, endY
def sigma_pp(b):
    """Pair-production cross section as a function of the velocity ratio b."""
    # Bracketed term of the pair-production expression.
    bracket = 2 * b * (b ** 2 - 2) + (3 - b ** 4) * np.log((1 + b) / (1 - b))
    return sigma_T * 3.0 / 16.0 * (1 - b ** 2) * bracket
def compute_radii_simple(distances):
    """
    Compute the radius for every hypersphere given the pairwise distances to
    satisfy Eq. 6 in the paper. Does not implement the heuristic described in
    section 3.5: the radius is simply the median (middle element of the row
    sorted ascending).
    """
    n_inputs = tf.shape(distances)[1]
    ascending = tf.sort(distances, direction="ASCENDING", axis=-1)
    return ascending[:, n_inputs // 2]
def data_resolution_and_offset(data, fallback_resolution=None):
    """Compute resolution and offset from x/y axis data.

    Only uses the first and last coordinate values, assuming the data is
    regularly sampled.

    Returns
    =======
    (resolution: float, offset: float)
    """
    if data.size < 1:
        raise ValueError("Can't calculate resolution for empty data")
    if data.size < 2:
        # A single sample carries no spacing information.
        if fallback_resolution is None:
            raise ValueError("Can't calculate resolution with data size < 2")
        res = fallback_resolution
    else:
        res = ((data[data.size - 1] - data[0]) / (data.size - 1.0)).item()
    # The offset is half a pixel before the first sample centre.
    off = data[0] - 0.5 * res
    return res, off.item()
from typing import List
def get_changed_files_committed_and_workdir(
    repo: Git, commithash_to_compare: str
) -> List[str]:
    """List the files that differ between the given commit and the working copy."""
    diff_output = repo.repo.git.diff("--name-only", commithash_to_compare)
    return diff_output.split()
from typing import Optional
from typing import Any
def load_document_by_string(
    string: str, uri: str, loadingOptions: Optional[LoadingOptions] = None
) -> Any:
    """Load a CWL object from a serialized YAML string."""
    # Parse with a loader that does not coerce timestamps, then delegate.
    parsed = yaml_no_ts().load(string)
    return load_document_by_yaml(parsed, uri, loadingOptions)
import re
def rename_symbol(symbol):
    """Rename the given symbol.

    For a C symbol, FLAGS.rename_string is inserted after any well-known
    prefix (see split_symbol). For a C++ symbol, FLAGS.rename_string is
    prepended to every mangled occurrence of each namespace listed in
    FLAGS.hide_cpp_namespaces.

    Args:
      symbol: C or C++ symbol to rename.

    Returns:
      Dictionary, keys = old symbols, values = renamed symbols.
    """
    renames = {}
    if is_cpp_symbol(symbol):
        renamed = symbol
        if FLAGS.platform in ["linux", "android", "darwin", "ios"]:
            for ns in FLAGS.hide_cpp_namespaces:
                if not symbol_includes_cpp_namespace(symbol, ns):
                    continue
                # Itanium ABI mangles a namespace as "<len><name>", so
                # renaming "namespace" to "prefixnamespace" rewrites
                # "9namespace" -> "15prefixnamespace".
                # See https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
                new_ns = FLAGS.rename_string + ns
                renamed = re.sub("(?<=[^0-9])%d%s" % (len(ns), ns),
                                 "%d%s" % (len(new_ns), new_ns), renamed)
                renames[symbol] = renamed
        elif FLAGS.platform == "windows":
            for ns in FLAGS.hide_cpp_namespaces:
                if not symbol_includes_cpp_namespace(symbol, ns):
                    continue
                # MSVC mangles a namespace as "@namespace@@".
                # See https://msdn.microsoft.com/en-us/library/56h2zst2.aspx
                new_ns = FLAGS.rename_string + ns
                renamed = re.sub("@%s@@" % ns, "@%s@@" % new_ns, renamed)
                renames[symbol] = renamed
    else:
        if FLAGS.platform == "windows" and symbol.startswith("$LN"):
            # $LN* are local symbols; never rename those.
            return renames
        # C symbol: split off any prefix, insert the rename string, re-join.
        prefix, remainder = split_symbol(symbol)
        renames[symbol] = prefix + FLAGS.rename_string + remainder
        for added_prefix in _additional_symbol_prefixes.get(FLAGS.platform, []):
            renames[added_prefix + symbol] = renames[symbol]
    return renames
from datetime import datetime
def py_time(data):
    """Parse 'HH:MM:SS' or 'HH:MM:SS.ffffff' into a datetime.time.

    BUG FIX: with `from datetime import datetime` in scope (as imported
    above), `datetime.datetime.strptime` raises AttributeError; call
    `datetime.strptime` on the class directly.
    """
    fmt = '%H:%M:%S.%f' if '.' in data else '%H:%M:%S'
    return datetime.strptime(data, fmt).time()
def create_empty_copy(G, with_nodes=True):
    """Return a graph of the same class as G with all of the edges removed.

    Parameters
    ----------
    G : graph
       A NetworkX graph
    with_nodes : bool (default=True)
       Include nodes.

    Notes
    -----
    Graph, node, and edge data is not propagated to the new graph.
    """
    fresh = G.__class__()
    if with_nodes:
        fresh.add_nodes_from(G)
    return fresh
def trim_resize_frame(frame, resize_ratio, trim_factor):
    """
    Resize a frame according to the specified ratio while keeping the
    original aspect ratio, then trim the longer side of the frame (centered)
    according to the specified factor.

    Parameters
    ----------
    frame: np.array
        The input frame.
    resize_ratio: float
        Resize factor.
    trim_factor: float
        Trim factor for the longer side of the frame. Must be between 0 and 1.

    Returns
    ----------
    np.array
        Resized and trimmed frame.
    """
    frame = cv2.resize(
        frame, dsize=(0, 0), fx=resize_ratio, fy=resize_ratio)
    height, width = frame.shape[:2]
    # BUG FIX: the original conditional tuple was mis-parenthesized — for
    # portrait frames it bound the length to the tuple (False, height),
    # making `length * trim_factor` raise TypeError. Branch explicitly.
    if width > height:
        hor_longer, longer = True, width
    else:
        hor_longer, longer = False, height
    trimmed = int(longer * trim_factor)
    # Centre the kept span on the longer side.
    start = max(longer - trimmed, 0) // 2
    if hor_longer:
        return frame[:, start:start + trimmed, :]
    return frame[start:start + trimmed, :, :]
def copy_doclist(doclist, no_copy=()):
    """
    Save & return a copy of the given doclist.

    The first document becomes the new parent; the remaining documents are
    saved as its children. Fields listed in `no_copy` are cleared on every
    copied document.
    """
    # NOTE: the default was a mutable list; an empty tuple is equivalent
    # (only iterated) and avoids the shared-mutable-default pitfall.
    def _copy(doc, parent=None):
        # Fresh Document with copied fields; clearing name makes save()
        # create a new record.
        c = Document(fielddata=doc.fields.copy())
        for f in no_copy:
            # BUG FIX: `dict.has_key` was removed in Python 3; `in` works on
            # both Python 2 and 3.
            if f in c.fields:
                c.fields[f] = None
        c.name = None
        if parent is not None:
            c.parent = parent
        c.save(1)
        return c

    # Main doc first; its generated name becomes the children's parent.
    copies = [_copy(doclist[0])]
    parent_name = copies[0].name
    for d in doclist[1:]:
        copies.append(_copy(d, parent=parent_name))
    return copies
def set_from_tags(tags, title, description, all=True):
    """Create a photoset from the caller's photos matching `tags`.

    all=True means include non-public photos.
    """
    user = flickr.test_login()
    matches = flickr.photos_search(user_id=user.id, auth=all, tags=tags)
    # The first matching photo becomes the set's primary photo.
    photoset = flickr.Photoset.create(matches[0], title, description)
    photoset.editPhotos(matches)
    return photoset
import os
from datetime import datetime
def submit():
    """Upload local file.

    Needs to follow the station register template.
    """
    spec = get_layout_active_spec('Upload')
    if request.method == 'POST':
        # Target path: UPLOAD_FOLDER/<yymmdd>/<uploaded_file>.
        # NOTE(review): if the module does `from datetime import datetime`,
        # `datetime.date.today()` raises AttributeError — confirm the file
        # actually imports the `datetime` module here.
        # NOTE(review): `request.form.get(...)` returns None when the field
        # is missing, which makes os.path.join raise TypeError.
        filename = os.path.join(
            app.config['UPLOAD_FOLDER'],
            datetime.date.today().strftime('%y%m%d'),
            request.form.get('uploaded_file')
        )
        # NOTE(review): a successful join is always a non-empty string, so
        # this guard is always true when reached — presumably it was meant
        # to validate the form field instead.
        if filename:
            return render_template('upload_file.html',
                                   active_spec=spec,
                                   connect_to_reg=True)
    return render_template('upload_file.html', active_spec=spec) | 75b9af46dc067977425c9714b13e9f516ded6bca | 23,659
def heg_kfermi(rs):
    """Magnitude of the Fermi wavevector for the homogeneous electron gas (HEG).

    Args:
        rs (float): Wigner-Seitz radius
    Return:
        float: kf
    """
    # Number density from the Wigner-Seitz radius.
    density = (4 * np.pi * rs ** 3 / 3) ** (-1)
    return (3 * np.pi ** 2 * density) ** (1. / 3)
import requests
import json
def stock_em_jgdy_detail():
    """
    Eastmoney Data Center - Featured Data - Institutional Research details.
    http://data.eastmoney.com/jgdy/xx.html
    :return: institutional research detail records
    :rtype: pandas.DataFrame
    """
    url = "http://datainterface3.eastmoney.com/EM_DataCenter_V3/api/JGDYMX/GetJGDYMX"
    params = {
        "js": "datatable8174128",
        "tkn": "eastmoney",
        "secuCode": "",
        "dateTime": "",
        "sortfield": "0",
        "sortdirec": "1",
        "pageNum": "1",
        "pageSize": "5000",
        "cfg": "jgdymx",
        "_": "1605088363693",
    }
    resp = requests.get(url, params=params)
    # The endpoint returns JSONP; strip the "callback(" prefix and ")" suffix.
    payload = json.loads(resp.text[resp.text.find("(") + 1:-1])
    block = payload["Data"][0]
    # Rows are "|"-separated; the trailing field is a filler column.
    rows = [row.split("|") for row in block["Data"]]
    frame = pd.DataFrame(rows, columns=block["FieldName"].split(",") + ["_"])
    return frame.iloc[:, :-1]
def intersect_with_grid(int_coords, fill=False):
    """
    Args:
    - int_coords: projected coordinates to be used for intersection
    - fill: whether to include the interior of the intersected cells. I.e.
      if the coords of a box are provided and intersect with 0,0 and 4,4,
      this would include the entire 25-cell grid

    Returns:
        GeoDataFrame with three columns:
        - x: x coordinate of NDFD grid. A higher x seems to move down, towards the south?
        - y: y coordinate of NDFD grid. A higher y seems to move right, towards the east?
        - geometry: geometry of grid cell (reprojected back into WGS84)
    """
    grid_path = create_grid()
    with rasterio.Env(), rasterio.open(grid_path) as src:
        # Raster row/col of every coordinate; a set removes duplicates.
        cells = {src.index(*coord) for coord in int_coords}
        if fill:
            cells = fill_cells(cells)
        # Box for each cell spans its lower-left to upper-right corner.
        rows = [
            [x, y, box(*src.xy(x, y, 'll'), *src.xy(x, y, 'ur'))]
            for x, y in cells
        ]
    grid = gpd.GeoDataFrame(rows, columns=['x', 'y', 'geometry'], crs=constants.crs)
    return grid.to_crs(epsg=4326)
def safe_epsilon_softmax(epsilon, temperature):
    """Tolerantly handles the temperature=0 case.

    Returns a DiscreteDistribution whose sample/probs/logprob/entropy fall
    back to epsilon-greedy whenever temperature <= 0, and use the
    epsilon-softmax otherwise. The KL is always the softmax-based
    categorical KL at the given temperature.

    NOTE(review): the jax.lax.cond calls below use the legacy 5-argument
    (pred, true_operand, true_fn, false_operand, false_fn) signature —
    confirm the pinned jax version still supports it.
    """
    egreedy = epsilon_greedy(epsilon)
    unsafe = epsilon_softmax(epsilon, temperature)
    def sample_fn(key: Array, logits: Array):
        # Softmax sampling only when temperature > 0; epsilon-greedy otherwise.
        return jax.lax.cond(temperature > 0,
                            (key, logits), lambda tup: unsafe.sample(*tup),
                            (key, logits), lambda tup: egreedy.sample(*tup))
    def probs_fn(logits: Array):
        return jax.lax.cond(temperature > 0,
                            logits, unsafe.probs,
                            logits, egreedy.probs)
    def log_prob_fn(sample: Array, logits: Array):
        return jax.lax.cond(temperature > 0,
                            (sample, logits), lambda tup: unsafe.logprob(*tup),
                            (sample, logits), lambda tup: egreedy.logprob(*tup))
    def entropy_fn(logits: Array):
        return jax.lax.cond(temperature > 0,
                            logits, unsafe.entropy,
                            logits, egreedy.entropy)
    def kl_fn(p_logits: Array, q_logits: Array):
        return categorical_kl_divergence(p_logits, q_logits, temperature)
    return DiscreteDistribution(sample_fn, probs_fn, log_prob_fn, entropy_fn,
                                kl_fn) | cf9d09dcd82638c526fb9508161181af6452dad5 | 23,663
def get_object_from_controller(object_type, object_name, controller_ip, username, password, tenant):
    """
    Fetch a named object of the given type from an Avi controller, raising
    on failure.

    :param object_type: type of the object to fetch
    :param object_name: name of the object to fetch
    :param controller_ip: ip of controller
    :param username: username of controller
    :param password: password of controller
    :param tenant: tenant of controller
    :return: the API response for the named object
    :raises Exception: if the lookup fails; the original error is chained.
    """
    # Create new session
    session = ApiSession.get_session(controller_ip, username,
                                     password=password, tenant=tenant)
    try:
        return session.get_object_by_name(object_type, object_name)
    except Exception as exc:
        # BUG FIX: the original used a bare `except:` and passed the
        # logging-style kwarg exc_info=True to Exception(), which itself
        # raises TypeError. Chain the original cause instead.
        raise Exception("Failed get %s" % object_name) from exc
from typing import DefaultDict
def scale_reshaping(scale: np.ndarray,
                    op2d: common.BaseNode,
                    kernel_channel_mapping: DefaultDict,
                    in_channels: bool = True) -> np.ndarray:
    """
    Before scaling a kernel, the scale factor needs to be reshaped to the
    correct dimensions. This is a function of the layer that is scaled and
    whether its input channels or output channels should be scaled.
    The index of the correct kernel axis is obtained from
    kernel_channel_mapping.

    Args:
        scale: Scale factor to scale the kernel channels by.
        op2d: Node to scale its kernel.
        kernel_channel_mapping: Mapping from a layer to a tuple of indices of
            its output/input kernel channels.
        in_channels: Kernel's index of input channels.

    Returns:
        The scale factor after reshaping it to the correct shape.
    """
    op_ndims = op2d.get_weights_by_keys(KERNEL).ndim
    # BUG FIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # equivalent dtype and works on all NumPy versions.
    reshape_target = np.ones(op_ndims, dtype=int)
    # -1 on the selected axis lets np.reshape broadcast the scale along it;
    # index 0 selects the output-channel axis, index 1 the input-channel axis.
    reshape_target[kernel_channel_mapping.get(op2d.type)[int(in_channels)]] = -1
    return np.reshape(scale, reshape_target)
import os
def process_metadata(split_name, caption_data, image_dir):
  """Process the captions and combine the data into a list of ImageMetadata.

  Args:
    split_name: A train/test/val split name.
    caption_data: caption file containing caption annotations.
    image_dir: Directory containing the image files.

  Returns:
    A list of ImageMetadata.
  """
  print("Processing image-text...")
  id_to_captions = {}
  image_metadata = []
  num_captions = 0
  count = 0
  for img in caption_data:
    count += 1
    label = img['id']
    filename = os.path.join(image_dir, img['file_path'])
    # NOTE(review): printing every filename is very chatty for large sets.
    print(filename)
    # Skip entries whose image file is missing on disk.
    if not os.path.exists(filename):
      continue;
    # NOTE(review): this assert is unreachable — the check above already
    # skipped missing files.
    assert os.path.exists(filename)
    captions = img['processed_tokens']
    id_to_captions.setdefault(label, [])
    id_to_captions[label].append(captions)
    split = img['split']
    assert split == split_name
    image_metadata.append(ImageMetadata(label, filename, captions, split))
    num_captions += len(captions)
    if len(captions) > 2:
      print("index %d with %d captions" % (count, len(captions)))
  # num_examples counts all entries, including any skipped above.
  num_examples = len(caption_data)
  num_classes = len(id_to_captions)
  print("Finished processing %d captions for %d images of %d identities in %s" %
        (num_captions, num_examples, num_classes, split_name))
  # Write out the data preparation information.
  output_file = '%s/%s_data_info.txt' % (FLAGS.output_dir, split_name)
  with tf.gfile.FastGFile(output_file, "w") as f:
    f.write("Finished processing %d captions for %d images of %d identities in %s." %
            (num_captions, num_examples, num_classes, split_name))
  return image_metadata | 240b789b3164059765d5cfc3b0e9cda6f532fac6 | 23,666
def add_new_exif(info):
    """Create an EXIF record (detail table) from the parsed tag mapping `info`."""
    # Destination ExifInfo field -> EXIF tag name looked up in `info`.
    tag_for_field = {
        "make": "Image Make",
        "model": "Image Model",
        "orientation": "Image Orientation",
        "date_original": "EXIF DateTimeOriginal",
        "x_resolution": "Image XResolution",
        "y_resolution": "Image YResolution",
        "resolution_unit": "Image ResolutionUnit",
        "artist": "Image Artist",
        "copyright": "Image Copyright",
        "software": "Image Software",
        "img_length": "EXIF ExifImageLength",
        "img_width": "EXIF ExifImageWidth",
        "exposure_time": "EXIF ExposureTime",
        "exposure_program": "EXIF ExposureProgram",
        "exposure_bias": "EXIF ExposureBiasValue",
        "exposure_mode": "EXIF ExposureMode",
        "fnumber": "EXIF FNumber",
        "sensitivity": "EXIF ISOSpeedRatings",
        "metering_mode": "EXIF MeteringMode",
        "flash": "EXIF Flash",
        "focal_len": "EXIF FocalLength",
        "white_balance": "EXIF WhiteBalance",
        "gps_latitude_ref": "GPS GPSLatitudeRef",
        "gps_latitude": "GPS GPSLatitude",
        "gps_longitude_ref": "GPS GPSLongitudeRef",
        "gps_longitude": "GPS GPSLongitude",
        "gps_altitude": "GPS GPSAltitude",
        "gps_datetime": "GPS GPSDatetime",
        # The empty tag names reproduce the original lookups verbatim; they
        # always yield None (no tag was wired up for these two fields).
        "gps_direction": "",
        "gps_pos_err": "",
    }
    return ExifInfo(**{field: info.get(tag) for field, tag in tag_for_field.items()})
def prefetch_input_data(reader,
                        file_pattern,
                        is_training,
                        batch_size,
                        values_per_shard,
                        input_queue_capacity_factor=16,
                        num_reader_threads=1,
                        shard_queue_name="filename_queue",
                        value_queue_name="input_queue"):
  """Prefetches string values from disk into an input queue.

  In training the capacity of the queue is important because a larger queue
  means better mixing of training examples between shards. The minimum number of
  values kept in the queue is values_per_shard * input_queue_capacity_factor,
  where input_queue_memory factor should be chosen to trade-off better mixing
  with memory usage.

  Args:
    reader: Instance of tf.ReaderBase.
    file_pattern: Comma-separated list of file patterns (e.g.
        /tmp/train_data-?????-of-00100).
    is_training: Boolean; whether prefetching for training or eval.
    batch_size: Model batch size used to determine queue capacity.
    values_per_shard: Approximate number of values per shard.
    input_queue_capacity_factor: Minimum number of values to keep in the queue
      in multiples of values_per_shard. See comments above.
    num_reader_threads: Number of reader threads to fill the queue.
    shard_queue_name: Name for the shards filename queue.
    value_queue_name: Name for the values input queue.

  Returns:
    A Queue containing prefetched string values.
  """
  # Expand every comma-separated glob pattern into concrete shard files.
  data_files = []
  for pattern in file_pattern.split(","):
    data_files.extend(tf.gfile.Glob(pattern))
  if not data_files:
    # NOTE(review): tf.logging.fatal logs at FATAL level; confirm whether it
    # aborts here, since execution otherwise continues with an empty list.
    tf.logging.fatal("Found no input files matching %s", file_pattern)
  else:
    tf.logging.info("Prefetching values from %d files matching %s",
                    len(data_files), file_pattern)
  if is_training:
    # Training: shuffle shards, and shuffle values via RandomShuffleQueue.
    filename_queue = tf.train.string_input_producer(
        data_files, shuffle=True, capacity=16, name=shard_queue_name)
    min_queue_examples = values_per_shard * input_queue_capacity_factor
    capacity = min_queue_examples + 100 * batch_size
    values_queue = tf.RandomShuffleQueue(
        capacity=capacity,
        min_after_dequeue=min_queue_examples,
        dtypes=[tf.string],
        name="random_" + value_queue_name)
  else:
    """
    num_epochs: If specified, string_input_producer produces each string
    from string_tensor num_epochs times before generating an OutOfRange error.
    If not specified, string_input_producer can cycle through the strings in
    string_tensor an unlimited number of times.
    """
    # Eval: deterministic order — no shuffling, FIFO queue.
    filename_queue = tf.train.string_input_producer(
        data_files, num_epochs=None, shuffle=False, capacity=1, name=shard_queue_name)
    capacity = values_per_shard + 3 * batch_size
    values_queue = tf.FIFOQueue(
        capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name)
  # One enqueue op per reader thread; the QueueRunner drives them.
  enqueue_ops = []
  for _ in range(num_reader_threads):
    _, value = reader.read(filename_queue)
    enqueue_ops.append(values_queue.enqueue([value]))
  tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
      values_queue, enqueue_ops))
  # Export queue fullness as a monitoring summary.
  tf.summary.scalar(
      "queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
      tf.cast(values_queue.size(), tf.float32) * (1. / capacity))
  return values_queue | b754c1163cb868214e9ab74e1ae127a794a04808 | 23,668
def Chat_(request):
    """
    Chat endpoint. Expected payload:

    {
        "value" : "Your query"
    }
    """
    # NOTE(review): debug print of the raw payload; consider proper logging.
    print(request.data)
    serializer = PatternSerializer(request.data)
    try:
        response = ChatBot(serializer.data["value"])
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
    # not swallowed; also fixed the "formate" typo in the client-facing
    # error message.
    except Exception:
        response = {
            "error": "Data is in wrong format use { 'value' : 'Your query' }",
            "response": None,
            "tag": None
        }
    return Response(response)
def get_solubility(molecular_weight, density):
    """
    Estimate the solubility (mol/L) of each oil pseudo-component.

    Uses the Huibers & Lehr method (Huibers & Katritzky correlation from a
    2012 EPA report, modified by Lehr to better match measurements), adapted
    here to return results in mol/L.

    Parameters
    ----------
    molecular_weight : np.array
        Molecular weights of each pseudo-component as recorded in the NOAA
        Oil Library (g/mol).
    density : np.array
        Density of each pseudo-component as recorded in the NOAA Oil Library
        (kg/m^3).

    Returns
    -------
    np.array
        Solubilities (mol/L) for each pseudo-component of the oil.
    """
    exponent = -36.7 * molecular_weight / density
    return 46.4 * 10. ** exponent
def build_1d_frp_matrix(func, x, sigma, B=1):
    """Build the quadratic FRP matrix respecting periodic boundary conditions.

    func: kernel function
    x: positions of the points
    sigma: width of the kernel at each point
    B: period length
    """
    N = len(x)
    # Periodic images: 5 periods on each side plus the original.
    shifts = np.arange(-5, 6) * B
    A = np.empty((N, N))
    for r in range(N):
        for c in range(N):
            A[r, c] = sum(func(x[r] - x[c] + shift, sigma[r]) for shift in shifts)
    return A
def invalid_item(item_key, valid_flag=False):
    """
    Set the ``valid_flag`` of the registry entries matching *item_key*.

    Raises ``RequiredError`` when *item_key* is empty; otherwise runs the
    update attributed to the current user and returns its result.
    """
    if kind.str_is_empty(item_key):
        raise RequiredError("item_key")
    registry_query = Registry.all()
    registry_query.filter("item_key =", item_key)
    registry_query.set("valid_flag", valid_flag)
    return registry_query.update(context.get_user_id())
def ret_str() -> str:
    """Return the empty string.

    Returns
    -------
    str
        Always ``''``.
    """
    return ""
from typing import Union
def get_wh_words(document: Union[Doc, Span]):
    """
    Return every WH-word token in *document*.

    WH-words are identified purely by their Penn Treebank tag:\n
    - WDT, WP, WP$, WRB (when, where, why, whence, whereby, wherein,
      whereupon, how, what, which, whose, who, ...)\n
    Resources:\n
    - https://grammar.collinsdictionary.com/easy-learning/wh-words\n
    - https://www.ling.upenn.edu/hist-corpora/annotation/pos-wh.htm

    :param document: The parsed document
    :return: The list of WH-word tokens
    """
    wh_tags = {'WDT', 'WP', 'WP$', 'WRB'}
    return [token for token in document if token.tag_ in wh_tags]
def sample_mixture_gaussian(batch_size, p_array, mu_list, sig_list, k=K, d=DIM):
    """
    samples from a mixture of normals
    :param batch_size: sample size
    :param p_array: np array which includes probability for each component of mix
    :param mu_list: list of means of each component
    :param sig_list: list of covariance matrices of each component
    :param k: unused — immediately overwritten with ``len(mu_list)`` below
    :param d: unused — immediately overwritten with the mean dimension below
    :return: samples from mixture, shape (batch_size, d)
    """
    # NOTE(review): k and d (and the module-level defaults K/DIM) never
    # influence the result — both are recomputed from mu_list here.
    if hasattr(mu_list[0], "__len__"):
        d = len(mu_list[0])  # dimension of distribution
    else:
        d = 1
    k = len(mu_list)  # number of mixtures
    dataset = np.zeros([batch_size, d])
    # Draw one component index per sample, weighted by p_array.
    rh = np.random.choice(range(k), p=p_array, size=batch_size)
    for i in range(batch_size):
        if d > 1:
            dataset[i, :] = np.random.multivariate_normal(mean=mu_list[rh[i]], cov=sig_list[rh[i]])
        else:
            # NOTE(review): in this 1-D branch sig_list entries are used as
            # standard deviations, not covariances — confirm with callers.
            dataset[i, :] = np.random.randn() * sig_list[rh[i]] + mu_list[rh[i]]
    return dataset
def sign(x: float) -> float:
    """Return the sign of *x* as a float: 1.0, -1.0, or 0.0 for zero."""
    # Both comparisons are False for NaN, so NaN maps to 0.0 as before.
    return float((x > 0) - (x < 0))
import numpy
def CalculateLocalDipoleIndex(mol):
    """
    Calculate the local dipole index (D): the mean absolute difference of
    Gasteiger partial charges across all bonds of the molecule, rounded to
    three decimals.
    """
    GMCharge.ComputeGasteigerCharges(mol, iter_step)
    charges = [float(atom.GetProp('_GasteigerCharge')) for atom in mol.GetAtoms()]
    bond_deltas = [
        numpy.absolute(charges[bond.GetBeginAtom().GetIdx()] - charges[bond.GetEndAtom().GetIdx()])
        for bond in mol.GetBonds()
    ]
    n_bonds = len(mol.GetBonds())
    return round(sum(bond_deltas) / n_bonds, 3)
def tile1(icon="", **kw):
    """<!-- Tile with icon, icon can be font icon or image -->"""
    # The tile label is required; read it first so a missing key fails early.
    content = [kw['tile_label']]
    icon_span = span(cls="icon %s" % icon)
    content.append(icon_span)
    return div(ctx=content, cls="tile-content iconic")
from typing import Optional
def config_server(sender_email:str, sender_autorization_code:str, smtp_host: Optional[str] = None, smtp_port: Optional[int] = None, timeout=10):
    """
    Build and validate an SMTP server configuration.

    :param sender_email: sender's email
    :param sender_autorization_code: sender's smtp authorization code
    :param smtp_host: smtp host address
    :param smtp_port: smtp host port
    :param timeout: timeout
    :return: smtp server object
    :raises SMTPConfigException: when the SMTP connection cannot be established
    """
    assert isinstance(sender_email, str), "sender_email should be given a string"
    assert isinstance(sender_autorization_code, str), "sender_authorization_code should be given a string"
    smtp = server(sender_email, sender_autorization_code, smtp_host=smtp_host, smtp_port=smtp_port, timeout=timeout)
    if not smtp.smtp_able():
        raise SMTPConfigException
    print("server config success")
    return smtp
def try_to_import_file(file_name):
    """
    Try to import *file_name* as a Python module.

    First attempts import_file_as_package(); on any error falls back to
    import_file_as_module(). If both fail, no exception propagates —
    instead the two caught exceptions are returned as a pair.

    :param file_name: The path to import.
    :return: The loaded module, or a 2-tuple of the two exceptions.
    """
    try:
        return import_file_as_package(file_name)
    except Exception as package_error:
        try:
            return import_file_as_module(file_name)
        except Exception as module_error:
            return package_error, module_error
def is_uppervowel(char: str) -> bool:
    """
    Checks if the character is an uppercase Irish vowel (aeiouáéíóú).
    :param char: the character to check
    :return: true if the input is a single character, is uppercase, and is an Irish vowel
    """
    # Set membership implies the input is exactly one of these single
    # characters, so the explicit len(char) == 1 check is unnecessary.
    return char in {"A", "E", "I", "O", "U", "Á", "É", "Í", "Ó", "Ú"}
from typing import Optional
def visualize_permutation_results(
    obs_r2: float,
    permuted_r2: np.ndarray,
    verbose: bool = True,
    permutation_color: str = "#a6bddb",
    output_path: Optional[str] = None,
    show: bool = True,
    close: bool = False,
) -> float:
    """
    Plot the permutation-test distribution of r2 against the observed r2.

    Parameters
    ----------
    obs_r2 : float
        Denotes the r2 value obtained using `x2_array` to predict `x1_array`,
        given `z_array` if it was not None.
    permuted_r2 : 1D np.ndarray
        Should have length `num_permutations`. Each element denotes the r2
        attained using a permuted version of `x2_array` to predict `x1_array`,
        given `z_array` if it was not None.
    verbose : optional, bool.
        Denotes whether or not the p-value of the permutation test will be
        printed to the stdout. Default == True.
    permutation_color : optional, str.
        Denotes the color of the kernel density estimate used to visualize the
        distribution of r2 from the permuted values of `x2_array`.
        Default == '#a6bddb'.
    output_path : optional, str or None.
        Denotes the path to the location where the plot visualizing the
        permutation test results will be stored. If `output_path` is None, the
        plot will not be stored. Default is None.
    show : optional, bool.
        Denotes whether the matplotlib figure that visualizes the results of
        the permutation test should be shown. Default == True.
    close : optional, bool.
        Denotes whether the matplotlib figure that visualizes the results of
        the permutation test should be closed. Default == False.

    Returns
    -------
    p_value : float.
        The p-value of the visual permutation test, denoting the percentage of
        times that the r2 with permuted `x2_array` was greater than the r2 with
        the observed `x2_array`.
    """
    fig, ax = plt.subplots(figsize=(10, 6))
    # Fraction of permuted r2 values exceeding the observed one.
    p_value = (obs_r2 < permuted_r2).mean()
    if verbose:
        msg = "The p-value of the permutation independence test is {:.2f}."
        print(msg.format(p_value))
    sbn.kdeplot(permuted_r2, ax=ax, color=permutation_color, label="Simulated")
    # Fixed: dropped the unused `precision=1` keyword argument, which
    # str.format silently ignored (flagged by flake8 as F522).
    v_line_label = "Observed\np-val: {:0.3f}".format(p_value)
    ax.vlines(
        obs_r2,
        ax.get_ylim()[0],
        ax.get_ylim()[1],
        linestyle="dashed",
        color="black",
        label=v_line_label,
    )
    ax.set_xlabel(r"$r^2$", fontsize=13)
    ax.set_ylabel(
        "Density", fontdict={"fontsize": 13, "rotation": 0}, labelpad=40
    )
    ax.legend(loc="best")
    sbn.despine()
    if output_path is not None:
        fig.savefig(output_path, dpi=500, bbox_inches="tight")
    if show:
        plt.show()
    if close:
        plt.close(fig=fig)
    return p_value
def htmlmovie(html_index_fname,pngfile,framenos,figno):
    #=====================================
    """
    Build the text of an HTML page that plays the frames as a movie.

    Input:
       pngfile: a dictionary indexed by (frameno,figno) with value the
                corresponding png file for this figure.
       framenos: a list of frame numbers to include in movie
       figno: integer with the figure number for this movie.
    Returns:
       text for an html file that incorporates javascript to loop through the
       plots one after another.
    New 6/7/10: The html page also has buttons for controlling the movie.
    The parameter interval below is the time interval between loading
    successive images and is in milliseconds.
    The img_width and img_height parameters do not seem to have any effect.
    """
    # Page header plus the start of the preloading script; the images are
    # stored in a JS array indexed 1..num_images.
    text = """
           <html>
           <head>
           <script language="Javascript">
           <!---
           var num_images = %s; """ % len(framenos)
    text += """
             var img_width = 800;
             var img_height = 600;
             var interval = 300;
             var images = new Array();
             function preload_images()
             {
                t = document.getElementById("progress");
           """
    # Emit one preload statement per frame (JS indices start at 1).
    i = 0
    for frameno in framenos:
        i = i+1
        text += """
                t.innerHTML = "Preloading image ";
                images[%s] = new Image(img_width, img_height);
                images[%s].src = "%s";
                """ % (i,i,pngfile[frameno,figno])
    # NOTE(review): in tick() below, `frame > num_images+1` lets `frame`
    # reach num_images+1 before wrapping, so images[num_images+1] (undefined)
    # is referenced for one tick — looks like an off-by-one; confirm whether
    # the bound should be `> num_images`.
    text += """
                t.innerHTML = "";
             }
             function tick()
             {
                frame += 1;
                if (frame > num_images+1)
                    frame = 1;
                document.movie.src = images[frame].src;
                tt = setTimeout("tick()", interval);
             }
             function startup()
             {
                preload_images();
                frame = 1;
                document.movie.src = images[frame].src;
             }
             function rewind()
             {
                frame = 1;
                document.movie.src = images[frame].src;
             }
             function start()
             {
                tt = setTimeout("tick()", interval);
             }
             function pause()
             {
                clearTimeout(tt);
             }
             function restart()
             {
                tt = setTimeout("tick()", interval);
             }
             function slower()
             {
                interval = interval / 0.7;
             }
             function faster()
             {
                interval = interval * 0.7;
             }
           // --->
           </script>
           </head>
           <body onLoad="startup();">
           <form>
           &nbsp;&nbsp;&nbsp;
           <input type="button" value="Start movie" onClick="start()">
           <input type="button" value="Pause" onClick="pause()">
           <input type="button" value="Rewind" onClick="rewind()">
           <input type="button" value="Slower" onClick="slower()">
           <input type="button" value="Faster" onClick="faster()">
           <a href="%s">Plot Index</a>
           </form>
           <p><div ID="progress"></div></p>
           <img src="%s" name="movie"/>
           </body>
           </html>
           """ % (html_index_fname,pngfile[framenos[0],figno])
    return text
    # end of htmlmovie
from pathlib import Path
import re
import io
def readin_q3d_matrix_m(path: str) -> pd.DataFrame:
    """Read in a Q3D cap matrix from a .m file exported by Ansys Q3D.

    Args:
        path (str): Path to the .m file.

    Returns:
        pd.DataFrame of the cap matrix, with unnamed columns, or None when
        no ``capMatrix`` block is found in the file.
    """
    contents = Path(path).read_text()
    # Grab everything between "capMatrix " and the first closing bracket.
    blocks = re.findall(r'capMatrix (.*?)]', contents, re.DOTALL)
    if not blocks:
        return None
    raw = blocks[0].strip('= [').strip(']').strip('\n')
    return pd.read_csv(io.StringIO(raw),
                       skipinitialspace=True,
                       header=None)
def get_DB(type='mysql'):
    """Return the database adapter class for the given backend name.

    Parameters
    ----------
    type : str
        Backend identifier; one of ``'mysql'`` or ``'mongodb'``.
        (Parameter name shadows the builtin but is kept for interface
        compatibility with existing callers.)

    Returns
    -------
    The adapter class: ``MySQLAdapter`` or ``MongoAdapter``.

    Raises
    ------
    ValueError
        If ``type`` names an unsupported backend. (The original silently
        returned ``None`` here, deferring the failure to the caller.)
    """
    if type == 'mysql':
        return MySQLAdapter
    elif type == 'mongodb':
        return MongoAdapter
    raise ValueError("Unsupported database type: {!r}".format(type))
def movstd(x, window):
    """Compute the moving standard deviation of a 1D array.

    The array is processed in consecutive, non-overlapping windows of
    ``window`` elements (an odd window is shrunk by one so it splits evenly);
    every position inside a window receives that window's ``nanstd``.
    Positions not covered by a complete window are left as NaN.

    Parameters
    ----------
    x : array-like (1D)
        Input data; converted to an ndarray if necessary.
    window : int
        Evaluation window length. Smaller windows give a finer description
        of the deviation; larger windows are coarser (and faster).

    Returns
    -------
    np.ndarray
        1D vector of standard deviations, same length as ``x``.
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    # Shrink an odd window by one so it can be centered (L/2 to L/2-1).
    if window % 2:
        window = window - 1
    half = window // 2
    n = len(x)
    # Fixed: the original filled with an undefined name `medx`, which raised
    # NameError on every call. NaN is used as the explicit "not computed"
    # value for positions outside any complete window.
    y = np.full(n, np.nan)
    for ii in np.arange(half, n - half + 1, window):
        # Fixed: np.int was removed from NumPy; plain int is the replacement.
        idx = (np.arange(-half, half) + ii).astype(int)
        # idx always lies in [0, n-1] by construction, so the original
        # blanket try/except-pass was dead code and has been removed.
        y[idx] = np.nanstd(x[idx])
    return y
def get_attribute(parent, selector, attribute, index=0):
    """Get the attribute value for the child element of parent matching the given CSS selector

    If index is specified, return the attribute value for the matching child
    element with the specified zero-based index; otherwise, return the
    attribute value for the first matching child element. A negative index
    counts from the end, Python-style; an out-of-range index yields None.

    If selector is None, return the attribute value for parent instead.
    """
    if selector is None:
        return parent.get(attribute)
    values = get_attributes(parent, selector, attribute)
    # Equivalent to the original two-branch range check: the index is valid
    # exactly when it is a legal Python index into `values`.
    if -len(values) <= index < len(values):
        return values[index]
def rdd_plot(
    data, x_variable, y_variable, nbins=20, ylimits=None, frac=None, width=20.1, deg=1
):
    """ Plots a Regression Discontinuity Design graph. For this, binned
    observations are portrayed in a scatter plot.
    Uses non-parametric regression (local polynomial estimation) to fit a curve
    on the original observations.

    Args:
        data: contains DataFrame that contains the data to plot (DataFrame)
        x_variable: determines variable on x-axis, passed as the column name (string)
        y_variable: determines variable on y_axis, passed as the column name (string)
        nbins: defines number of equally sized bins (int)
        ylimits: A tuple specifying the limits of the y axis (tuple)
        frac: fraction of the data used for each local fit
        width: Bandwidth for the local polynomial estimation
        deg: degree of the polynomial to be estimated

    Returns:
        Returns the RDD Plot
    """
    # Find min and max of the running variable
    x_var, y_var = data.loc[:, x_variable], data.loc[:, y_variable]
    x_min = int(round(x_var.min()))
    x_max = int(round(x_var.max()))
    x_width = int(round(((abs(x_min) + abs(x_max)) / nbins)))
    # Get a list of tuples with the borders of each bin.
    bins = []
    for b in range(x_min, x_max, x_width):
        bins.append((b, b + x_width))

    # Find the bin a given value belongs to.
    def find_bin(value, bins):
        # NOTE(review): if `value` falls outside every bin, bin_number is
        # never assigned and an UnboundLocalError is raised — confirm inputs
        # are always within [x_min, x_max].
        for count, b in enumerate(bins):
            # Bins generally follow the structure [lower_bound, upper_bound),
            # thus do not include the upper bound.
            if count < len(bins) - 1:
                if (value >= bins[count][0]) & (value < bins[count][1]):
                    bin_number = count
            # The last bin, however, includes its upper bound.
            else:
                if (value >= bins[count][0]) & (value <= bins[count][1]):
                    bin_number = count
        return bin_number

    # Sort the running variable into bins.
    x_bin = np.zeros(len(x_var))
    i = 0
    for value in x_var.values:
        x_bin[i] = find_bin(value, bins)
        i += 1
    # Write data needed for the plot into a DataFrame
    df = pd.DataFrame(data={"x_variable": x_var, "y_variable": y_var, "x_bin": x_bin})
    # For each bin calculate the mean of affiliated values on the y-axis.
    y_bin_mean = np.zeros(len(bins))
    for n, b in enumerate(bins):
        affiliated_y_values = df.loc[x_bin == n]
        y_bin_mean[n] = affiliated_y_values.y_variable.mean()
    # For the x-axis take the mean of the bounds of each bin.
    x_bin_mean = np.zeros(len(bins))
    i = 0
    for e, t in enumerate(bins):
        x_bin_mean[i] = (bins[e][0] + bins[e][1]) / 2
        i += 1
    # Draw the actual plot for all bins of the running variable and their
    # affiliated mean in the y-variable.
    plt.scatter(x=x_bin_mean, y=y_bin_mean, s=50, c="black", alpha=1)
    plt.axvline(x=0)
    # Fixed: the original used `if ~(ylimits == None):`, where the bitwise
    # NOT of a bool (~True == -2, ~False == -1) is always truthy, so the
    # branch ran unconditionally.
    if ylimits is not None:
        plt.ylim(ylimits)
    plt.grid()
    # Implement local polynomial regression, calculate fitted values as well as
    # estimated betas.
    # This is estimated separately for the untreated state 0 and the treated state 1.
    df0 = pd.DataFrame(
        data={
            "x0": data.loc[data[x_variable] < 0][x_variable],
            "y0": data.loc[data[x_variable] < 0][y_variable],
        }
    ).sort_values(by="x0")
    df0["y0_hat"] = localreg(
        x=df0["x0"].to_numpy(),
        y=df0["y0"].to_numpy(),
        degree=deg,
        kernel=tricube,
        frac=frac,
        width=width,
    )["y"]
    for i in range(deg + 1):
        df0["beta_hat_" + str(i)] = localreg(
            x=df0["x0"].to_numpy(),
            y=df0["y0"].to_numpy(),
            degree=deg,
            kernel=tricube,
            frac=frac,
            width=width,
        )["beta"][:, i]
    df1 = pd.DataFrame(
        data={
            "x1": data.loc[data[x_variable] > 0][x_variable],
            "y1": data.loc[data[x_variable] > 0][y_variable],
        }
    ).sort_values(by="x1")
    df1["y1_hat"] = localreg(
        x=df1["x1"].to_numpy(),
        y=df1["y1"].to_numpy(),
        degree=deg,
        kernel=tricube,
        frac=frac,
        width=width,
    )["y"]
    for i in range(deg + 1):
        df1["beta_hat_" + str(i)] = localreg(
            x=df1["x1"].to_numpy(),
            y=df1["y1"].to_numpy(),
            degree=deg,
            kernel=tricube,
            frac=frac,
            width=width,
        )["beta"][:, i]
    # Calculate local standard errors
    y0_se = local_se(df=df0, kernel=tricube, deg=deg, width=width)
    y1_se = local_se(df=df1, kernel=tricube, deg=deg, width=width)
    # Calculate 95% confidence intervals around the fitted values.
    # TODO: This certainly would be faster if I would not use dictionaries!
    y0_upper_ci = np.empty(len(df0["y0"]))
    y0_lower_ci = np.empty(len(df0["y0"]))
    y1_upper_ci = np.empty(len(df1["y1"]))
    y1_lower_ci = np.empty(len(df1["y1"]))
    for count, element in enumerate(df0["x0"].array):
        y0_upper_ci[count] = df0["y0_hat"].iloc[count] + 1.96 * y0_se[str(element)]
    for count, element in enumerate(df0["x0"].array):
        y0_lower_ci[count] = df0["y0_hat"].iloc[count] - 1.96 * y0_se[str(element)]
    for count, element in enumerate(df1["x1"].array):
        y1_upper_ci[count] = df1["y1_hat"].iloc[count] + 1.96 * y1_se[str(element)]
    for count, element in enumerate(df1["x1"].array):
        y1_lower_ci[count] = df1["y1_hat"].iloc[count] - 1.96 * y1_se[str(element)]
    # Plot the RDD-Graph: fitted lines and confidence bands.
    # (The original plotted the two fitted lines twice; the duplicate block
    # has been removed.)
    plt.plot(df0.x0, df0.y0_hat, color="r")
    plt.plot(df1.x1, df1.y1_hat, color="r")
    plt.plot(df0.x0, y0_upper_ci, color="black")
    plt.plot(df0.x0, y0_lower_ci, color="black")
    plt.plot(df1.x1, y1_upper_ci, color="black")
    plt.plot(df1.x1, y1_lower_ci, color="black")
    # labels
    plt.title(label="Figure 5: Regression Discontinuity Design Plot")
    plt.xlabel("Binned margin of victory")
    plt.ylabel("Normalized rank improvement")
    # Fixed: `plt.show` was missing the call parentheses and therefore did
    # nothing.
    plt.show()
    return
import torch
def cal_head_bbox(kps, image_size):
    """
    Compute per-sample pixel bounding boxes around the head keypoints.

    Args:
        kps (torch.Tensor): (N, 19, 2) keypoints in [-1, 1] (cocoplus layout).
        image_size (int): side length of the target image in pixels.

    Returns:
        torch.Tensor: (N, 4) integer boxes as (min_x, max_x, min_y, max_y).
    """
    NECK_IDS = 12  # neck joint index in the cocoplus keypoint order
    # Map keypoints from [-1, 1] into [0, 1].
    normed = (kps + 1) / 2.0
    neck_x = normed[:, NECK_IDS, 0]
    zeros = torch.zeros_like(neck_x)
    ones = torch.ones_like(neck_x)
    # Keypoints from the neck upwards define the head region.
    head = normed[:, NECK_IDS:]
    # Pad x by 0.05 on both sides, clamped to [0, 1].
    min_x = torch.max(torch.min(head[..., 0] - 0.05, dim=1)[0], zeros)
    max_x = torch.min(torch.max(head[..., 0] + 0.05, dim=1)[0], ones)
    # Pad y by 0.05 below; the upper bound carries no margin, matching the
    # original implementation.
    min_y = torch.max(torch.min(head[..., 1] - 0.05, dim=1)[0], zeros)
    max_y = torch.min(torch.max(head[..., 1], dim=1)[0], ones)
    def to_px(t):
        # Convert normalized coordinate to integer pixel index.
        return (t * image_size).long()
    return torch.stack((to_px(min_x), to_px(max_x), to_px(min_y), to_px(max_y)), dim=1)
def my_json_render(docs, style="dep", options=None, manual=False) -> list:
    """
    Parse document(s) into the JSON-style dicts used by displaCy, without
    producing any HTML.

    Args:
        docs (list or Doc): Document(s) to visualise.
        style (unicode): Visualisation style, 'dep' or 'ent'.
        options (dict): Visualiser-specific options, e.g. colors.
        manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.

    Returns:
        list of parsed dicts, e.g. for style 'ent':
        [{'text': '...', 'ents': [{'start': 20, 'end': 24, 'label': 'ZZ'}, ...],
          'title': None, 'settings': {'lang': 'zh', 'direction': 'ltr'}}]
    """
    if options is None:
        options = {}
    factories = {
        "dep": (DependencyRenderer, parse_deps),
        "ent": (EntityRenderer, parse_ents),
    }
    if style not in factories:
        raise ValueError(Errors.E087.format(style=style))
    if isinstance(docs, (Doc, Span, dict)):
        docs = [docs]
    # Spans are converted to stand-alone Docs before parsing.
    normalized = [d.as_doc() if isinstance(d, Span) else d for d in docs]
    if not all(isinstance(d, (Doc, Span, dict)) for d in normalized):
        raise ValueError(Errors.E096)
    renderer_cls, converter = factories[style]
    # Instantiated for parity with displacy.render (may validate options).
    renderer = renderer_cls(options=options)
    if manual:
        return normalized
    return [converter(d, options) for d in normalized]
import json
def get_testcase_chain(testcase_id, case_type, chain_list=None, with_intf_system_name=None, with_extract=None,
                       only_first=False, main_case_flow_id=None, childless=False):
    """
    Build the invocation chain for testcase_id, covering both interface test
    cases (case_type == 1) and full-link/end-to-end test cases (case_type == 2).
    Setup (precondition) cases are resolved recursively and prepended.

    return example:
        [
            {
                "preCaseId": 1,
                "preCaseName": "指定手机获取验证码",
                "preCaseType": "接口用例",
                "preIntfName": "接口描述-/url/api"
            },
            {
                "preCaseId": 27,
                "preCaseName": "新户申请钱包",
                "preCaseType": "全链路用例"
            },
            {
                "preCaseId": 2,
                "preCaseName": "登录",
                "preCaseType": "接口用例"
            }
        ]
    """
    if not chain_list:
        chain_list = []
    # Guard: cap the chain length to protect against runaway recursion.
    if len(chain_list) >= 100:
        return chain_list

    if case_type == 1:
        tc_obj = ApiTestcaseInfoManager.get_testcase(id=testcase_id)
        if tc_obj:
            if with_intf_system_name:
                intf_obj = ApiIntfInfoManager.get_intf(id=tc_obj.api_intf_id)
                system_obj = ApiSystemInfoManager.get_system(id=intf_obj.api_system_id)
                chain_row_dic = {
                    "preCaseName": '{0}__{1}'.format(tc_obj.testcase_name, tc_obj.expect_result),
                    "preCaseId": tc_obj.id,
                    "preCaseType": get_desc_by_case_type(case_type),
                    "preIntfName": '{0}-{1}'.format(intf_obj.intf_desc, intf_obj.intf_name),
                    "preSystemName": system_obj.system_name
                }
                if with_extract:
                    # Parse out the variables extracted by this test case.
                    extract_v_names = get_extract_v_names(testcase_id)
                    public_v_names = get_public_v_names(tc_obj)
                    chain_row_dic.update({"extract_v_names": extract_v_names, "public_v_names": public_v_names})
                chain_list.insert(0, chain_row_dic)
            else:
                chain_row_dic = {
                    "preCaseName": '{0}__{1}'.format(tc_obj.testcase_name, tc_obj.expect_result),
                    "preCaseId": tc_obj.id,
                    "preCaseType": get_desc_by_case_type(case_type),
                }
                if with_extract:
                    # Parse out the variables extracted by this test case.
                    extract_v_names = get_extract_v_names(testcase_id)
                    public_v_names = get_public_v_names(tc_obj)
                    chain_row_dic.update({"extract_v_names": extract_v_names, "public_v_names": public_v_names})
                chain_list.insert(0, chain_row_dic)

            if childless:
                chain_list[0]['hasChildren'] = False
                return chain_list

            setup_case_list = json.loads(tc_obj.setup_case_list) if tc_obj.setup_case_list else []
            setup_case_list.reverse()
            if setup_case_list:
                if only_first:
                    chain_list[0]['hasChildren'] = True
                    return chain_list
                else:
                    # Recurse to resolve the setup (precondition) cases.
                    for setup_case_str in setup_case_list:
                        setup_case_type, setup_case_id, option = parse_setup_case_str(setup_case_str)
                        kwargs = {
                            'chain_list': chain_list,
                            'with_intf_system_name': with_intf_system_name,
                            'with_extract': with_extract
                        }
                        if setup_case_type == 1:
                            if option == 'self':
                                kwargs['childless'] = True
                        elif setup_case_type == 2:
                            kwargs['main_case_flow_id'] = option
                        chain_list = get_testcase_chain(setup_case_id, setup_case_type, **kwargs)
                        # setup_case_type, setup_case_id, setup_case_flow_id = parse_setup_case_str(setup_case_str)
                        # chain_list = get_testcase_chain(
                        #     setup_case_id, setup_case_type, chain_list=chain_list,
                        #     with_intf_system_name=with_intf_system_name, with_extract=with_extract,
                        #     main_case_flow_id=setup_case_flow_id
                        # )
            else:
                if only_first:
                    chain_list[0]['hasChildren'] = False
                    return chain_list
        return chain_list

    elif case_type == 2:
        tm_obj = ApiTestcaseMainManager.get_testcase_main(id=testcase_id)
        if tm_obj:
            chain_list.insert(
                0,
                {
                    "preCaseName": '{0}__{1}'.format(tm_obj.testcase_name, tm_obj.expect_result),
                    "preCaseId": tm_obj.id,
                    "preCaseType": get_desc_by_case_type(case_type),
                    "preIntfName": '',
                    "preSystemName": '',
                    "customFlowId": None,
                    "customFlowName": ''
                }
            )
            if only_first:
                chain_list[0]['hasChildren'] = False
            if main_case_flow_id:
                flow_obj = ApiTestcaseMainCustomFlowManager.get_flow(id=main_case_flow_id)
                if flow_obj:
                    chain_list[0]['customFlowName'] = flow_obj.flow_name
                    chain_list[0]['customFlowId'] = flow_obj.id
        return chain_list
from typing import Iterable
def approximate_parameter_profile(
        problem: Problem,
        result: Result,
        profile_index: Iterable[int] = None,
        profile_list: int = None,
        result_index: int = 0,
        n_steps: int = 100,
) -> Result:
    """
    Calculate profiles based on an approximation via a normal likelihood
    centered at the chosen optimal parameter value, with the covariance matrix
    being the Hessian or FIM.

    Parameters
    ----------
    problem:
        The problem to be solved.
    result:
        A result object to initialize profiling and to append the profiling
        results to. For example, one might append more profiling runs to a
        previous profile, in order to merge these.
        The existence of an optimization result is obligatory.
    profile_index:
        List with the profile indices to be computed
        (by default all of the free parameters).
    profile_list:
        Integer which specifies whether a call to the profiler should create
        a new list of profiles (default) or should be added to a specific
        profile list.
    result_index:
        Index from which optimization result profiling should be started
        (default: global optimum, i.e., index = 0).
    n_steps:
        Number of profile steps in each dimension.

    Returns
    -------
    result:
        The profile results are filled into `result.profile_result`.
    """
    # Handling defaults
    # profiling indices
    if profile_index is None:
        profile_index = problem.x_free_indices
    # create the profile result object (retrieve global optimum) or append to
    # existing list of profiles
    global_opt = initialize_profile(problem, result, result_index,
                                    profile_index, profile_list)
    # extract optimization result
    optimizer_result = result.optimize_result.list[result_index]
    # extract values of interest
    x = optimizer_result.x
    fval = optimizer_result.fval
    hess = problem.get_reduced_matrix(optimizer_result.hess)
    # ratio scaling factor: likelihood ratio of the global optimum vs this
    # start, so ratios are comparable across starts
    ratio_scaling = np.exp(global_opt - fval)
    # we need the hessian - compute if not provided or fishy
    if hess is None or np.isnan(hess).any():
        logger.info("Computing Hessian/FIM as not available in result.")
        hess = problem.objective(
            problem.get_reduced_vector(x), sensi_orders=(2,))
    # inverse of the hessian serves as the covariance of the normal
    # approximation
    sigma = np.linalg.inv(hess)
    # the steps: an equidistant grid over the full parameter box
    xs = np.linspace(problem.lb_full, problem.ub_full, n_steps).T
    # loop over parameters for profiling
    for i_par in profile_index:
        # not requested or fixed -> compute no profile
        if i_par in problem.x_fixed_indices:
            continue
        i_free_par = problem.full_index_to_free_index(i_par)
        # 1-D marginal of the normal approximation along this parameter
        ys = multivariate_normal.pdf(xs[i_par], mean=x[i_par],
                                     cov=sigma[i_free_par, i_free_par])
        fvals = - np.log(ys)
        # normalize to the peak and rescale relative to the global optimum
        ratios = ys / ys.max() * ratio_scaling
        profiler_result = ProfilerResult(
            x_path=xs,
            fval_path=fvals,
            ratio_path=ratios
        )
        result.profile_result.set_profiler_result(
            profiler_result=profiler_result,
            i_par=i_par, profile_list=profile_list)
    return result
def _process_input(data, context):
""" pre-process request input before it is sent to
TensorFlow Serving REST API
Args:
data (obj): the request data, in format of dict or string
context (Context): object containing request and configuration details
Returns:
(dict): a JSON-serializable dict that contains request body and headers
"""
if context.request_content_type == 'application/json':
data = data.read().decode("utf-8")
return data if len(data) else ''
raise ValueError('{{"error": "unsupported content type {}"}}'.format(
context.request_content_type or "unknown"
)) | 05d48d327613df156a5a3b6ec76e6e5023fa54ca | 23,693 |
from ibmsecurity.appliance.ibmappliance import IBMError
def update_policies(isamAppliance, name, policies, action, check_mode=False, force=False):
    """
    Update a specified policy set's policies (add/remove/set).

    Note: Please input policies as an array of policy names (it will be
    converted to ids). When nothing would change and force is not set, an
    unchanged return object is produced.
    """
    pol_id, update_required, json_data = _check_policies(isamAppliance, name, policies, action)
    if pol_id is None:
        raise IBMError("999", "Cannot update data for unknown policy set: {0}".format(name))
    # Guard clause: nothing to do unless forced or an update is required
    # (`is True` kept to preserve the original strict-identity semantics).
    if not (force is True or update_required is True):
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_put(
        "Update a specified policy set",
        "{0}/{1}/policies{2}".format(uri, pol_id, tools.create_query_string(action=action)), json_data)
def is_bullish_engulfing(previous: Candlestick, current: Candlestick) -> bool:
    """Return True when *current* bullishly engulfs the body of *previous*.

    Only the candle bodies are compared; wicks and tails are not included.
    """
    if not previous.is_bearish:
        return False
    if not current.is_bullish:
        return False
    return current.open <= previous.close and current.close > previous.open
import math
def yaw_cov_to_quaternion_cov(yaw, yaw_covariance):
    """Transform a yaw covariance into quaternion coordinates.

    Performs :math:`C_q = R \\, C_{\\theta} \\, R^T` where
    :math:`C_{\\theta}` is the (scalar) yaw covariance and :math:`R` is the
    Jacobian of the yaw-to-quaternion map, the column vector

    .. math::
        R = \\begin{pmatrix}
            0 \\\\ 0 \\\\
            \\tfrac{1}{2} \\cos \\tfrac{1}{2}\\theta \\\\
            -\\tfrac{1}{2} \\sin \\tfrac{1}{2}\\theta
        \\end{pmatrix}

    :param yaw: Yaw of the vehicle in radians
    :type yaw: float
    :param yaw_covariance: Variance of the yaw estimate
    :return: The yaw covariance transformed to quaternion coordinates.
    :rtype: 4x4 numpy array
    """
    half_yaw = 0.5 * yaw
    # Jacobian as a (4, 1) column vector: (dx, dy, dz, dw) / d(theta).
    jac = np.array([[0.0],
                    [0.0],
                    [0.5 * math.cos(half_yaw)],
                    [-0.5 * math.sin(half_yaw)]])
    return jac.dot(yaw_covariance).dot(jac.T)
import logging
async def token(req: web.Request) -> web.Response:
    """Mock auth token endpoint.

    Builds an OIDC-style ID token from the module-level test-user globals,
    signs it with the mock JWK pair, and returns it next to a dummy access
    token as JSON.
    """
    # These globals are set elsewhere (e.g. by the login/setup handlers).
    global nonce, user_eppn, user_family_name, user_given_name
    # Static ID-token claims with the mutable user fields spliced in;
    # exp is far in the future so the token never expires during tests.
    id_token = {
        "at_hash": "fSi3VUa5i2o2SgY5gPJZgg",
        "sub": "smth",
        "eduPersonAffiliation": "member;staff",
        "eppn": user_eppn,
        "displayName": f"{user_given_name} {user_family_name}",
        "iss": "http://mockauth:8000",
        "schacHomeOrganizationType": "urn:schac:homeOrganizationType:test:other",
        "given_name": user_given_name,
        "nonce": nonce,
        "aud": "aud2",
        "acr": "http://mockauth:8000/LoginHaka",
        "nsAccountLock": "false",
        "eduPersonScopedAffiliation": "staff@test.what;member@test.what",
        "auth_time": 1606579533,
        "name": f"{user_given_name} {user_family_name}",
        "schacHomeOrganization": "test.what",
        "exp": 9999999999,
        "iat": 1561621913,
        "family_name": user_family_name,
        "email": user_eppn,
    }
    # Sign with the private half of the module-level JWK pair
    # (header and jwk_pair are defined at module scope).
    data = {"access_token": "test", "id_token": jwt.encode(header, id_token, jwk_pair[1]).decode("utf-8")}
    logging.info(data)
    return web.json_response(data)
def split_train_test(observations, train_percentage):
    """Split observations into a train and a test set along the points axis.

    Args:
        observations: Observations to split in train and test. They can be the
            representation or the observed factors of variation. The shape is
            (num_dimensions, num_points) and the split is over the points.
        train_percentage: Fraction of observations to be used for training.

    Returns:
        Tuple (observations_train, observations_test).
    """
    total_points = observations.shape[1]
    # Round up so the training set never loses a point to truncation.
    n_train = int(np.ceil(total_points * train_percentage))
    train_split = observations[:, :n_train]
    test_split = observations[:, n_train:]
    assert test_split.shape[1] == total_points - n_train, \
        "Wrong size of the test set."
    return train_split, test_split
def InitF11(frame):
    """F11 to give keyboard focus to the shell

    (Docstring previously said "F6 to navigate between regions", but the
    accelerator below binds F11 to shell focus.)

    :param frame: see InitShorcuts->param
    :type frame: idem
    :return: entrie(here tuple) for AcceleratorTable
    :rtype: tuple(int, int, int)
    """
    # NOTE(review): wx invokes menu handlers with an event argument, while
    # ``SetFocus`` takes none — confirm this binding fires without error.
    frame.Bind(wx.EVT_MENU, frame.shell.SetFocus, id=wx.ID_SHELL_FOCUS)
    return (wx.ACCEL_NORMAL, wx.WXK_F11, wx.ID_SHELL_FOCUS)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.