| content (string, 22 to 815k chars) | id (int64, 0 to 4.91M) |
|---|---|
def create_work_subdirs(subdirs: List[str]):
"""Create |subdirs| in work directory."""
for subdir in subdirs:
os.mkdir(os.path.join(experiment_utils.get_work_dir(), subdir))
| 18,800
|
def test_from_rsid(rsids, start_rsid):
"""Continue collecting publications for rsids in list, beginning with start_rsid
Args:
rsids (list): list of rsids to collect publications on
start_rsid (str): rsid identifier to resume collecting publications on
Returns:
runtime_rsids (list): [start_rsid, onward...]
start_rsid (str): starting rsid
start_idx (int): starting rsid index
rsids (list): [original list of ALL rsids]
"""
start_idx = rsids.index(start_rsid) # start_rsid index
print(f"STARTING POINT SET TO: | INDEX: {start_idx} / {len(rsids)} | RSID: {rsids[start_idx]}")
runtime_rsids = rsids[start_idx:] # runtime rsids
return runtime_rsids, start_rsid, start_idx, rsids
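# Minimal usage sketch (hypothetical rsid values): resuming from "rs3" keeps
# everything from that index onward.
runtime, start, idx, full = test_from_rsid(["rs1", "rs2", "rs3", "rs4"], "rs3")
assert runtime == ["rs3", "rs4"] and start == "rs3" and idx == 2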
| 18,801
|
def _average_scada(times, values, nvalues):
"""
Function which down samples scada values.
:param times: Unix times of the data points.
:param values: Corresponding sensor value
:param nvalues: Number of samples we average over.
:return: down-sampled time values and the corresponding averaged sensor values.
"""
if len(times) % nvalues:
nsamples = (len(times) // nvalues) - 1
else:
nsamples = (len(times) // nvalues)
res = np.zeros(nsamples, dtype=np.float32)
new_times = np.zeros(nsamples, dtype=np.int64)
for ind in range(nsamples):
res[ind] = np.mean(values[ind * nvalues:(ind + 1) * nvalues])
new_times[ind] = np.mean(times[ind * nvalues:(ind + 1) * nvalues])
return new_times, res
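# Usage sketch (assumes numpy is imported as np, as in the function body):
# averaging 10 one-second samples in blocks of 5 yields 2 down-sampled points.
times = np.arange(10, dtype=np.int64)
values = np.arange(10, dtype=np.float32)
new_times, averaged = _average_scada(times, values, 5)
# new_times -> [2, 7]; averaged -> [2.0, 7.0]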
| 18,802
|
def main():
"""
Test harness
"""
def game_factory():
"""
Creates the game we need
"""
return Maze(Layout.from_string(Layout.MEDIUM_STR))
bot_factory = PlannedBot
trainer = BotTrainer(game_factory, bot_factory, 16, 2, goal_score=13)
start_time = time()
generations, result = trainer.breed_best_bot()
end_time = time()
msg = 'After {} generations, the bot {} the game'.format(
generations, 'won' if result.finished else 'lost')
print(msg)
print('Elapsed time:', int(end_time - start_time + 0.5), 'seconds')
print('Bot score:', result.score)
print('Bot plan:', result.player.moves)
| 18,803
|
def behavior_of(classname):
"""
Finds and loads the behavior class for C++ (decoded) classname or returns
None if there isn't one.
Behaviors do not have a required base class, and they may be used with
Awkward Array's ``ak.behavior``.
The search strategy for finding behavior classes is:
1. Translate the ROOT class name from C++ to Python with
:py:func:`~uproot4.model.classname_encode`. For example,
``"ROOT::RThing"`` becomes ``"Model_ROOT_3a3a_RThing"``.
2. Look for a submodule of ``uproot4.behaviors`` without
the ``"Model_"`` prefix. For example, ``"ROOT_3a3a_RThing"``.
3. Look for a class in that submodule with the fully encoded
name. For example, ``"Model_ROOT_3a3a_RThing"``.
See :py:mod:`uproot4.behaviors` for details.
"""
name = classname_encode(classname)
assert name.startswith("Model_")
name = name[6:]
if name not in globals():
if name in behavior_of._module_names:
exec(
compile(
"import uproot4.behaviors.{0}".format(name), "<dynamic>", "exec"
),
globals(),
)
module = eval("uproot4.behaviors.{0}".format(name))
behavior_cls = getattr(module, name, None)
if behavior_cls is not None:
globals()[name] = behavior_cls
return globals().get(name)
| 18,804
|
def test_feed_from_annotations_with_3_annotations():
"""If there are 3 annotations it should return 3 entries."""
annotations = [factories.Annotation(), factories.Annotation(),
factories.Annotation()]
feed = rss.feed_from_annotations(
annotations, _annotation_url(), mock.Mock(), '', '', '')
assert len(feed['entries']) == 3
| 18,805
|
def _abs_user_path(fpath):
"""don't overload the ap type"""
return os.path.abspath(os.path.expanduser(fpath))
| 18,806
|
def save_tmp(config):
"""Save the configuration to a temp file.
The global state of ReachMaster is tracked with a temp
file. Whenever a child window of the main application is
created or destroyed, the temp file is updated to reflect
whatever changes in settings may have occurred. If the
user forgets to save the configuration file at the end of
a session, the temp file can be used as a backup. It is
saved to the temp folder in the ReachMaster root directory.
**Warning: temp file is reset to defaults as a new session
is started!**
Parameters
----------
config : dict
The currently loaded configuration.
"""
configPath = "./temp/"
if not os.path.isdir(configPath):
os.makedirs(configPath)
fn = configPath + 'tmp_config.txt'
with open(fn, 'w') as outfile:
json.dump(config, outfile, indent=4)
| 18,807
|
def adjust(data):
"""Calculate the mean of a list of values and subtract the mean from every element
in the list, making a new list.
Returns tuple of mean, list of adjusted values
"""
mu = mean(data)
return mu, [x - mu for x in data]
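# Usage sketch (assumes `mean` is statistics.mean or an equivalent):
mu, centred = adjust([2.0, 4.0, 6.0])
# mu -> 4.0, centred -> [-2.0, 0.0, 2.0]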
| 18,808
|
def clear():
"""
Clears the world, and then returns the cleared representation
"""
myWorld.clear()
return jsonify(myWorld.world())
| 18,809
|
def handle_evap_mode(j,brivis_status):
"""Parse evap part of JSON."""
# pylint: disable=too-many-branches,too-many-statements
cfg = get_attribute(j[1].get("ECOM"),"CFG",None)
if not cfg:
# Probably an error
_LOGGER.error("No CFG - Not happy, Jan")
else:
if y_n_to_bool(get_attribute(cfg, "ZAIS", None)):
brivis_status.heater_status.zones.append("A")
if y_n_to_bool(get_attribute(cfg, "ZBIS", None)):
brivis_status.heater_status.zones.append("B")
if y_n_to_bool(get_attribute(cfg, "ZCIS", None)):
brivis_status.heater_status.zones.append("C")
if y_n_to_bool(get_attribute(cfg, "ZDIS", None)):
brivis_status.heater_status.zones.append("D")
gso = get_attribute(j[1].get("ECOM"),"GSO",None)
if not gso:
_LOGGER.error("No GSO here")
else:
#_LOGGER.debug("Looking at: {}".format(gso))
switch = get_attribute(gso,"SW", None)
if switch == "N":
opmode = get_attribute(gso, "OP", None)
#_LOGGER.debug("setting opmode: {}".format(opmode))
brivis_status.evap_status.set_mode(opmode)
_LOGGER.debug("EVAP is ON")
brivis_status.system_on = True
brivis_status.evap_status.evap_on = True
if opmode == "M":
# Evap is on and manual - what is the fan speed
evap_fan = get_attribute(gso,"FS",None)
_LOGGER.debug("Fan is: %s", evap_fan)
brivis_status.evap_status.set_fan(evap_fan)
fan_speed = get_attribute(gso,"FL",None)
_LOGGER.debug("Fan Speed is: %s", fan_speed)
brivis_status.evap_status.set_fan_speed(int(fan_speed))
water_pump = get_attribute(gso,"PS",None)
_LOGGER.debug("Water Pump is: %s", water_pump)
brivis_status.evap_status.set_water_pump(water_pump)
brivis_status.evap_status.zone_a = y_n_to_bool(get_attribute(gso,"ZAUE",False))
brivis_status.evap_status.zone_b = y_n_to_bool(get_attribute(gso,"ZBUE",False))
brivis_status.evap_status.zone_c = y_n_to_bool(get_attribute(gso,"ZCUE",False))
brivis_status.evap_status.zone_d = y_n_to_bool(get_attribute(gso,"ZDUE",False))
else:
# Evap is on and auto - look for comfort level
comfort = get_attribute(gso, "SP", 0)
_LOGGER.debug("Comfort Level is: %s", comfort)
brivis_status.evap_status.set_comfort(comfort)
brivis_status.evap_status.zone_a = False
brivis_status.evap_status.zone_b = False
brivis_status.evap_status.zone_c = False
brivis_status.evap_status.zone_d = False
gss = get_attribute(j[1].get("ECOM"),"GSS",None)
if not gss:
_LOGGER.error("No GSS here")
else:
brivis_status.evap_status.common_auto = y_n_to_bool(get_attribute(gss,"ZUAE",False))
brivis_status.evap_status.zone_a_auto = y_n_to_bool(get_attribute(gss,"ZAAE",False))
brivis_status.evap_status.zone_b_auto = y_n_to_bool(get_attribute(gss,"ZBAE",False))
brivis_status.evap_status.zone_c_auto = y_n_to_bool(get_attribute(gss,"ZCAE",False))
brivis_status.evap_status.zone_d_auto = y_n_to_bool(get_attribute(gss,"ZDAE",False))
brivis_status.evap_status.prewetting = y_n_to_bool(get_attribute(gss,"PW",False))
brivis_status.evap_status.cooler_busy = y_n_to_bool(get_attribute(gss,"BY",False))
elif switch == "F":
# Evap is off
_LOGGER.debug("EVAP is OFF")
brivis_status.system_on = False
brivis_status.evap_status.evap_on = False
brivis_status.evap_status.zone_a = False
brivis_status.evap_status.zone_b = False
brivis_status.evap_status.zone_c = False
brivis_status.evap_status.zone_d = False
brivis_status.evap_status.common_auto = False
brivis_status.evap_status.zone_a_auto = False
brivis_status.evap_status.zone_b_auto = False
brivis_status.evap_status.zone_c_auto = False
brivis_status.evap_status.zone_d_auto = False
| 18,810
|
def _create_terminal_writer_factory(output: Optional[TextIO]):
"""
A factory method for creating a `create_terminal_writer` function.
:param output: The receiver of all original pytest output.
"""
def _create_terminal_writer(config: Config, _file: Optional[TextIO] = None) -> TerminalWriter:
file = output if output is not None else get_sink_io()
return create_terminal_writer(config, file)
return _create_terminal_writer
| 18,811
|
def filter(
f: typing.Callable,
stage: Stage = pypeln_utils.UNDEFINED,
workers: int = 1,
maxsize: int = 0,
timeout: float = 0,
on_start: typing.Callable = None,
on_done: typing.Callable = None,
) -> Stage:
"""
Creates a stage that filters the data given a predicate function `f`, exactly like Python's built-in `filter` function.
```python
import pypeln as pl
import time
from random import random
def slow_gt3(x):
time.sleep(random()) # <= some slow computation
return x > 3
data = range(10) # [0, 1, 2, ..., 9]
stage = pl.sync.filter(slow_gt3, data, workers=3, maxsize=4)
data = list(stage) # [3, 4, 5, ..., 9]
```
Arguments:
f: A function with signature `f(x, **kwargs) -> bool`, where `kwargs` is the return of `on_start` if present.
stage: A stage or iterable.
workers: This parameter is not used and only kept for API compatibility with the other modules.
maxsize: This parameter is not used and only kept for API compatibility with the other modules.
timeout: Seconds before stopping the worker if its current task is not yet completed. Defaults to `0`, which means it is unbounded.
on_start: A function with signature `on_start(worker_info?) -> kwargs`, where `kwargs` can be a `dict` of keyword arguments that will be passed to `f` and `on_done`. If you define a `worker_info` argument, an object with information about the worker will be passed. This function is executed once per worker at the beginning.
on_done: A function with signature `on_done(stage_status?, **kwargs)`, where `kwargs` is the return of `on_start` if present. If you define a `stage_status` argument an object with information about the stage will be passed. This function is executed once per worker when the worker finishes.
!!! warning
To implement `timeout` we use `stopit.async_raise`, which has some limitations for stopping threads.
Returns:
If the `stage` parameter is given then this function returns a new stage, else it returns a `Partial`.
"""
if pypeln_utils.is_undefined(stage):
return pypeln_utils.Partial(
lambda stage: filter(
f,
stage=stage,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
)
)
stage = to_stage(stage)
return Filter(
f=f, on_start=on_start, on_done=on_done, timeout=timeout, dependencies=[stage],
)
| 18,812
|
def function_factory(model, loss, dataset):
"""A factory to create a function required by tfp.optimizer.lbfgs_minimize.
Args:
model [in]: an instance of `tf.keras.Model` or its subclasses.
loss [in]: a function with signature loss_value = loss(Omega_Omegabar, det_omega, mass), as used below.
dataset [in]: a dataset yielding (points, Omega_Omegabar, mass, restriction) batches.
Returns:
A function that has a signature of:
loss_value, gradients = f(model_parameters).
"""
# obtain the shapes of all trainable parameters in the model
shapes = tf.shape_n(model.trainable_variables)
n_tensors = len(shapes)
# we'll use tf.dynamic_stitch and tf.dynamic_partition later, so we need to
# prepare required information first
count = 0
idx = [] # stitch indices
part = [] # partition indices
for i, shape in enumerate(shapes):
n = numpy.prod(shape)
idx.append(tf.reshape(tf.range(count, count+n, dtype=tf.int32), shape))
part.extend([i]*n)
count += n
part = tf.constant(part)
@tf.function
@tf.autograph.experimental.do_not_convert
def assign_new_model_parameters(params_1d):
"""A function updating the model's parameters with a 1D tf.Tensor.
Args:
params_1d [in]: a 1D tf.Tensor representing the model's trainable parameters.
"""
params = tf.dynamic_partition(params_1d, part, n_tensors)
for i, (shape, param) in enumerate(zip(shapes, params)):
model.trainable_variables[i].assign(tf.reshape(param, shape))
#tf.print(model.trainable_variables[i])
@tf.function
def volume_form(x, Omega_Omegabar, mass, restriction):
kahler_metric = complex_math.complex_hessian(tf.math.real(model(x)), x)
volume_form = tf.math.real(tf.linalg.det(tf.matmul(restriction, tf.matmul(kahler_metric, restriction, adjoint_b=True))))
weights = mass / tf.reduce_sum(mass)
factor = tf.reduce_sum(weights * volume_form / Omega_Omegabar)
#factor = tf.constant(35.1774, dtype=tf.complex64)
return volume_form / factor
# now create a function that will be returned by this factory
def f(params_1d):
"""A function that can be used by tfp.optimizer.lbfgs_minimize.
This function is created by function_factory.
Args:
params_1d [in]: a 1D tf.Tensor.
Returns:
A scalar loss and the gradients w.r.t. the `params_1d`.
"""
# use GradientTape so that we can calculate the gradient of loss w.r.t. parameters
for step, (points, Omega_Omegabar, mass, restriction) in enumerate(dataset):
with tf.GradientTape() as tape:
# update the parameters in the model
assign_new_model_parameters(params_1d)
# calculate the loss
det_omega = volume_form(points, Omega_Omegabar, mass, restriction)
loss_value = loss(Omega_Omegabar, det_omega, mass)
# calculate gradients and convert to 1D tf.Tensor
grads = tape.gradient(loss_value, model.trainable_variables)
grads = tf.dynamic_stitch(idx, grads)
# reweight the loss and grads
mass_sum = tf.reduce_sum(mass)
try:
total_loss += loss_value * mass_sum
total_grads += grads * mass_sum
total_mass += mass_sum
except NameError:
total_loss = loss_value * mass_sum
total_grads = grads * mass_sum
total_mass = mass_sum
total_loss = total_loss / total_mass
total_grads = total_grads / total_mass
# print out iteration & loss
f.iter.assign_add(1)
tf.print("Iter:", f.iter, "loss:", total_loss)
# store loss value so we can retrieve later
tf.py_function(f.history.append, inp=[total_loss], Tout=[])
return total_loss, total_grads
# store these information as members so we can use them outside the scope
f.iter = tf.Variable(0)
f.idx = idx
f.part = part
f.shapes = shapes
f.assign_new_model_parameters = assign_new_model_parameters
f.history = []
return f
| 18,813
|
def main():
"""Main script function.
"""
# arguments
parser = ArgumentParser(
prog='multi-ear-uart',
description=('Sensorboard serial readout with data storage'
'in a local influx database.'),
)
parser.add_argument(
'-i', '--ini', metavar='..', type=str, default='config.ini',
help='Path to configuration file'
)
parser.add_argument(
'-j', '--journald', action='store_true', default=False,
help='Log to systemd journal'
)
parser.add_argument(
'--dry-run', action='store_true', default=False,
help='Serial read without storage in the influx database'
)
parser.add_argument(
'--debug', action='store_true', default=False,
help='Make the operation a lot more talkative'
)
parser.add_argument(
'--version', action='version', version=version,
help='Print the version and exit'
)
args = parser.parse_args()
uart = UART(
config_file=args.ini,
journald=args.journald,
debug=args.debug,
dry_run=args.dry_run,
)
uart.readout()
| 18,814
|
def nn_CPRAND(tensor,rank,n_samples,n_samples_err,factors=None,exact_err=False,it_max=100,err_it_max=20,tol=1e-7,list_factors=False,time_rec=False):
"""
CPRAND for CP decomposition in the non-negative case, using err_rand.
Compared to the base version, this adds the n_samples_err argument and
also returns the exact/estimated error.
Parameters
----------
tensor : tensor
rank : int
n_samples : int
sample size
n_samples_err : int
sample size used for error estimation.
factors : list of matrices, optional
initial non negative factor matrices. The default is None.
exact_err : boolean, optional
whether to use err or err_rand_fast for the termination criterion. The default is False.
(not useful for this version)
it_max : int, optional
maximum number of iterations. The default is 100.
err_it_max : int, optional
maximum number of iterations without improvement of the termination criterion. The default is 20.
tol : float, optional
error tolerance. The default is 1e-7.
list_factors : boolean, optional
If true, then return factor matrices of each iteration. The default is False.
time_rec : boolean, optional
If true, return computation time of each iteration. The default is False.
Returns
-------
the CP decomposition, the number of iterations and the exact / estimated termination criterion.
list_fac and list_time are optional.
"""
N=tl.ndim(tensor) # order of tensor
norm_tensor=tl.norm(tensor) # norm of tensor
if list_factors==True : list_fac=[]
if time_rec == True : list_time=[]
if factors is None: factors=svd_init_fac(tensor,rank)
if list_factors==True : list_fac.append(copy.deepcopy(factors))
weights=None
it=0
err_it=0
########################################
######### error initialization #########
########################################
temp,ind_err=err_rand(tensor,weights,factors,n_samples_err)
error=[temp/norm_tensor]
min_err=error[len(error)-1]
rng = tl.check_random_state(None)
while (min_err>tol and it<it_max and err_it<err_it_max):
if time_rec == True : tic=time.time()
for n in range(N):
Zs,indices=sample_khatri_rao(factors,n_samples,skip_matrix=n,random_state=rng)
indices_list = [i.tolist() for i in indices]
indices_list.insert(n, slice(None, None, None))
indices_list = tuple(indices_list)
if (n==0) :sampled_unfolding = tensor[indices_list]
else : sampled_unfolding =tl.transpose(tensor[indices_list])
V=tl.dot(tl.transpose(Zs),Zs)
W=tl.dot(sampled_unfolding,Zs)
# update
fac, _, _, _ = hals_nnls(tl.transpose(W), V,tl.transpose(factors[n]))
factors[n]=tl.transpose(fac)
if list_factors==True : list_fac.append(copy.deepcopy(factors))
it=it+1
################################
######### error update #########
################################
error.append(err_rand(tensor,weights,factors,n_samples_err,ind_err)[0]/norm_tensor) # same indices used as for the Random Least Squares calculation
if (error[len(error)-1]<min_err) : min_err=error[len(error)-1] # err update
else : err_it=err_it+1
if time_rec == True :
toc=time.time()
list_time.append(toc-tic)
if time_rec == True and list_factors==True: return(weights,factors,it,error,list_fac,list_time)
if list_factors==True : return(weights,factors,it,error,list_fac)
if time_rec==True : return(weights,factors,it,error,list_time)
return(weights,factors,it,error)
| 18,815
|
def parametrize_application_yml(argvalues, ids):
"""
Define parameters for testable application definition.
Args:
argvalues (list): Parametrize args values
ids (list): Parametrize ID
"""
from accelpy._application import Application
with scandir(dirname(realpath(__file__))) as entries:
for entry in entries:
name, ext = splitext(entry.name)
if ext == '.yml' and name.startswith('test_'):
app = Application(entry.path)
if 'test' not in app._definition and not app.providers:
continue
name = name.split("_", 1)[1].replace('_', '-')
for provider in app.providers:
ids.append(f'{name}_{provider.replace(",", "-")}')
argvalues.append(dict(path=entry.path, provider=provider))
| 18,816
|
def get_base_url(host_name, customer_id):
"""
:arg host_name: the host name of the IDNow gateway server
:arg customer_id: your customer id
:returns: the base URL of the IDNow API for the selected customer
"""
return 'https://{0}/api/v1/{1}'.format(host_name, customer_id)
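# Usage sketch (hypothetical host name and customer id):
assert get_base_url("gateway.idnow.example", "mycompany") == 'https://gateway.idnow.example/api/v1/mycompany'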
| 18,817
|
def sample_category(user, **params):
"""Create and return a sample category"""
defaults = {
'name': 'Sample category',
'persian_title': 'persian',
'parent_category': None
}
defaults.update(params)
return Category.objects.create(user=user, **defaults)
| 18,818
|
def lindbladian_average_infid_set(
propagators: dict, instructions: Dict[str, Instruction], index, dims, n_eval
):
"""
Mean average fidelity over all gates in propagators.
Parameters
----------
propagators : dict
Contains unitary representations of the gates, identified by a key.
instructions : Dict[str, Instruction]
Gate instructions used to look up the ideal gate for each propagator key.
index : int
Index of the qubit(s) in the Hilbert space to be evaluated
dims : list
List of dimensions of qubits
n_eval : int
Number of evaluations (not used in this function)
Returns
-------
tf.float64
Mean average fidelity
"""
infids = []
for gate, propagator in propagators.items():
perfect_gate = instructions[gate].get_ideal_gate(dims)
infid = lindbladian_average_infid(perfect_gate, propagator, index, dims)
infids.append(infid)
return tf.reduce_mean(infids)
| 18,819
|
def edit(request, course_id):
"""
Teacher form for editing a course
"""
course = get_object_or_404(Course, id=course_id)
courseForm = CourseForm(request.POST or None, instance=course)
if request.method == 'POST': # Form was submitted
if courseForm.is_valid():
courseForm.save()
messages.add_message(request, messages.SUCCESS,
f'The course {course.code} - {course.title} was altered!')
return redirect('course:index')
return render(request, 'course/edit.html', {'form': courseForm})
| 18,820
|
def _build_ontology_embedded_list():
""" Helper function intended to be used to create the embedded list for ontology.
All types should implement a function like this going forward.
"""
synonym_terms_embed = DependencyEmbedder.embed_defaults_for_type(base_path='synonym_terms',
t='ontology_term')
definition_terms_embed = DependencyEmbedder.embed_defaults_for_type(base_path='definition_terms',
t='ontology_term')
return synonym_terms_embed + definition_terms_embed
| 18,821
|
def remap_expn_sample_names(expn):
"""
Super-specific method to remap the sample names.
"""
names = expn.getConditionNames()
new_names = []
for n in names:
if n in sample_description:
new_names.append(sample_description[n])
else:
new_names.append('?%s' % n)
expn.setConditionNames(new_names)
| 18,822
|
def interp1d(x,y,xi,axis=None,extrap=True):
"""
Args:
x (uniformly sampled vector/array): sampled x values
y (array): sampled y values
xi (array): x values to interpolate onto
axis (int): axis along which to interpolate.
extrap (bool): if True, use linear extrapolation based on the extreme values.
If false, nearest neighbour is used for extrapolation instead.
"""
x=np.asarray(x)
if axis is None:
axis=get_axis(x)
return mathx.interp1d_lin_reg(zero(x,axis),delta(x,axis),y,xi,axis,extrap)
| 18,823
|
def _back_operate(
servicer, callback, work_pool, transmission_pool, utility_pool,
termination_action, ticket, default_timeout, maximum_timeout):
"""Constructs objects necessary for back-side operation management.
Also begins back-side operation by feeding the first received ticket into the
constructed _interfaces.ReceptionManager.
Args:
servicer: An interfaces.Servicer for servicing operations.
callback: A callable that accepts packets.BackToFrontPackets and delivers
them to the other side of the operation. Execution of this callable may
take any arbitrary length of time.
work_pool: A thread pool in which to execute customer code.
transmission_pool: A thread pool to use for transmitting to the other side
of the operation.
utility_pool: A thread pool for utility tasks.
termination_action: A no-arg behavior to be called upon operation
completion.
ticket: The first packets.FrontToBackPacket received for the operation.
default_timeout: A length of time in seconds to be used as the default
time allotted for a single operation.
maximum_timeout: A length of time in seconds to be used as the maximum
time allotted for a single operation.
Returns:
The _interfaces.ReceptionManager to be used for the operation.
"""
lock = threading.Lock()
with lock:
termination_manager = _termination.back_termination_manager(
work_pool, utility_pool, termination_action, ticket.subscription)
transmission_manager = _transmission.back_transmission_manager(
lock, transmission_pool, callback, ticket.operation_id,
termination_manager, ticket.subscription)
operation_context = _context.OperationContext(
lock, ticket.operation_id, packets.Kind.SERVICER_FAILURE,
termination_manager, transmission_manager)
emission_manager = _emission.back_emission_manager(
lock, termination_manager, transmission_manager)
ingestion_manager = _ingestion.back_ingestion_manager(
lock, work_pool, servicer, termination_manager,
transmission_manager, operation_context, emission_manager)
expiration_manager = _expiration.back_expiration_manager(
lock, termination_manager, transmission_manager, ingestion_manager,
ticket.timeout, default_timeout, maximum_timeout)
reception_manager = _reception.back_reception_manager(
lock, termination_manager, transmission_manager, ingestion_manager,
expiration_manager)
termination_manager.set_expiration_manager(expiration_manager)
transmission_manager.set_ingestion_and_expiration_managers(
ingestion_manager, expiration_manager)
operation_context.set_ingestion_and_expiration_managers(
ingestion_manager, expiration_manager)
emission_manager.set_ingestion_manager_and_expiration_manager(
ingestion_manager, expiration_manager)
ingestion_manager.set_expiration_manager(expiration_manager)
reception_manager.receive_packet(ticket)
return reception_manager
| 18,824
|
def runICA(fslDir, inFile, outDir, melDirIn, mask, dim, TR, seed=None):
""" This function runs MELODIC and merges the mixture modeled thresholded ICs into a single 4D nifti file
Parameters
---------------------------------------------------------------------------------
fslDir: Full path of the bin-directory of FSL
inFile: Full path to the fMRI data file (nii.gz) on which MELODIC should be run
outDir: Full path of the output directory
melDirIn: Full path of the MELODIC directory in case it has been run before, otherwise define empty string
mask: Full path of the mask to be applied during MELODIC
dim: Dimensionality of ICA
TR: TR (in seconds) of the fMRI data
Output (within the requested output directory)
---------------------------------------------------------------------------------
melodic.ica MELODIC directory
melodic_IC_thr.nii.gz merged file containing the mixture modeling thresholded Z-statistical maps located in melodic.ica/stats/ """
# Define the 'new' MELODIC directory and predefine some associated files
melDir = os.path.join(outDir,'melodic.ica')
melIC = os.path.join(melDir,'melodic_IC.nii.gz')
melICmix = os.path.join(melDir,'melodic_mix')
melICthr = os.path.join(outDir,'melodic_IC_thr.nii.gz')
# When a MELODIC directory is specified, check whether all needed files are present. Otherwise... run MELODIC again
if (len(melDir) != 0) and os.path.isfile(os.path.join(melDirIn,'melodic_IC.nii.gz')) and os.path.isfile(os.path.join(melDirIn,'melodic_FTmix')) and os.path.isfile(os.path.join(melDirIn,'melodic_mix')):
print(' - The existing/specified MELODIC directory will be used.')
# If a 'stats' directory is present (contains thresholded spatial maps) create a symbolic link to the MELODIC directory. Otherwise create specific links and run mixture modeling to obtain thresholded maps.
if os.path.isdir(os.path.join(melDirIn,'stats')):
os.symlink(melDirIn,melDir)
else:
print(' - The MELODIC directory does not contain the required \'stats\' folder. Mixture modeling on the Z-statistical maps will be run.')
# Create symbolic links to the items in the specified melodic directory
os.makedirs(melDir)
for item in os.listdir(melDirIn):
os.symlink(os.path.join(melDirIn,item),os.path.join(melDir,item))
# Run mixture modeling
cmdline = [os.path.join(fslDir,'melodic'),
'--in=' + melIC,
'--ICs=' + melIC,
'--mix=' + melICmix,
'--outdir=' + melDir,
'--Ostats --mmthresh=0.5']
if seed is not None:
cmdline.append('--seed=%u' % seed)
os.system(' '.join(cmdline))
else:
# If a melodic directory was specified, display that it did not contain all files needed for ICA-AROMA (or that the directory does not exist at all)
if len(melDirIn) != 0 :
if not os.path.isdir(melDirIn):
print(' - The specified MELODIC directory does not exist. MELODIC will be run separately.')
else:
print(' - The specified MELODIC directory does not contain the required files to run ICA-AROMA. MELODIC will be run separately.')
# Run MELODIC
cmdline = [os.path.join(fslDir,'melodic'),
'--in=' + inFile,
'--outdir=' + melDir,
'--mask=' + mask,
'--dim=' + str(dim),
'--Ostats --nobet --mmthresh=0.5 --report',
'--tr=' + str(TR)]
if seed is not None:
cmdline.append('--seed=%u' % seed)
os.system(' '.join(cmdline))
# Get number of components
cmd = ' '.join([os.path.join(fslDir,'fslinfo'),
melIC,
'| grep dim4 | head -n1 | awk \'{print $2}\''])
nrICs=int(float(subprocess.getoutput(cmd)))
# Merge mixture modeled thresholded spatial maps. Note! In case that mixture modeling did not converge, the file will contain two spatial maps. The latter being the results from a simple null hypothesis test. In that case, this map will have to be used (first one will be empty).
for i in range(1,nrICs+1):
# Define thresholded zstat-map file
zTemp = os.path.join(melDir,'stats','thresh_zstat' + str(i) + '.nii.gz')
cmd = ' '.join([os.path.join(fslDir,'fslinfo'),
zTemp,
'| grep dim4 | head -n1 | awk \'{print $2}\''])
lenIC=int(float(subprocess.getoutput(cmd)))
# Define zeropad for this IC-number and new zstat file
cmd = ' '.join([os.path.join(fslDir,'zeropad'),
str(i),
'4'])
ICnum=subprocess.getoutput(cmd)
zstat = os.path.join(outDir,'thr_zstat' + ICnum)
# Extract last spatial map within the thresh_zstat file
os.system(' '.join([os.path.join(fslDir,'fslroi'),
zTemp, # input
zstat, # output
str(lenIC-1), # first frame
'1'])) # number of frames
# Merge and subsequently remove all mixture modeled Z-maps within the output directory
os.system(' '.join([os.path.join(fslDir,'fslmerge'),
'-t', # concatenate in time
melICthr, # output
os.path.join(outDir,'thr_zstat????.nii.gz')])) # inputs
os.system('rm ' + os.path.join(outDir,'thr_zstat????.nii.gz'))
# Apply the mask to the merged file (in case a melodic-directory was predefined and run with a different mask)
os.system(' '.join([os.path.join(fslDir,'fslmaths'),
melICthr,
'-mas ' + mask,
melICthr]))
| 18,825
|
def load_train_val_data(
data_dir: str, batch_size: int,
training_fraction: float) -> Tuple[DataLoader, DataLoader]:
"""
Returns two DataLoader objects that wrap training and validation data.
Training and validation data are extracted from the full original training
data, split according to training_fraction.
"""
full_train_data = datasets.FashionMNIST(data_dir,
train=True,
download=False,
transform=ToTensor())
full_train_len = len(full_train_data)
train_len = int(full_train_len * training_fraction)
val_len = full_train_len - train_len
(train_data, val_data) = random_split(dataset=full_train_data,
lengths=[train_len, val_len])
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_data, batch_size=batch_size, shuffle=True)
return (train_loader, val_loader)
| 18,826
|
def match(input_string, start_node):
    """Match a string against an NFA.
    input_string :: the string to be matched
    start_node :: the start node of the NFA
    return :: True | False
    """
    # Initial running state set: the start node plus all nodes reachable via epsilon transitions
    current_state_set = [start_node]
    next_state_set = closure(current_state_set)
    # Read the input one character at a time and advance the state set
    for i, ch in enumerate(input_string):
        # States reached after consuming the character, plus their epsilon closure
        current_state_set = move(next_state_set, ch)
        next_state_set = closure(current_state_set)
        # If the state set is empty, the match fails
        if next_state_set is None:
            return False
        # Accept if the last character has been read and an accepting state is reachable
        if has_accepted_state(next_state_set) and i == len(input_string) - 1:
            return True
    return False
| 18,827
|
def _H_to_h(H):
"""Converts CIECAM02/CAM02-UCS hue composition (H) to raw hue angle (h)."""
x0 = H % 400 * 360 / 400
h, _, _ = fmin_l_bfgs_b(lambda x: abs(h_to_H(x) - H), x0, approx_grad=True)
return h % 360
| 18,828
|
def filter_df_merge(cpu_df, filter_column=None):
"""
process cpu data frame, merge by 'model_name', 'batch_size'
Args:
cpu_df (pandas.DataFrame): benchmark results to process and merge.
filter_column (str): column whose values are used to group and suffix the merged frames.
"""
if not filter_column:
raise Exception(
"please assign filter_column for filter_df_merge function")
df_lists = []
filter_column_lists = []
for k, v in cpu_df.groupby(filter_column, dropna=True):
filter_column_lists.append(k)
df_lists.append(v)
final_output_df = df_lists[-1]
# merge same model
for i in range(len(df_lists) - 1):
left_suffix = cpu_df[filter_column].unique()[0]
right_suffix = df_lists[i][filter_column].unique()[0]
print(left_suffix, right_suffix)
if not pd.isnull(right_suffix):
final_output_df = pd.merge(
final_output_df,
df_lists[i],
how='left',
left_on=['model_name', 'batch_size'],
right_on=['model_name', 'batch_size'],
suffixes=('', '_{0}_{1}'.format(filter_column, right_suffix)))
# rename default df columns
origin_column_names = list(cpu_df.columns.values)
origin_column_names.remove(filter_column)
suffix = final_output_df[filter_column].unique()[0]
for name in origin_column_names:
final_output_df.rename(
columns={name: "{0}_{1}_{2}".format(name, filter_column, suffix)},
inplace=True)
final_output_df.rename(
columns={
filter_column: "{0}_{1}_{2}".format(filter_column, filter_column,
suffix)
},
inplace=True)
final_output_df.sort_values(
by=[
"model_name_{0}_{1}".format(filter_column, suffix),
"batch_size_{0}_{1}".format(filter_column, suffix)
],
inplace=True)
return final_output_df
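# Usage sketch (hypothetical benchmark frame; assumes pandas is imported as pd,
# as in the function body): merge per-device rows into one row per
# (model_name, batch_size), with metric columns suffixed by the device value.
cpu_df = pd.DataFrame({
    "model_name": ["resnet", "resnet"],
    "batch_size": [1, 1],
    "latency_ms": [10.2, 3.4],
    "device": ["cpu", "gpu"],
})
merged = filter_df_merge(cpu_df, filter_column="device")
print(merged.columns.tolist())  # e.g. ['model_name_device_gpu', 'batch_size_device_gpu', ...]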
| 18,829
|
def MRP2Euler121(q):
"""
MRP2Euler121(Q)
E = MRP2Euler121(Q) translates the MRP
vector Q into the (1-2-1) euler angle vector E.
"""
return EP2Euler121(MRP2EP(q))
| 18,830
|
def nlayer(depth=64):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = NLayer_D(depth=depth)
return model
| 18,831
|
def get_dataset(config):
"""Downloads the dataset if it is not yet available and unzips it"""
datasets = {
"CVE-2014-0160": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/CVE-2014-0160.tar.gz",
"PHP_CWE-434": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/PHP_CWE-434.tar.gz",
"Bruteforce_CWE-307": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/Bruteforce_CWE-307.tar.gz",
"SQL_Injection_CWE-89": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/SQL_Injection_CWE-89.tar.gz",
"ZipSlip": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/ZipSlip.tar.gz",
"CVE-2012-2122": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/CVE-2012-2122.tar.gz",
"CVE-2017-7529": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/CVE-2017-7529.tar.gz",
"CVE-2018-3760": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/CVE-2018-3760.tar.gz",
"CVE-2019-5418": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/CVE-2019-5418.tar.gz",
"EPS_CWE-434": "https://www.exploids.de/lid-ds-downloads/LID-DS-Recordings-01/EPS_CWE-434.tar.gz",
}
os.makedirs(config["data"]["raw"], exist_ok=True)
try:
link = datasets[config["dataset"]]
except KeyError as key:
print("This dataset does not exist. Aborting.")
print(f"The key was {key}")
sys.exit(1)
raw_data = os.path.join(config["data"]["prefix"], "raw")
datapath = "{}/{}.tar.gz".format(raw_data, config["dataset"])
print(datapath)
if not os.path.exists(datapath):
os.system(f"curl -LOJ {link}")
os.system(f"mv {config['dataset']}.tar.gz {datapath}")
os.system(f"tar -zxvf {datapath} -C {raw_data}")
os.system(f"rm {datapath}")
| 18,832
|
def deserialize(
value: ElementTree.Element,
cipher: PSCryptoProvider,
**kwargs: typing.Any,
) -> typing.Optional[typing.Union[bool, PSObject]]:
"""Deserialize CLIXML to a Python object.
Deserializes a CLIXML XML Element from .NET to a Python object.
Args:
value: The CLIXML XML Element to deserialize to a Python object.
cipher: The Runspace Pool cipher to use for SecureStrings.
kwargs: Optional parameters to sent to the FromPSObjectForRemoting
method on classes that use that.
Returns:
Optional[Union[bool, PSObject]]: The deserialized Python object.
"""
return _Serializer(cipher, **kwargs).deserialize(value)
| 18,833
|
def is_url_ok(url: str) -> bool:
"""Check if the given URL is down."""
try:
r = requests.get(url)
return r.status_code == 200
except Exception:
return False
| 18,834
|
def calculate_pair_energy_np(coordinates, i_particle, box_length, cutoff):
"""
Calculate the interaction energy of a particle with its environment (all other particles in the system)
Parameters
----------------
coordinates : list
the coordinates for all particles in sys
i_particle : int
particle number for which to calculate energy
cutoff : float
simulation cutoff; beyond this distance, interactions aren't calculated
box_length : float
length of the simulation box; assumes a cubic box
Returns
---------------
float
pairwise interaction energy of the i-th particle with all other particles in the system
"""
particle_distances = calculate_distance_np(coordinates[i_particle], coordinates[i_particle+1:], box_length)
particle_distances_filtered = particle_distances[particle_distances < cutoff]
return calculate_LJ_np(particle_distances_filtered).sum()
| 18,835
|
def lwhere(mappings, **cond):
"""Selects mappings containing all pairs in cond."""
return list(where(mappings, **cond))
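# Usage sketch (assumes `where` is the lazy generator this wraps, as in funcy):
# keep only the mappings that contain every key/value pair given in cond.
people = [{'name': 'Ada', 'role': 'dev'}, {'name': 'Bob', 'role': 'ops'}]
assert lwhere(people, role='dev') == [{'name': 'Ada', 'role': 'dev'}]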
| 18,836
|
def get_number(message, limit=4):
"""
convert Chinese to pinyin and extract useful numbers
attention:
1. only works for integers
2. before applying this method, the message should be preprocessed
input:
message: the message you want to extract numbers from.
limit: limit the length of number sequence
"""
words = pinyin.get_pinyin(message).split('-')
numbers = []
tmp = ''
count = 0
for w in words:
if re.search(r'\W', w, re.A):
for s in list(w):
if s in special_char.keys():
count += 1
tmp += special_char[s]
else:
if count >= limit:
numbers.append(tmp)
count = 0
tmp = ''
elif w in pinyin2number.keys():
count += 1
tmp += pinyin2number[w]
else:
if count >= limit:
numbers.append(tmp)
count = 0
tmp = ''
if count >= limit:
numbers.append(tmp)
return numbers
| 18,837
|
def get_db_connection(path, timeout=30, okay_to_create=False):
"""
Returns a properly configured SQLite database connection.
:param path: path to DB
:param timeout: timeout for connection
:param okay_to_create: if True, create the DB if it doesn't exist
:returns: DB connection object
"""
try:
connect_time = time.time()
conn = sqlite3.connect(path, check_same_thread=False,
factory=GreenDBConnection, timeout=timeout)
if path != ':memory:' and not okay_to_create:
# attempt to detect and fail when connect creates the db file
stat = os.stat(path)
if stat.st_size == 0 and stat.st_ctime >= connect_time:
os.unlink(path)
raise DatabaseConnectionError(path,
'DB file created by connect?')
conn.row_factory = sqlite3.Row
conn.text_factory = str
with closing(conn.cursor()) as cur:
cur.execute('PRAGMA synchronous = NORMAL')
cur.execute('PRAGMA count_changes = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA journal_mode = DELETE')
conn.create_function('chexor', 3, chexor)
except sqlite3.DatabaseError:
import traceback
raise DatabaseConnectionError(path, traceback.format_exc(),
timeout=timeout)
return conn
| 18,838
|
def calc_merkle_root(trie: Trie):
"""private method that builds the merkle-trie and calculates root_hash"""
txs = trie.transactions.copy()
# if there is only one tx the trie is not valid, hence we need to add an
# empty root
if len(txs) == 1:
txs.append(stringutil.empty_root)
# do until there is only one hash left
while len(txs) != 1:
temp = []
# add an empty hash if the number of hashes is unequal
if len(txs) % 2 == 1:
txs.append(stringutil.empty_root)
# go over all pairs and hash them
for tup in zip(txs[0::2], txs[1::2]):
temp.append(hashutil.hash_tuple(tup[0], tup[1]))
# continue with new result
txs = temp
# set root and finish
trie.root_hash = txs[0]
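# Standalone sketch of the same pairing scheme, with hashlib.sha256 of the
# concatenated digests standing in for hashutil.hash_tuple and "" standing in
# for stringutil.empty_root (both are assumptions for illustration).
import hashlib

def _merkle_root(leaf_hashes):
    level = list(leaf_hashes)
    if len(level) == 1:
        level.append("")  # pad a single transaction with an empty root
    while len(level) != 1:
        if len(level) % 2 == 1:
            level.append("")  # pad odd-sized levels
        level = [hashlib.sha256((a + b).encode()).hexdigest()
                 for a, b in zip(level[0::2], level[1::2])]
    return level[0]

print(_merkle_root(["aa", "bb", "cc"]))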
| 18,839
|
def create_connection(db_file):
"""
Creates a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except Error as e:
print(e)
return conn
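# Usage sketch (hypothetical database file; `Error` above is assumed to be sqlite3.Error):
conn = create_connection("example.db")
if conn is not None:
    conn.execute("CREATE TABLE IF NOT EXISTS items (id INTEGER PRIMARY KEY, name TEXT)")
    conn.close()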
| 18,840
|
def convex_hull(poly):
"""
Ratio of the shape's area to the area of its convex hull;
Altman's A_3 measure, from Niemi et al 1991.
"""
chull = to_shapely_geom(poly).convex_hull
return poly.area / chull.area
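# Usage sketch (assumes shapely is available and that to_shapely_geom passes a
# shapely Polygon through unchanged). An L-shaped polygon of area 3 has a convex
# hull of area 3.5, so the score is ~0.857; a convex shape scores exactly 1.0.
from shapely.geometry import Polygon
l_shape = Polygon([(0, 0), (2, 0), (2, 1), (1, 1), (1, 2), (0, 2)])
print(convex_hull(l_shape))  # ~0.857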
| 18,841
|
def create_scifact_annotations(
claims, corpus, tokenizer, class_to_id: Dict[str, int], neutral_class: str
) -> List[SciFactAnnotation]:
"""Create a SciFactAnnotation for each claim - evidence/cited document pair."""
def get_abstract_and_encoding(
doc_id,
) -> Tuple[List[List[str]], List[torch.IntTensor]]:
doc = [d for d in corpus if d["doc_id"] == int(doc_id)]
assert len(doc) == 1
abstract = doc[0]["abstract"]
encoding = [
torch.IntTensor(tokenizer.encode(sentence, add_special_tokens=False))
for sentence in abstract
]
return abstract, encoding
annotations = []
for c in claims:
# Convert Interventions, Comparator, and Outcomes tokens to encodings
intervention = torch.IntTensor(tokenizer.convert_tokens_to_ids(c["i_tokens"]))
comparator = torch.IntTensor(tokenizer.convert_tokens_to_ids(c["c_tokens"]))
outcome = torch.IntTensor(tokenizer.convert_tokens_to_ids(c["o_tokens"]))
evidence = c["evidence"]
# Handle claims with no evidence (label is NOT_ENOUGH_INFO)
if not evidence:
cited_doc_id = c["cited_doc_ids"][0]
abstract, encoded_abstract = get_abstract_and_encoding(cited_doc_id)
rationale_id = class_to_id[neutral_class]
s_ann = SciFactAnnotation(
claim_id=int(c["id"]),
doc_id=int(cited_doc_id),
sentences=abstract,
encoded_sentences=encoded_abstract,
rationale_sentences=[],
i=intervention,
c=comparator,
o=outcome,
rationale_class=neutral_class,
rationale_id=rationale_id,
)
annotations.append(s_ann)
# Create a SciFact Annotation for each evidence document
else:
for doc_id, doc_rationales in evidence.items():
abstract, encoded_abstract = get_abstract_and_encoding(doc_id)
rationale_class = doc_rationales[0]["label"]
rationale_id = class_to_id[rationale_class]
# extract all rationale sentence indices from the document
rationale_sentences = []
for rationale in doc_rationales:
rationale_sentences.extend(rationale["sentences"])
s_ann = SciFactAnnotation(
claim_id=int(c["id"]),
doc_id=int(doc_id),
sentences=abstract,
encoded_sentences=encoded_abstract,
rationale_sentences=rationale_sentences,
i=intervention,
c=comparator,
o=outcome,
rationale_class=rationale_class,
rationale_id=rationale_id,
)
annotations.append(s_ann)
return annotations
| 18,842
|
def predict_test_results(Test_data, Results_path, cph, col_names,SN=1,penalizer=[],
var_thresh=[],CI=[]):
"""
:param Test_data:
:param Results_path:
:param cph:
:param col_names:
:param SN:
:param penalizer:
:param var_thresh:
:return: Tot_test_pred, Y_tot, Test_dummy, y_test_val, y_pred_val, AUC, APS
"""
Test_length=Test_data.shape[0]
if CI=="CI":
Test_data_boot= Test_data.sample(n=Test_length,replace=True)
else:
Test_data_boot = Test_data
drop_cols=[x for x in ["21003-4.0", "21003-3.0", "2443-3.0", "TTE"] if x in Test_data_boot.columns]
# drop_cols=[x for x in ["21003-4.0", "21003-3.0", "2443-3.0"] if x in Test_data_boot.columns]
if len(drop_cols)>0:
Test_data_clean = Test_data_boot.drop(drop_cols, axis=1)
Test_dummy = pd.get_dummies(Test_data_clean, drop_first=True)
# Test_dummy_rel=Test_dummy.iloc[:,:-2]
test_predicted = cph.predict_survival_function(Test_dummy)
# test_predicted =cph.score(Test_dummy)
dummy_idx = np.arange(0, Test_dummy.shape[0])
Test_dummy.index=dummy_idx
Test_data_boot.index=dummy_idx
test_predicted.columns=dummy_idx
Tot_test_pred = test_predicted.T.join(Test_data_boot.loc[:, "21003-4.0"])
Tot_test_pred["21003-4.0"] = Tot_test_pred["21003-4.0"].astype(str)
col = [str(x) for x in Tot_test_pred.columns.values]
new_col_dict = dict(list(zip(Tot_test_pred.columns.values, col)))
Tot_test_pred.rename(columns=new_col_dict, inplace=True)
Tot_test_pred["pred"] = Tot_test_pred.apply(get_rel_score, axis=1)
Tot_test_pred.index=np.arange(0,Tot_test_pred.shape[0])
Test_data_boot.index=np.arange(0,Test_data_boot.shape[0])
Y_tot = Tot_test_pred.join(Test_data_boot.loc[:,"2443-3.0"]).loc[:,["pred","2443-3.0"]].dropna(axis=1)
# print("*************~~~~~ Ytot ~~~~~~~~************")
# print("KeyError: u'the label [2443-3.0] is not in the [columns]'")
# print (Y_tot)
# print("*************~~~~~++++++~~~~~~~~************")
y_test_val = Y_tot.loc[:,"2443-3.0"].values
y_pred_val = 1 - Y_tot.loc[:,"pred"].values
AUC = roc_auc_score(y_test_val, y_pred_val)
# plot_ROC_curve(y_test_val, y_pred_val, AUC)
APS = average_precision_score(y_test_val, np.array(y_pred_val))
# plot_precision_recall(y_test_val, y_pred_val, APS)
results_df = pd.DataFrame.from_dict({"APS": [APS], "AUC": [AUC], "SN": [SN],"penalizer":[penalizer],"var_thresh":[var_thresh]})
results_df = results_df.set_index("SN", drop=True)
prediction_DF = pd.DataFrame.from_dict({"y_test_val": y_test_val, "y_pred_val": y_pred_val})
results_df.to_csv(os.path.join(Results_path, "AUC_APS_results_" + str(int(SN)) + ".csv"),index=True)
prediction_DF.to_csv(os.path.join(Results_path, "y_pred_results_" + str(int(SN)) + ".csv"))
# return Tot_test_pred, Y_tot, Test_dummy, y_test_val, y_pred_val, AUC, APS
| 18,843
|
def epsilon(tagfile):
"""Compute the total epsilon factor for each event
Compute the flatfield correction from the P-flat and L-flat reference files
(PFLTFILE and LFLTFILE respectively).
Parameters
----------
tagfile : str
input STIS time-tag data file
Returns
-------
epsilon, np.ndarray
array of epsilons
"""
print("Calculating Epsilon")
with fits.open(tagfile) as hdu:
epsilon_out = np.ones(hdu[1].data['time'].shape)
#-- Flatfield correction
for ref_flat in ['PFLTFILE', 'LFLTFILE']:
reffile = expand_refname(hdu[0].header[ref_flat])
print('FLATFIELD CORRECTION {}: {}'.format(ref_flat, reffile))
if not os.path.exists(reffile):
print("{} not found, correction not performed".format(reffile))
return np.ones(len(hdu[1].data))
with fits.open(reffile) as image_hdu:
image = image_hdu[1].data
if not image.shape == (2048, 2048):
x_factor = 2048 // image.shape[1]
y_factor = 2048 // image.shape[0]
print('Enlarging by {},{}'.format(x_factor, y_factor))
image = enlarge(image, x_factor, y_factor)
#--indexing is 1 off
if 'AXIS1' in hdu[1].data.names:
epsilon_out *= map_image(image,
hdu[1].data['AXIS1'] - 1,
hdu[1].data['AXIS2'] - 1)
else:
epsilon_out *= map_image(image,
hdu[1].data['XCORR'].astype(np.int32) - 1,
hdu[1].data['YCORR'].astype(np.int32) - 1)
return epsilon_out
| 18,844
|
def contact(update: Update, context: CallbackContext) -> None:
""" /contact command """
update.message.reply_text(
static_text.contact_command,
parse_mode=ParseMode.HTML,
disable_web_page_preview=True,
)
| 18,845
|
async def test_no_scopes():
"""The credential should raise ValueError when get_token is called with no scopes"""
credential = VSCodeCredential()
with pytest.raises(ValueError):
await credential.get_token()
| 18,846
|
def sort_by_value(front, values):
"""
This function sorts the front list according to the values
:param front: List of indexes of elements in the value
:param values: List of values. Can be longer than the front list
:return:
"""
copied_values = values.copy() # Copy so we can modify it
sorted_list = []
while len(sorted_list) != len(front):
min_value = copied_values.index(min(copied_values))
if min_value in front:
sorted_list.append(min_value)
copied_values[min_value] = math.inf
return sorted_list
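# Usage sketch (assumes `math` is imported, as used above): sort the indices in
# `front` by their associated values, smallest first.
front = [0, 2]
values = [3.0, 1.0, 2.0]
print(sort_by_value(front, values))  # [2, 0]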
| 18,847
|
def save_exposure(fitstbl, frame, spectrograph, science_path, par, caliBrate, all_spec2d, all_specobjs):
"""
Save the outputs from extraction for a given exposure
Args:
frame (:obj:`int`):
0-indexed row in the metadata table with the frame
that has been reduced.
all_spec2d(:class:`pypeit.spec2dobj.AllSpec2DObj`):
sci_dict (:obj:`dict`):
Dictionary containing the primary outputs of
extraction
basename (:obj:`str`):
The root name for the output file.
Returns:
None or SpecObjs: All of the objects saved to disk
"""
# TODO: Need some checks here that the exposure has been reduced?
# Get the basename
basename = fitstbl.construct_basename(frame)
# Determine the headers
row_fitstbl = fitstbl[frame]
# Need raw file header information
rawfile = fitstbl.frame_paths(frame)
head2d = fits.getheader(rawfile, ext=spectrograph.primary_hdrext)
# Check for the directory
if not os.path.isdir(science_path):
os.makedirs(science_path)
subheader = spectrograph.subheader_for_spec(row_fitstbl, head2d)
# 1D spectra
if all_specobjs.nobj > 0:
# Spectra
outfile1d = os.path.join(science_path, 'spec1d_{:s}.fits'.format(basename))
all_specobjs.write_to_fits(subheader, outfile1d,
update_det=par['rdx']['detnum'],
slitspatnum=par['rdx']['slitspatnum'])
# Info
outfiletxt = os.path.join(science_path, 'spec1d_{:s}.txt'.format(basename))
all_specobjs.write_info(outfiletxt, spectrograph.pypeline)
else:
outfile1d = None
# 2D spectra
outfile2d = os.path.join(science_path, 'spec2d_{:s}.fits'.format(basename))
# Build header
pri_hdr = all_spec2d.build_primary_hdr(head2d, spectrograph,
redux_path=par['rdx']['redux_path'],
master_key_dict=caliBrate.master_key_dict,
master_dir=caliBrate.master_dir,
subheader=subheader)
# Write
all_spec2d.write_to_fits(outfile2d, pri_hdr=pri_hdr, update_det=par['rdx']['detnum'])
return outfile2d, outfile1d
| 18,848
|
def test_day_detail_view(init_feasible_db, client):
"""Test day detail view."""
client.login(username="temporary", password="temporary")
day = Day.objects.get(number=1)
response = client.get(reverse("day_detail", kwargs={"pk": day.id}))
assert response.status_code == 200
assert "Day: 1" in response.rendered_content
assert "day_detail.html" in [t.name for t in response.templates]
| 18,849
|
def get_cell_area(self, indices=[]):
"""Return the area of the cells on the outer surface.
Parameters
----------
self : MeshVTK
a MeshVTK object
indices : list
list of the points to extract (optional)
Returns
-------
areas: ndarray
Area of the cells
"""
surf = self.get_surf(indices)
return surf.compute_cell_sizes(area=True)["Area"]
| 18,850
|
def _to_dataarray(origins, sources, values):
""" Converts grid_search inputs to DataArray
"""
origin_dims = ('origin_idx',)
origin_coords = [np.arange(len(origins))]
origin_shape = (len(origins),)
source_dims = sources.dims
source_coords = sources.coords
source_shape = sources.shape
return MTUQDataArray(**{
'data': np.reshape(values, source_shape + origin_shape),
'coords': source_coords + origin_coords,
'dims': source_dims + origin_dims,
})
| 18,851
|
def package(metadata: Metadata, requirements: Optional[List[str]] = None, path: Optional[str] = None):
"""Packages the chatbot into a single archive for deployment.
Performs some preliminary checks on the metadata.
Creates a _package.zip file in the directory containing the file that contains the bot class
unless a path is provided.
:param metadata:
:param requirements:
:param path:
:return:
"""
bot_file = Path(inspect.getfile(metadata.input_class))
print("Running verification checks on metadata.")
metadata.verify(bot_file)
metadata_dict = {
'name': metadata.name,
'imageUrl': metadata.image_url,
'color': metadata.color,
'developerUid': metadata.developer_uid,
'description': metadata.description,
'inputFile': bot_file.stem,
'inputClass': metadata.input_class.__name__,
'memory': metadata.memory,
}
print("Prepared metadata:")
pprint.pprint(metadata_dict)
print("Preparing temporary directory...")
with tempfile.TemporaryDirectory() as temp_dir:
# Copy files in bot directory
def ignore(src, names):
ignore_list = []
for name in names:
# e.g .git folder is not wanted
if name.startswith('.') or name.startswith('_package.zip'):
warnings.warn(
f"Ignoring files which start with '.': {name}.",
RuntimeWarning
)
ignore_list.append(name)
if name == "main.py":
raise RuntimeError("Bot root directory cannot contain a main.py file.")
return ignore_list
copytree(bot_file.parent, temp_dir, ignore=ignore)
# Write metadata.json
with (Path(temp_dir) / "metadata.json").open("w") as f:
json.dump(metadata_dict, f)
# Write requirements.txt
if requirements:
write_valid_requirements_file(Path(temp_dir) / "requirements.txt", requirements)
# Create zip
if path is None:
path = bot_file.parent / "_package.zip"
else:
path = Path(path)
with path.open("wb") as f:
zipfile_from_folder(temp_dir, f)
print(f"Created zip package at {path}.")
| 18,852
|
def warp_grid(grid: tf.Tensor, theta: tf.Tensor) -> tf.Tensor:
"""
Perform transformation on the grid.
- grid_padded[i,j,k,:] = [i j k 1]
- grid_warped[b,i,j,k,p] = sum_over_q (grid_padded[i,j,k,q] * theta[b,q,p])
:param grid: shape = (dim1, dim2, dim3, 3), grid[i,j,k,:] = [i j k]
:param theta: parameters of transformation, shape = (batch, 4, 3)
:return: shape = (batch, dim1, dim2, dim3, 3)
"""
grid_size = grid.get_shape().as_list()
# grid_padded[i,j,k,:] = [i j k 1], shape = (dim1, dim2, dim3, 4)
grid_padded = tf.concat([grid, tf.ones(grid_size[:3] + [1])], axis=3)
# grid_warped[b,i,j,k,p] = sum_over_q (grid_padded[i,j,k,q] * theta[b,q,p])
# shape = (batch, dim1, dim2, dim3, 3)
grid_warped = tf.einsum("ijkq,bqp->bijkp", grid_padded, theta)
return grid_warped
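# Minimal check (assumes TensorFlow is available): with an identity affine theta,
# warp_grid returns the original grid coordinates for every batch element.
grid = tf.cast(tf.stack(tf.meshgrid(tf.range(2), tf.range(3), tf.range(4), indexing="ij"), axis=-1), tf.float32)  # (2, 3, 4, 3)
identity_theta = tf.tile(tf.constant([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]]), [5, 1, 1])  # (5, 4, 3)
warped = warp_grid(grid, identity_theta)  # (5, 2, 3, 4, 3), equal to grid broadcast over the batch axis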
| 18,853
|
def droid_visualization(video, device="cuda:0"):
""" DROID visualization frontend """
torch.cuda.set_device(device)
droid_visualization.video = video
droid_visualization.cameras = {}
droid_visualization.points = {}
droid_visualization.warmup = 8
droid_visualization.scale = 1.0
droid_visualization.ix = 0
droid_visualization.filter_thresh = 0.005
def increase_filter(vis):
droid_visualization.filter_thresh *= 2
with droid_visualization.video.get_lock():
droid_visualization.video.dirty[:droid_visualization.video.counter.value] = True
def decrease_filter(vis):
droid_visualization.filter_thresh *= 0.5
with droid_visualization.video.get_lock():
droid_visualization.video.dirty[:droid_visualization.video.counter.value] = True
def animation_callback(vis):
cam = vis.get_view_control().convert_to_pinhole_camera_parameters()
with torch.no_grad():
with video.get_lock():
t = video.counter.value
dirty_index, = torch.where(video.dirty.clone())
dirty_index = dirty_index
if len(dirty_index) == 0:
return
video.dirty[dirty_index] = False
# convert poses to 4x4 matrix
poses = torch.index_select(video.poses, 0, dirty_index)
disps = torch.index_select(video.disps, 0, dirty_index)
Ps = SE3(poses).inv().matrix().cpu().numpy()
images = torch.index_select(video.images, 0, dirty_index)
images = images.cpu()[:,[2,1,0],3::8,3::8].permute(0,2,3,1) / 255.0
points = droid_backends.iproj(SE3(poses).inv().data, disps, video.intrinsics[0]).cpu()
thresh = droid_visualization.filter_thresh * torch.ones_like(disps.mean(dim=[1,2]))
count = droid_backends.depth_filter(
video.poses, video.disps, video.intrinsics[0], dirty_index, thresh)
count = count.cpu()
disps = disps.cpu()
masks = ((count >= 2) & (disps > .5*disps.mean(dim=[1,2], keepdim=True)))
for i in range(len(dirty_index)):
pose = Ps[i]
ix = dirty_index[i].item()
if ix in droid_visualization.cameras:
vis.remove_geometry(droid_visualization.cameras[ix])
del droid_visualization.cameras[ix]
if ix in droid_visualization.points:
vis.remove_geometry(droid_visualization.points[ix])
del droid_visualization.points[ix]
### add camera actor ###
cam_actor = create_camera_actor(True)
cam_actor.transform(pose)
vis.add_geometry(cam_actor)
droid_visualization.cameras[ix] = cam_actor
mask = masks[i].reshape(-1)
pts = points[i].reshape(-1, 3)[mask].cpu().numpy()
clr = images[i].reshape(-1, 3)[mask].cpu().numpy()
## add point actor ###
point_actor = create_point_actor(pts, clr)
vis.add_geometry(point_actor)
droid_visualization.points[ix] = point_actor
# hack to allow interacting with visualization during inference
if len(droid_visualization.cameras) >= droid_visualization.warmup:
cam = vis.get_view_control().convert_from_pinhole_camera_parameters(cam)
droid_visualization.ix += 1
vis.poll_events()
vis.update_renderer()
### create Open3D visualization ###
vis = o3d.visualization.VisualizerWithKeyCallback()
vis.register_animation_callback(animation_callback)
vis.register_key_callback(ord("S"), increase_filter)
vis.register_key_callback(ord("A"), decrease_filter)
vis.create_window(height=540, width=960)
vis.get_render_option().load_from_json("misc/renderoption.json")
vis.run()
vis.destroy_window()
| 18,854
|
async def makenotifyrole(guild):
"""Make the notify role in the given guild.
:type guild: discord.Guild
:rtype: None | discord.Role
:param guild: Guild instance to create the role in.
:return: The created role, possibly None if the creation failed.
"""
userrole = None
try:
# The bot should have the ping any role perm, so the role doesn't need to be mentionable
userrole = await guild.create_role(reason="Role created for notification", name=notifyrolename)
except discord.Forbidden: # May not have permission
pass # This should leave userrole as none
return userrole
| 18,855
|
def mape(forecast: Forecast, target: Target) -> np.ndarray:
"""
Calculate MAPE.
This method accepts one or many timeseries.
For multiple timeseries pass matrix (N, M) where N is number of timeseries and M is number of time steps.
:param forecast: Predicted values.
:param target: Target values.
    :return: Same shape array with MAPE calculated for each time step of each timeseries.
"""
return 100 * np.abs(forecast - target) / target
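
# A minimal usage sketch, assuming Forecast/Target are plain numpy arrays
# of shape (N, M): N timeseries, M time steps.
import numpy as np

forecast = np.array([[10.0, 12.0], [5.0, 4.0]])
target = np.array([[8.0, 12.0], [4.0, 5.0]])
errors = mape(forecast, target)
# errors -> [[25.,  0.], [25., 20.]]  (percentage error per time step)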
| 18,856
|
def reverse_weighted_graph(graph):
"""
Function for reverting direction of the graph (weights still the same)
Args:
graph: graph representation as Example: {1: {2: 1, 3: 5}, 2: {3: 2}, 4: {1: 2}}
Returns:
reversed graph
Examples:
>>> reverse_weighted_graph({1: {2: 1, 3: 5}, 2: {3: 2}, 4: {1: 2}})
defaultdict(<class 'dict'>, {2: {1: 1}, 3: {1: 5, 2: 2}, 1: {4: 2}})
"""
rev_graph = defaultdict(dict)
for node, neighborhood in graph.items():
for adj, weight in neighborhood.items():
rev_graph[adj].update(({node: weight}))
return rev_graph
| 18,857
|
def set_logger(log_path, level=logging.INFO, console=True):
"""Sets the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
    Args:
        log_path: (string) where to log
        level: logging level for the root logger (defaults to logging.INFO)
        console: (bool) if True, also log to the console
    """
logger = logging.getLogger()
logger.setLevel(level)
logging.basicConfig(format="")
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(
logging.Formatter("%(asctime)s:%(levelname)s: %(message)s")
)
logger.addHandler(file_handler)
# Logging to console
    if console:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter("%(message)s"))
logger.addHandler(stream_handler)
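
# Usage sketch: log to a file and echo to the console
# (the path is illustrative; its directory must already exist).
import logging

set_logger("model_dir/train.log", level=logging.INFO, console=True)
logging.info("Starting training...")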
| 18,858
|
def test_asset_file_meta_source():
"""Test ht.inline.api.asset_file_meta_source."""
target = "Scanned Asset Library Directories"
path = hou.text.expandString("$HH/otls/OPlibSop.hda")
assert ht.inline.api.asset_file_meta_source(path) == target
assert ht.inline.api.asset_file_meta_source("/some/fake/pat") is None
| 18,859
|
def make_output_dir(out_filename):
"""
Makes the directory to output the file to if it doesn't exist.
"""
# check if output is to cwd, or is a path
dirname = os.path.dirname(out_filename)
if dirname != '' and not os.path.exists(dirname):
try:
os.makedirs(os.path.dirname(out_filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
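
# Usage sketch: make sure the parent directory exists before writing
# (the path is hypothetical).
out_filename = "results/run_01/metrics.json"
make_output_dir(out_filename)
with open(out_filename, "w") as fh:
    fh.write("{}")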
| 18,860
|
def clean_bin():
"""permanently deletes entries - crud delete"""
mongo.db.bin.remove()
mongo.db.bin.insert({'_id': ObjectId()})
return redirect(url_for('get_bin', data_requested="teams"))
| 18,861
|
def correct_by_threshold(img, threshold):
"""
correct the fMRI RSA results by threshold
Parameters
----------
img : array
A 3-D array of the fMRI RSA results.
The shape of img should be [nx, ny, nz]. nx, ny, nz represent the shape of the fMRI-img.
threshold : int
The number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting of more than n voxels will be visualized.
Returns
-------
img : array
A 3-D array of the fMRI RSA results after correction.
The shape of img should be [nx, ny, nz]. nx, ny, nz represent the shape of the fMRI-img.
"""
if len(np.shape(img)) != 3:
return "Invalid input"
sx = np.shape(img)[0]
sy = np.shape(img)[1]
sz = np.shape(img)[2]
nsmall = 1
while nsmall*nsmall*nsmall < threshold:
nsmall = nsmall + 1
nlarge = nsmall + 2
for i in range(sx-nlarge+1):
for j in range(sy-nlarge+1):
for k in range(sz-nlarge+1):
listlarge = list(np.reshape(img[i:i+nlarge, j:j+nlarge, k:k+nlarge], [nlarge*nlarge*nlarge]))
if listlarge.count(0) < nlarge*nlarge*nlarge:
index1 = 0
for l in range(nlarge):
for m in range(nlarge):
if img[i + l, j + m, k] == 0:
index1 = index1 + 1
if img[i + l, j + m, k + nlarge - 1] == 0:
index1 = index1 + 1
for l in range(nlarge-1):
for m in range(nlarge-2):
if img[i + l, j, k + m] == 0:
index1 = index1 + 1
if img[i, j + l + 1, k + m] == 0:
index1 = index1 + 1
if img[i + nlarge - 1, j + l, k + m] == 0:
index1 = index1 + 1
if img[i + l + 1, j + nlarge - 1, k + m] == 0:
index1 = index1 + 1
nex = nlarge * nlarge * nlarge - nsmall * nsmall * nsmall
if index1 == nex:
unit = img[i+1:i+1+nsmall, j+1:j+1+nsmall, k+1:k+1+nsmall]
unit = np.reshape(unit, [nsmall*nsmall*nsmall])
list_internal = list(unit)
index2 = nsmall*nsmall*nsmall-list_internal.count(0)
if index2 < threshold:
                            img[i+1:i+1+nsmall, j+1:j+1+nsmall, k+1:k+1+nsmall] = np.zeros([nsmall, nsmall, nsmall])
print("finished correction")
return img
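
# Minimal sketch: threshold a random similarity map and drop small clusters
# (the array shape and threshold below are illustrative, not from the original code).
import numpy as np

rsa_img = np.random.rand(23, 27, 23)        # fake [nx, ny, nz] RSA map
rsa_img[rsa_img < 0.9] = 0                  # keep only strong similarities
corrected = correct_by_threshold(rsa_img, threshold=27)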
| 18,862
|
def config_date(dut, date):
"""
:param dut:
:param date:
:return:
"""
st.log("config date")
command = "date --set='{}'".format(date)
st.config(dut, command)
return True
| 18,863
|
def test_verify_email(
api_rf, email_confirmation_factory, email_factory, user_factory
):
"""
Sending a POST request with valid data to the endpoint should mark
the associated email address as verified.
"""
user = user_factory(password="password")
email = email_factory(user=user)
confirmation = email_confirmation_factory(email=email)
data = {"key": confirmation.key, "password": "password"}
serializer = serializers.EmailVerificationSerializer(data=data)
assert serializer.is_valid()
request = api_rf.post("/", data)
response = email_verification_view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == serializer.data
| 18,864
|
def read_dicom():
"""Read in DICOM series"""
dicomPath = join(expanduser('~'), 'Documents', 'SlicerDICOMDatabase',
'TCIALocal', '0', 'images', '')
reader = sitk.ImageSeriesReader()
seriesIDread = reader.GetGDCMSeriesIDs(dicomPath)[1]
dicomFilenames = reader.GetGDCMSeriesFileNames(dicomPath, seriesIDread)
reader.SetFileNames(dicomFilenames)
return reader.Execute()
| 18,865
|
def test_7():
"""
Detect the date columns in austin_weather.csv
Here 2 date columns are present with different formats.
"""
table = pandas.read_csv('data_for_tests/table_7.csv')
result = date_detection.detect(table)
print(result)
expected_result = '''{'date': {'type': <ColumnTypes.CONSINTENT: 1>, 'day_first': True}, ' date__new': {'type': <ColumnTypes.CONSINTENT: 1>, 'day_first': False}}'''
assert(expected_result == str(result))
| 18,866
|
def read_file(*file_paths):
"""Read text file."""
with codecs.open(os.path.join(ROOT_DIR, *file_paths), 'r') as fp:
return fp.read()
| 18,867
|
def load_config():
"""Load the config file and validate contents."""
filename = os.path.join(user_config_dir("timetagger_cli"), config_fname)
if not os.path.isfile(filename):
raise RuntimeError("Config not set, run 'timetagger setup' first.")
with open(filename, "rb") as f:
config = toml.loads(f.read().decode())
if "api_url" not in config:
raise RuntimeError("No api_url set in config. Run 'timetagger setup' to fix.")
if not config["api_url"].startswith(("http://", "https://")):
raise RuntimeError(
"The api_url must start with 'http://' or 'https://'. Run 'timetagger setup' to fix."
)
if "api_token" not in config:
raise RuntimeError("No api_token set in config. Run 'timetagger setup' to fix.")
return config
| 18,868
|
def zfs_upgrade_list(supported: bool = False) -> str:
"""
zfs upgrade [-v]
Displays a list of file systems that are not the most recent version.
-v Displays ZFS filesystem versions supported by the current
software. The current ZFS filesystem version and all previous
supported versions are displayed, along with an explanation
of the features provided with each version.
"""
call_args = []
if supported:
call_args.append("-v")
command = _Command("upgrade", call_args)
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to list upgradeable filesystems\n{e.output}\n")
| 18,869
|
def main():
"""
CLI Interface for developing civ plugins
"""
pass
| 18,870
|
def L_model_backward(AL, Y, caches):
"""
ๅฎๆLๅฑ็ฅ็ป็ฝ็ปๆจกๅๅๅไผ ๆญ่ฎก็ฎ
Arguments:
AL -- ๆจกๅ่พๅบๅผ
Y -- ็ๅฎๅผ
caches -- ๅ
ๅซReluๅSigmoidๆฟๆดปๅฝๆฐ็linear_activation_forward()ไธญๆฏไธไธชcache
Returns:
grads -- ๅ
ๅซๆๆๆขฏๅบฆ็ๅญๅ
ธ
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
    # initialize the backward propagation
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]"
current_cache = caches[L - 1]
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = \
linear_activation_backward(dAL, current_cache, activation="sigmoid")
for l in reversed(range(L - 1)):
        # lth layer: (RELU -> LINEAR) gradients
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = \
linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation="relu")
grads["dA" + str(l + 1)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads
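
# Sketch of how the returned gradients are typically consumed by a plain
# gradient-descent step (the dW/db key convention matches the code above;
# the update rule itself is a standard assumption, not taken from this file).
def update_parameters(parameters, grads, learning_rate):
    L = len(parameters) // 2  # number of layers
    for l in range(1, L + 1):
        parameters["W" + str(l)] -= learning_rate * grads["dW" + str(l)]
        parameters["b" + str(l)] -= learning_rate * grads["db" + str(l)]
    return parameters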
| 18,871
|
def findbps(reads, output, bowtie_options, motif, length, threshold, strand):
"""
Input:
reads: str of name of file where single-end, stranded
RNA-seq reads in fastq format are located
output:str of desired basename of output files
bowtie_options: str of bowtie options you wish to
be used for alignment of reads after splitting.
See the bowtie manual.
Recommend "-y -p 2 -v 0 -X 5000 -m 1 <index>"
motif: list of dictionaries representing 5'ss motif
position weight matrix. Each dictionary has a
key for each nucleotide, with a float of the
                probability as the value.
length:int of the lowest acceptable number of bases
used to align a fragment of a read.
threshold: float of the lowest acceptable probability
that a sequence would be sampled from the
                given matrix in order to attempt mapping.
Recommend 0.0 unless many false positives
strand:str either 'first' if reads are first-stranded
or 'second' if reads are second-stranded
Output:
output + '.bed':
A file in paired-end bed format with
information about the reads with a valid
alignment.
output + '_no_alignment.fastq':
Reads with no valid alignment in the
paired-end tab-delimited format
described in the bowtie manual split
as they were attempted to be aligned.
"""
#gets the name of the directory of this file
directory = path.dirname(path.realpath(__file__))
#make these arguments into strings so they can be passed to fp_checker.py
motif = '"' + dumps(motif) + '"'
length = str(length)
threshold = str(threshold)
#this process splits each read at the most likely 5'SS based on the
# given weight matrix and sends them to bowtie to be mapped
# see fp_checker.py for further details
fp_checker = Popen('python ' + directory + '/fp_checker.py ' +
motif +' '+ length +' '+ threshold +' '+ strand,
stdin = open(reads,'r'), stdout = PIPE, shell = True)
#this process maps each split read to the given genome
bowtie = Popen('bowtie --ff ' + bowtie_options + ' --12 - --un ' +
output+'_no_alignment.fastq',
stdin = fp_checker.stdout, stdout = PIPE, shell = True)
fp_checker.stdout.close()
#this process converts the bowtie output into a bed file
# see make_bed.py for further details
make_bed = Popen('python ' + directory + '/make_bed.py',
stdin = bowtie.stdout,
stdout = open(output + ".bed",'w'), shell = True)
bowtie.stdout.close()
make_bed.wait()
return 0
| 18,872
|
def Cnot(idx0: int = 0, idx1: int = 1) -> Operator:
"""Controlled Not between idx0 and idx1, controlled by |1>."""
return ControlledU(idx0, idx1, PauliX())
| 18,873
|
def test_get_df_database_input_value():
""" Check input value"""
try:
data_compile.get_df_database(1)
raise Exception()
except ValueError:
pass
| 18,874
|
def init_sql_references(conn):
"""
Utility function to get references from SQL.
The returned objects conveniently identify users based on kb_name or user hashkey
"""
# get kb_names to kb_id
kb_ref = pds.read_sql("""SELECT id, kb_name, directory_id FROM dbo.kb_raw""", conn)
get_kb_dir_id = kb_ref.loc[:,['kb_name', 'directory_id']].set_index('kb_name').to_dict()['directory_id']
get_kb_raw_id = kb_ref.loc[:,['kb_name', 'id']].set_index('kb_name').to_dict()['id']
# get kb permissions
permissions = pds.read_sql("SELECT hashkey, kb_name, user_id FROM dbo.users \
LEFT JOIN dbo.kb_directory ON dbo.users.id = dbo.kb_directory.user_id \
LEFT JOIN kb_raw ON dbo.kb_directory.id = dbo.kb_raw.directory_id \
", conn)
permissions = pd.DataFrame(np.array(permissions), columns = ['hashkey', 'kb_name', 'user_id']).set_index('hashkey')
return get_kb_dir_id, get_kb_raw_id, permissions
| 18,875
|
def inoptimal_truncation_square_root(A, B, C, k, check_stability=False):
"""Use scipy to perform balanced truncation
Use scipy to perform balanced truncation on a linear state-space system.
This method is the natural application of scipy and inoptimal performance
wise compared to `truncation_square_root_trans_matrix`
See also
-----
truncation_square_root_trans_matrix
"""
if check_stability and not isStable(A):
raise ValueError("This doesn't seem to be a stable system!")
AH = A.transpose().conj()
P = linalg.solve_lyapunov(A, -np.dot(B, B.transpose().conj()))
Q = linalg.solve_lyapunov(AH, -np.dot(C.transpose().conj(), C))
U = linalg.cholesky(P).transpose().conj()
L = linalg.cholesky(Q)
W, Sigma, V = linalg.svd(np.dot(U.transpose().conj(), L),
full_matrices=False,
overwrite_a=True, check_finite=False)
W1 = W[:, :k]
Sigma1 = Sigma[:k]
V1 = V[:, :k]
Sigma1_pow_neg_half = np.diag(Sigma1**-.5)
T1 = np.dot(Sigma1_pow_neg_half,
np.dot(V1.transpose().conj(), L.transpose().conj()))
Ti1 = np.dot(np.dot(U, W1),
Sigma1_pow_neg_half)
return k, np.dot(T1, np.dot(A, Ti1)), np.dot(T1, B), np.dot(C, Ti1), \
Sigma, Ti1, T1
| 18,876
|
def _tessellate_bed(chrom: str, chromStart: int, chromEnd: int, window_size: int) -> pd.DataFrame:
"""Return tessellated pandas dataframe splitting given window.
Parameters
-----------------------
chrom: str,
Chromosome containing given window.
chromStart: int,
Position where the window starts.
chromEnd: int,
Position where the window ends.
window_size: int
Target window size.
Returns
-----------------------
Returns a pandas DataFrame in bed-like format containing the tessellated windows.
"""
return pd.DataFrame([
{
"chrom": chrom,
"chromStart": chromStart + window_size*i,
"chromEnd": chromStart + window_size*(i+1),
}
for i in range((chromEnd - chromStart)//window_size)
])
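
# Usage sketch: split a 1000 bp window on chr1 into five 200 bp tiles
# (the coordinates are made up for illustration).
tiles = _tessellate_bed("chr1", 1000, 2000, window_size=200)
# tiles["chromStart"] -> [1000, 1200, 1400, 1600, 1800]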
| 18,877
|
def _replace_variables(dictionary):
"""Replace environment variables in a nested dict."""
for path in _walk(dictionary):
value = path.pop()
if isinstance(value, str) and _ispath(value):
value = os.path.expandvars(value)
value = pathlib.Path(value)
last_key = path.pop()
sub_dict = dictionary
for key in path:
sub_dict = sub_dict[key]
sub_dict[last_key] = value
return dictionary
| 18,878
|
def compute_locksroot(locks: PendingLocksState) -> Locksroot:
"""Compute the hash representing all pending locks
The hash is submitted in TokenNetwork.settleChannel() call.
"""
return Locksroot(keccak(b"".join(locks.locks)))
| 18,879
|
def refresh_interface(self):
"""
Refresh the Frelatage CLI
"""
# Title
title = "Frelatage {version} ({function_name})".format(
version=__version__, function_name=self.method.__name__
).center(105)
# Execs per second
current_second = int(time.time())
if current_second != self.current_second:
self.executions_per_second = self.last_seconds_executions
self.last_seconds_executions = 0
self.current_second = int(time.time())
execs_per_second = str(self.executions_per_second).rjust(6)
# Process timing
run_time = format_time_elapsed(self.fuzz_start_time).ljust(32)
last_new_path_time = format_time_elapsed(self.last_new_path_time).ljust(32)
last_unique_crash_time = format_time_elapsed(self.last_unique_crash_time).ljust(32)
last_unique_timeout_time = format_time_elapsed(self.last_unique_timeout_time).ljust(
32
)
# Overall results
uniques_crashes_count = str(self.unique_crashes).ljust(9)
uniques_timeouts_count = str(self.unique_timeout).ljust(9)
# Finding in depth
total_paths_count = str(len(self.reached_instructions)).ljust(22)
favored_paths_count = len(self.favored_pairs)
favored_paths_rate = (
round(int(favored_paths_count) / int(total_paths_count) * 100, 2)
if int(total_paths_count)
else 0.00
)
favored_paths = "{favored_paths} ({rate}%)".format(
favored_paths=favored_paths_count, rate=favored_paths_rate
).ljust(22)
# Crashes
total_crashes = "{crashes} ({uniques} uniques)".format(
crashes=str(self.total_crashes), uniques=str(self.unique_crashes)
).ljust(22)
total_timeouts = "{total_timeouts} [{timeout_delay} sec]".format(
total_timeouts=self.total_timeouts, timeout_delay=Config.FRELATAGE_TIMEOUT_DELAY
).ljust(22)
# Progress
cycles_count = str(self.cycles_count).ljust(12)
total_executions = str(self.inputs_count).ljust(12)
# Stage progress
current_argument = self.queue.position + 1
total_arguments_count = len(self.queue.arguments)
current_stage = "{current_argument}/{total_arguments_count}".format(
current_argument=current_argument, total_arguments_count=total_arguments_count
).ljust(16)
stage_executions = str(self.stage_inputs_count).ljust(16)
# Interface
self.screen.addstr(
0,
0,
"""
{title}
┌──── Process timing ─────────────────────────────────────┬─────── Finding in depth ──────────────────────┐
│ Run time :: {run_time}│ Favored paths :: {favored_paths}│
│ Last new path :: {last_new_path_time}│ Total paths :: {total_paths_count}│
│ Last unique crash :: {last_unique_crash_time}│ Total timeouts :: {total_timeouts}│
│ Last unique timeout :: {last_unique_timeout_time}│ Total crashes :: {total_crashes}│
├──── Overall result ──────────────┬─── Global progress ──┴──────────────┬──── Stage progress ─────────────┤
│ Uniques crashes :: {uniques_crashes_count}│ Cycles done :: {cycles_count}│ Stage :: {current_stage}│
│ Unique timeouts :: {uniques_timeouts_count}│ Total executions :: {total_executions}│ Stage execs :: {stage_executions}│
└──────────────────────────────────┴─────────────────────────────────────┴─────────────────────────────────┘
[ {execs_per_second} exec/s ]
""".format(
title=title,
execs_per_second=execs_per_second,
run_time=run_time,
last_new_path_time=last_new_path_time,
last_unique_crash_time=last_unique_crash_time,
last_unique_timeout_time=last_unique_timeout_time,
uniques_crashes_count=uniques_crashes_count,
uniques_timeouts_count=uniques_timeouts_count,
favored_paths=favored_paths,
total_paths_count=total_paths_count,
total_timeouts=total_timeouts,
total_crashes=total_crashes,
cycles_count=cycles_count,
total_executions=total_executions,
current_stage=current_stage,
stage_executions=stage_executions,
),
)
self.screen.refresh()
| 18,880
|
def flatten(text: Union[str, List[str]], separator: str = None) -> str:
"""
Flattens the text item to a string. If the input is a string, that
same string is returned. Otherwise, the text is joined together with
the separator.
Parameters
----------
text : Union[str, List[str]]
The text to flatten
separator : str, default=None
The separator to join the list with. If `None`, the separator will be " "
Returns
-------
str
The flattened text
"""
separator = separator or " "
if isinstance(text, list):
return separator.join(text)
return text
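
# Usage sketch:
flatten("already a string")               # -> "already a string"
flatten(["foo", "bar"])                   # -> "foo bar"
flatten(["a", "b", "c"], separator=", ")  # -> "a, b, c"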
| 18,881
|
def convert_byte32_arr_to_hex_arr(byte32_arr):
"""
This function takes in an array of byte32 strings and
returns an array of hex strings.
Parameters:
byte32_arr Strings to convert from a byte32 array to a hex array
"""
hex_ids = []
for byte32_str in byte32_arr:
hex_ids = hex_ids + [byte32_str.hex()]
return hex_ids
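
# Usage sketch: byte32 values are assumed to arrive as 32-byte `bytes` objects
# (e.g. from web3 event logs).
ids = convert_byte32_arr_to_hex_arr([b"\x00" * 31 + b"\x01", b"\xff" * 32])
# ids -> ['00...01', 'ff...ff']  (64 hex characters each)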
| 18,882
|
def process_ring(cat, ref, pairs, ringpairs, area, radius, sigma, sigma_init=None, gamma=None, niter=10, nextr=100, mid=True,
printprogress=True, printerror=False):
"""
Estimate omega with robust algorithm in rings. Obtain optimal estimate from best ring.
Internal function to process pairs that are already split into rings.
:param cat: Input catalog dataframe with (x,y,z) coordinates.
:param ref: Reference catalog dataframe with (x,y,z) coordinates.
:param area: Area of the footprint, units in steradians.
:param radius: Separation radius threshold.
:param sigma: True value of sigma, the astrometric uncertainty of the catalog.
:param sigma_init: If not None, assign a large initial value for sigma.
:param gamma: Fraction of good matches among all pairs. If None, will be computed in estimation.
:param niter: Min number of iterations for the convergence.
:param nextr: Max number of additional iterations for the convergence.
    :param mid: Boolean value, indicates whether the reference is taken as the midpoints of the two catalogs.
    :param printprogress: Boolean value, if true shows progress bar.
    :param printerror: Boolean value, indicates whether to report errors.
:type cat: pandas.DataFrame
:type ref: pandas.DataFrame
:type area: float
:type radius: float
:type sigma: float
:type sigma_init: None or float
:type gamma: None or float
:type niter: int
:type nextr: int
:type mid: bool
:type printprogress: bool
:type printerror: bool
:returns: (bestomega, bestpairs, bestwt) omega: 3D transformation vector estimated in
the optimal ring by robust algorithm,
bestpairs: pairs in the optimal ring,
bestwt: robust weights for bestpairs.
"""
sigma_init = sigma_init or 25 * sigma # heuristic estimate for convergence parameter
nrings = len(ringpairs)
if printprogress:
print(f"Split {pairs.shape[0]} pairs into {nrings} overlapping rings")
print(f"process_ring: sigma {sigma} sigma_init {sigma_init}")
# gamma = gamma or min(cat.shape[0],ref.shape[0]) / pairs.shape[0]
if not gamma:
# count just sources actually included in pairs
# this makes a difference when search radius is small and many sources don't match
n1 = (np.bincount(pairs[:,0])!=0).sum()
n2 = (np.bincount(pairs[:,1])!=0).sum()
gamma = min(n1,n2) / pairs.shape[0]
# increase gamma because expected match is higher in the correct ring
#gfac = pairs.shape[0] / np.mean([x.shape[0] for x in ringpairs])
#gamma = gamma * gfac
#if printprogress and gfac != 1:
# print(f"Increased gamma by factor {gfac:.2f} to {gamma}")
# Initial best sum(weight)=0
bestwtsum = 0.0
bestomega = None
bestring = nrings
if printprogress:
# print progress bar (but disable on non-TTY output)
# disable = None
# print progress bar
disable = False
else:
# do not print progress bar
disable = True
sys.stdout.flush()
loop = tqdm(total=nrings, position=0, leave=False, disable=disable)
# loop over all pairs to find optimal omega estimate
for iring in range(nrings):
rpairs = ringpairs[iring]
# paired catalog and reference in ring
r,c = getRC(cat, ref, rpairs, mid)
# estimate omega using robust algorithm
try:
omega, w = rob_est(r, c, sigma, gamma, area, sigma_init=sigma_init, niter=niter, nextr=nextr,
printerror=printerror, verbose=printprogress>1)
except SingularMatrixError as e:
if printerror:
print(e)
print('continuing to next ring')
continue
# Sum of weights is the number of good pairs
wtsum = w.sum()
if wtsum > bestwtsum:
bestring = iring
bestpairs = rpairs
bestomega = omega
bestwtsum = wtsum
bestwt = w
if not printerror:
loop.set_description("Computing...".format(iring))
loop.update(1)
loop.close()
if bestomega is None:
if printerror:
print("process_ring: no solution found")
return np.zeros(3), np.zeros((0,2),dtype=int), np.zeros(0,dtype=float)
return bestomega, bestpairs, bestwt
| 18,883
|
def backtest_loop(
start_time: Union[pd.Timestamp, str],
end_time: Union[pd.Timestamp, str],
trade_strategy: BaseStrategy,
trade_executor: BaseExecutor,
) -> Tuple[PortfolioMetrics, Indicator]:
"""backtest function for the interaction of the outermost strategy and executor in the nested decision execution
please refer to the docs of `collect_data_loop`
Returns
-------
portfolio_metrics: PortfolioMetrics
it records the trading portfolio_metrics information
indicator: Indicator
it computes the trading indicator
"""
return_value = {}
for _decision in collect_data_loop(start_time, end_time, trade_strategy, trade_executor, return_value):
pass
return return_value.get("portfolio_metrics"), return_value.get("indicator")
| 18,884
|
def test_assert_sets_equal(test_case: SetsEqualTestCase):
"""
GraphHelper.sets_equals and related functions work correctly in both
positive and negative cases.
"""
lhs_graph: Graph = Graph().parse(data=test_case.lhs, format=test_case.lhs_format)
rhs_graph: Graph = Graph().parse(data=test_case.rhs, format=test_case.rhs_format)
public_id = URIRef("example:graph")
lhs_cgraph: ConjunctiveGraph = ConjunctiveGraph()
lhs_cgraph.parse(
data=test_case.lhs, format=test_case.lhs_format, publicID=public_id
)
rhs_cgraph: ConjunctiveGraph = ConjunctiveGraph()
rhs_cgraph.parse(
data=test_case.rhs, format=test_case.rhs_format, publicID=public_id
)
assert isinstance(lhs_cgraph, ConjunctiveGraph)
assert isinstance(rhs_cgraph, ConjunctiveGraph)
graph: Graph
cgraph: ConjunctiveGraph
for graph, cgraph in ((lhs_graph, lhs_cgraph), (rhs_graph, rhs_cgraph)):
GraphHelper.assert_sets_equals(graph, graph, True)
GraphHelper.assert_sets_equals(cgraph, cgraph, True)
GraphHelper.assert_triple_sets_equals(graph, graph, True)
GraphHelper.assert_triple_sets_equals(cgraph, cgraph, True)
GraphHelper.assert_quad_sets_equals(cgraph, cgraph, True)
if not test_case.equal:
with pytest.raises(AssertionError):
GraphHelper.assert_sets_equals(
lhs_graph, rhs_graph, test_case.ignore_blanks
)
with pytest.raises(AssertionError):
GraphHelper.assert_sets_equals(
lhs_cgraph, rhs_cgraph, test_case.ignore_blanks
)
with pytest.raises(AssertionError):
GraphHelper.assert_triple_sets_equals(
lhs_graph, rhs_graph, test_case.ignore_blanks
)
with pytest.raises(AssertionError):
GraphHelper.assert_triple_sets_equals(
lhs_cgraph, rhs_cgraph, test_case.ignore_blanks
)
with pytest.raises(AssertionError):
GraphHelper.assert_quad_sets_equals(
lhs_cgraph, rhs_cgraph, test_case.ignore_blanks
)
else:
GraphHelper.assert_sets_equals(lhs_graph, rhs_graph, test_case.ignore_blanks)
GraphHelper.assert_sets_equals(lhs_cgraph, rhs_cgraph, test_case.ignore_blanks)
GraphHelper.assert_triple_sets_equals(
lhs_graph, rhs_graph, test_case.ignore_blanks
)
GraphHelper.assert_triple_sets_equals(
lhs_cgraph, rhs_cgraph, test_case.ignore_blanks
)
GraphHelper.assert_quad_sets_equals(
lhs_cgraph, rhs_cgraph, test_case.ignore_blanks
)
| 18,885
|
def failOnNonTransient(func):
"""Only allow function execution when immutable is transient."""
@functools.wraps(func)
def wrapper(inst, *args, **kwargs):
# make the call fail if the object is not transient
if inst.__im_state__ != interfaces.IM_STATE_TRANSIENT:
raise AttributeError('Cannot update locked immutable object.')
return func(inst, *args, **kwargs)
return wrapper
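
# Hypothetical sketch: guard a setter so it only works while the object is
# still transient (the class and attribute names are made up; `interfaces`
# is the same module referenced inside the decorator above).
class Record:
    __im_state__ = interfaces.IM_STATE_TRANSIENT

    @failOnNonTransient
    def set_value(self, value):
        self._value = value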
| 18,886
|
def text_has_emoji(text):
"""ๅคๆญๆๆฌไธญๆฏๅฆๅ
ๅซemoji"""
for character in text:
if character in emoji.UNICODE_EMOJI:
return True
return False
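
# Usage sketch (relies on a pre-1.0 `emoji` package, where UNICODE_EMOJI is a flat dict):
text_has_emoji("hello 😀")   # -> True
text_has_emoji("hello")      # -> False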
| 18,887
|
def rod_faces(n1, n2, xform, dim1, dim2): # validated
"""
defines points in a circle with triangle based end caps
"""
# 4,8,12,16,... becomes 5,9,13,17,...
thetas = np.radians(np.linspace(0., 360., 17))
ntheta = len(thetas)
nfaces = 0
all_faces = []
points_list = []
x = np.zeros(ntheta)
for nid, dim in [(n1, dim1), (n2, dim2)]:
radius, = dim
y = radius * np.cos(thetas)
z = radius * np.sin(thetas)
xyz = np.vstack([x, y, z]).T
assert xyz.shape == (ntheta, 3), xyz.shape
pointsi = np.dot(xyz, xform) + nid
points_list.append(pointsi)
# the tri_cap is made from points that aren't defined yet
# (the n1/n2 end points)
tris = tri_cap(ntheta)
# we need to use the tolist because we're going to
# combine quads and tris (the elements have different
# lengths)
all_faces += (nfaces + tris).tolist()
nfaces += tris.shape[0]
# the main cylinder uses the points defined independent
# of the points n1/n2
faces = elements_from_quad(2, ntheta)
all_faces += faces.tolist()
# used by the tri_caps
points_list.append(n1)
points_list.append(n2)
points = np.vstack(points_list)
return all_faces, points, points.shape[0]
| 18,888
|
def xfork():
""" xfork() is similar to fork but doesn't throw an OSError exception.
Returns -1 on error, otherwise it returns the same value as fork() does.
"""
try:
ret = fork()
except OSError:
ret = -1
return ret
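
# Usage sketch: fork without wrapping the call in try/except.
pid = xfork()
if pid == 0:
    pass   # child process
elif pid > 0:
    pass   # parent process; pid is the child's PID
else:
    print("fork failed")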
| 18,889
|
def cigar_segment_bounds(cigar, start):
"""
Determine the start and end positions on a chromosome of a non-no-matching part of an
RNA-seq read based on a read's cigar string.
cigar string meaning: http://bioinformatics.cvr.ac.uk/blog/tag/cigar-string/
Example:
'50M25N50M' with start = 100 -> [100, 149, 175, 224]. Note that start and end integers
are inclusive, i.e. all positions at or between 100 and 149 and at or between 175 and 224
are covered by reads.
:param cigar: str a read's cigar string, e.g. "49M165N51M"
:param start: int a read's start position on a chromosome
:return: list of integers representing cigar match start, end points, in order of matching subsequences
"""
# if CIGAR string is a single full match (i.e. "<positive integer>M")
# extract length of the match, return match segment.
full_match = re.match(r'(\d+)M$', cigar)
if full_match is not None:
extension = int(cigar[:(full_match.span()[-1] - 1)]) - 1
return [start, start + extension]
# break up cigar string into list of 2-tuples (letter indicative of match/no match, run length integer).
cigar_split = [(v, int(k)) for k, v in re.findall(r'(\d+)([A-Z]?)', cigar)]
# initialize parse params.
# Allow for "hard clipping" where aligned read can start with non-matching region (https://bit.ly/2K6TJ5Y)
augment = False
any_match = False
# output storage.
match_idx_list = list()
for idx in range(len(cigar_split)):
segment = cigar_split[idx]
if segment[0] == 'M':
any_match = True
extension = segment[1] - 1 # end of a match run is inclusive.
augment = True
match_idx_list += [start, start + extension] # append a match run to output.
else:
if augment:
extension = segment[1] + 1
augment = False
else:
extension = segment[1]
start += extension
# if no matching regions found, throw error.
if not any_match:
raise ValueError('CIGAR string {0} has no matching region.'.format(cigar))
return match_idx_list
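
# Usage sketch reproducing the docstring example plus a single full match:
cigar_segment_bounds("50M25N50M", start=100)  # -> [100, 149, 175, 224]
cigar_segment_bounds("100M", start=1)         # -> [1, 100]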
| 18,890
|
def augument(data_dir, img_path, steering_angle, range_x=100, range_y=10):
"""
    Generate an augmented image and adjust the steering angle.
(The steering angle is associated with the image)
"""
image, steering_angle = choose_image(data_dir, img_path, steering_angle)
image, steering_angle = random_flip(image, steering_angle)
image, steering_angle = random_translate(image, steering_angle, range_x, range_y)
image = random_shadow(image)
image = random_brightness(image)
return image, steering_angle
| 18,891
|
def ldns_buffer_limit(*args):
"""LDNS buffer."""
return _ldns.ldns_buffer_limit(*args)
| 18,892
|
def language():
"""
Loads languages.
:return: None
"""
if os.path.isfile(omw_db):
omw_connection = sqlite3.connect(omw_db)
cursor = omw_connection.cursor()
known = dict()
cursor.execute("""SELECT id, iso639 from lang""")
for (lid, l3) in cursor:
known[l3] = lid
for l3 in "eng cmn".split():
# for l3 in "eng als arb bul cmn dan ell fas fin fra heb hrv ita jpn cat eus glg spa ind zsm nno nob pol por slv swe tha aar afr aka amh asm aze bam bel ben bod bos bre ces cor cym deu dzo epo est ewe fao ful gla gle glv guj hau hin hun hye ibo iii ina isl kal kan kat kaz khm kik kin kir kor lao lav lin lit lub lug mal mar mkd mlg mlt mon mya nbl nde nep nld oci ori orm pan pus roh ron run rus sag sin slk sme sna som sot srp ssw swa tam tel tgk tir ton tsn tso tur ukr urd uzb ven vie xho yor zul ang arz ast chr fry fur grc hat hbs ido kur lat ltg ltz mri nan nav rup san scn srd tat tgl tuk vol yid yue".split():
if l3 in known: ### already in
continue
l = languages.get(part3=l3)
if l.part1: ### use the two letter code if it exists
bcp47 = l.part1
else:
bcp47 = l3
# INSERT LANG DATA (CODES AND NAMES)
u = 'omw'
cursor.execute("""INSERT INTO lang (bcp47, iso639, u)
VALUES (?,?,?)""", (bcp47, l3, u))
cursor.execute("""SELECT MAX(id) FROM lang""")
lang_id = cursor.fetchone()[0]
cursor.execute("""INSERT INTO lang_name (lang_id, in_lang_id, name, u)
VALUES (?,?,?,?)""", (lang_id, known['eng'], l.name, u))
omw_connection.commit()
omw_connection.close()
sys.stdout.write('Loading languages finished\n')
else:
sys.stdout.write('Unable to find database (%s) file\n' % omw_db)
return None
| 18,893
|
def test_data_folder():
"""
This fixture returns path to folder with shared test resources among all tests
"""
data_dir = os.path.join(script_dir, "testdata")
if not os.path.exists(data_dir):
os.mkdir(data_dir)
files_to_download = ["https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/messi5.jpg",
"https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/basketball1.png",
"https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/Megamind.avi",
"https://github.com/ARM-software/ML-zoo/raw/master/models/object_detection/ssd_mobilenet_v1/tflite_uint8/ssd_mobilenet_v1.tflite",
"https://git.mlplatform.org/ml/ethos-u/ml-embedded-evaluation-kit.git/plain/resources/kws/samples/yes.wav",
"https://raw.githubusercontent.com/Azure-Samples/cognitive-services-speech-sdk/master/sampledata/audiofiles/myVoiceIsMyPassportVerifyMe04.wav"
]
for file in files_to_download:
path, filename = ntpath.split(file)
file_path = os.path.join(data_dir, filename)
if not os.path.exists(file_path):
print("\nDownloading test file: " + file_path + "\n")
urllib.request.urlretrieve(file, file_path)
return data_dir
| 18,894
|
def _call_godot(environment, source, arguments, target):
"""Runs the Godot executable with the specified command line arguments
@param environment Environment in which the Godot executable will be run
@param source Input files that will be involved
@param arguments Arguments that will be passed to the Godot executable
@param target Output files that should result from the call"""
if 'GODOT_EXECUTABLE' in environment:
        godot_executable = environment['GODOT_EXECUTABLE']
else:
if 'GODOT_VERSION' in environment:
godot_version = environment['GODOT_VERSION']
else:
godot_version = _default_godot_version
godot_executable = _find_godot_executable(godot_version)
#environment['GODOT_EXECUTABLE'] = godot_executable
#if source is None:
# source = godot_executable
return environment.Command(
target, source, '"' + godot_executable + '" ' + arguments
)
| 18,895
|
def random_seeded(func):
""" Decorator that uses the `random_seed` parameter from functions to seed the RNG. """
@wraps(func)
def wrapper(*args, random_seed: int = None, **kwargs):
_RNG.seed(random_seed)
return func(*args, **kwargs)
return wrapper
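
# Hypothetical sketch: _RNG is assumed to be a module-level random.Random() instance.
@random_seeded
def roll_dice(n: int):
    return [_RNG.randint(1, 6) for _ in range(n)]

assert roll_dice(3, random_seed=42) == roll_dice(3, random_seed=42)  # reproducible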
| 18,896
|
def getCRS(station_name=None, crs=None, autoCreate=True):
"""
Method to get CRS code for the give station name. This method may not
scale nicely for a production environment. Use a proper DB instead.
@param station_name: Some characters for the station name.
@param crs: CRS code if known
@param autoCreate: Boolean to indicate if the sqlite DB should be created if not exist.
"""
# Create the SQLite DB of CRS if not found already. This can be turned off
# by passing autoCreate = False.
if not os.path.exists(CRS_SQLITE_DB) and autoCreate:
print "Attempting to create CRS DB for first run ..."
recreateDB()
fetchFromUrl()
conn = sqlite3.connect(CRS_SQLITE_DB)
c = conn.cursor()
if station_name:
c.execute('SELECT * from crstab where station_name like "%%%s%%"' %station_name.lower())
elif crs:
c.execute('SELECT * from crstab where crs = "%s"' %crs.lower())
else:
return None
ret = c.fetchall()
c.close()
conn.close()
return ret
| 18,897
|
def _initialize_object_from_dict(object_dict, parent=None):
"""Initialize a python object from dict."""
provider = object_dict['provider']
args = object_dict.get('args') or []
kwargs = object_dict.get('kwargs') or {}
obj = _get_object_by_referance(provider)
if parent is not None:
kwargs.update({'parent': parent})
return obj(*args, **kwargs)
| 18,898
|
def from_hdf(in_path, index=None, keypoints=True, descriptors=True):
"""
For a given node, load the keypoints and descriptors from a hdf5 file. The
keypoints and descriptors kwargs support returning only keypoints or descriptors.
The index kwarg supports returning a subset of the data.
Parameters
----------
in_path : str
handle to the file
index : iterable
an h5py accepted indexer to pull only a subset of the keypoints
off disk. Default is None to pull all keypoints.
keypoints : bool
if True (default) return the keypoints
descriptors : bool
if True (default) return the descriptors
Returns
-------
keypoints : DataFrame
A pandas dataframe of keypoints.
descriptors : ndarray
A numpy array of descriptors
"""
if isinstance(in_path, str):
hdf = io_hdf.HDFDataset(in_path, mode='r')
else:
hdf = in_path
outd = '/descriptors'
outk = '/keypoints'
if index is not None:
index=np.asarray(index)
# The indices into HDF have to be sorted lists. When indices get passed in
# they are frequently ordered, so this pulls the data using the sorted
# index and then reorders the data.
i = np.argsort(index)
ii = np.argsort(i)
        # It is important to use sorted() so that an in-place sort is NOT used.
if descriptors:
desc = hdf[outd][index[i].tolist()]
desc = desc[ii]
if keypoints:
raw_kps = hdf[outk][index[i].tolist()]
raw_kps = raw_kps[ii]
else:
# Unlike numpy hdf does not handle NoneType as a proxy for `:`
if descriptors:
desc = hdf[outd][:]
if keypoints:
raw_kps = hdf[outk][:]
if keypoints:
index = raw_kps['index']
clean_kps = utils.remove_field_name(raw_kps, 'index')
columns = clean_kps.dtype.names
allkps = pd.DataFrame(data=clean_kps, columns=columns, index=index)
if isinstance(in_path, str):
hdf = None
if keypoints and descriptors:
return allkps, desc
elif keypoints:
return allkps
else:
return desc
| 18,899
|