content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_bucket_metadata(bucket, user_settings=None, access=ServiceAccount.STORAGE):
    """
    Retrieves metadata about the given bucket.

    :param str bucket: name of the Google cloud storage bucket
    :param dict user_settings: optional, A dictionary of settings specifying credentials for
        appropriate services. If one is not provided, then this method must be invoked by an
        EngineThread which defines the settings
    :param access: must be 'storage'. Other values are for internal use only
    :return: the bucket resource (metadata dict) returned by the storage API's
        ``buckets.get`` call
    """
    # Select the service client matching the requested access level.
    if access == ServiceAccount.STORAGE:
        service = CredentialManager.get_client_storage_service(user_settings)
    elif access == ServiceAccount.EARTH_ENGINE:
        service = CredentialManager.get_earth_engine(user_settings)
    else:
        # NOTE(review): any other access value falls through to the server-side
        # storage credentials - confirm this is intended for internal callers only.
        service = CredentialManager.get_server_storage_service(user_settings)
    # Make a request to buckets.get to retrieve the metadata of the
    # specified bucket (not its object listing).
    req = service.buckets().get(bucket=bucket)
    # Retry transient API failures up to 3 times before surfacing the error.
    return req.execute(num_retries=3)
def remove_conjunction(conjunction: str, utterance: str) -> str:
    """Remove the specified conjunction from the utterance.

    For example, remove the " and" left behind from extracting "1 hour" and
    "30 minutes" from "for 1 hour and 30 minutes".  Leaving it behind can
    confuse other intent parsing logic.

    Args:
        conjunction: translated conjunction (like the word "and") to be
            removed from utterance
        utterance: Full request, e.g. "set a 30 second timer"

    Returns:
        The utterance with the conjunction (and its leading whitespace)
        removed.
    """
    # re.escape guards against conjunctions containing regex metacharacters;
    # the two \s match the double space left behind by the extraction.
    pattern = r"\s\s{}".format(re.escape(conjunction))
    return re.sub(pattern, "", utterance, flags=re.IGNORECASE)
def othertitles(hit):
    """Split a hit.Hit_def that contains multiple titles up, splitting out the
    hit ids from the titles.

    Each '>'-separated entry after the first is assumed to look like
    "<db>|<hitid>|<id> <title>".

    :param hit: BLAST hit object whose ``Hit_def.text`` holds the titles
    :return: list of dicts with keys ``id``, ``hitid``, ``fullid``, ``title``
    """
    id_titles = hit.Hit_def.text.split('>')
    titles = []
    for entry in id_titles[1:]:
        fullid, title = entry.split(' ', 1)
        # Local renamed from ``id`` to avoid shadowing the builtin;
        # the returned dict keys are unchanged.
        hitid, seq_id = fullid.split('|', 2)[1:3]
        titles.append(dict(id=seq_id,
                           hitid=hitid,
                           fullid=fullid,
                           title=title))
    return titles
def unlock_device(password=None, device=None) -> bool:
    """
    Unlocks a device given a device name and the password.

    Types the password via ``adb shell input text`` and submits it with
    keyevent 66 (ENTER).

    :param password: the device's password/PIN
    :param device: adb serial of the target device; may be omitted when exactly
        one device is connected
    :return: True if success, False if error
    """
    if device is None and len(get_connected_devices()) != 1:
        print("No device was specified and/or multiple devices are connected")
        return False
    # Only pass "-s <serial>" when a device was explicitly given.
    serial_args = [] if device is None else ["-s", device]
    command_input = ["adb"] + serial_args + ["shell", "input", "text", password]
    # subprocess requires string arguments; the keyevent code must be "66",
    # not the int 66 (which raises TypeError).
    command_submit = ["adb"] + serial_args + ["shell", "input", "keyevent", "66"]
    proc = subprocess.Popen(command_input, stdout=None)
    proc.wait()
    proc.terminate()
    proc = subprocess.Popen(command_submit, stdout=None)
    proc.wait()
    proc.terminate()
    return True
def close(account_id: str) -> None:
    """
    Close the account identified by *account_id* and persist the change.

    :param account_id: the account to close
    :return: Nothing
    """
    logger.info('closing-account', account_id=account_id)
    with transaction.atomic():
        acct = Account.objects.get(pk=account_id)
        acct.close()
        acct.save()
def create_from_mne_epochs(list_of_epochs, window_size_samples,
                           window_stride_samples, drop_last_window):
    """Create WindowsDatasets from mne.Epochs

    Parameters
    ----------
    list_of_epochs: array-like
        list of mne.Epochs
    window_size_samples: int
        window size
    window_stride_samples: int
        stride between windows
    drop_last_window: bool
        whether or not have a last overlapping window, when
        windows do not equally divide the continuous signal

    Returns
    -------
    windows_datasets: BaseConcatDataset
        X and y transformed to a dataset format that is compatible with skorch
        and braindecode
    """
    # Prevent circular import
    from ..preprocessing.windowers import _check_windowing_arguments
    _check_windowing_arguments(0, 0, window_size_samples,
                               window_stride_samples)
    list_of_windows_ds = []
    for epochs in list_of_epochs:
        # mne events: column 0 is the sample index, column 2 the event id.
        event_descriptions = epochs.events[:, 2]
        original_trial_starts = epochs.events[:, 0]
        stop = len(epochs.times) - window_size_samples
        # already includes last incomplete window start
        starts = np.arange(0, stop + 1, window_stride_samples)
        if not drop_last_window and starts[-1] < stop:
            # if last window does not end at trial stop, make it stop there
            starts = np.append(starts, stop)
        # Synthetic events marking each window start within a trial; the id -1
        # is a placeholder - the real target is carried in the metadata frame.
        fake_events = [[start, window_size_samples, -1] for start in
                       starts]
        for trial_i, trial in enumerate(epochs):
            metadata = pd.DataFrame({
                'i_window_in_trial': np.arange(len(fake_events)),
                'i_start_in_trial': starts + original_trial_starts[trial_i],
                'i_stop_in_trial': starts + original_trial_starts[
                    trial_i] + window_size_samples,
                'target': len(fake_events) * [event_descriptions[trial_i]]
            })
            # window size - 1, since tmax is inclusive
            mne_epochs = mne.Epochs(
                mne.io.RawArray(trial, epochs.info), fake_events,
                baseline=None,
                tmin=0,
                tmax=(window_size_samples - 1) / epochs.info["sfreq"],
                metadata=metadata)
            # No amplitude criteria here; drops only windows mne marks bad.
            mne_epochs.drop_bad(reject=None, flat=None)
            windows_ds = WindowsDataset(mne_epochs)
            list_of_windows_ds.append(windows_ds)
    return BaseConcatDataset(list_of_windows_ds)
def definstance(name, ty, expr):
    """
    Define a type-class instance called *name* of type *ty*, witnessed by *expr*.

    Arguments:
    - `name`: a string
    - `ty`: a type of the form ClassName(t1,...,tn)
    """
    root, _ = root_app(root_clause(ty))
    # Guard clause: the head of the clause must name a class.
    if not root.info.is_class:
        raise Exception("Error in definition of {0!s}:"\
                        "expected {1!s} to be a class name"\
                        .format(name, root))
    instance = defexpr(name, expr, type=ty, unfold=[root.name])
    ctxt = conf.current_ctxt()
    ctxt.class_instances[name] = instance.type
    ctxt.hyps[name] = instance.type
    return instance
def do(*args, **kwargs):
    """
    Function to perform steps defined under ``nornir:actions`` configuration
    section at:

    * Minion's configuration
    * Minion's grains
    * Minion's pillar data
    * Master configuration (requires ``pillar_opts`` to be set to True in Minion
      config file in order to work)
    * File on master file system

    To retrieve actions content Salt ``nr.do`` uses ``config.get`` execution module
    function with ``merge`` key set to ``True``.

    Each step definition requires these keywords to be defined:

    * ``function`` - mandatory, name of any execution module function to run
    * ``args`` - optional, any arguments to use with function
    * ``kwargs`` - optional, any keyword arguments to use with function
    * ``description`` - optional, used by ``dir`` to list action description

    Any other keywords defined inside the step are ignored.

    :param stop_on_error: (bool) if True (default) stops execution on error in step,
        continue execution on error if False
    :param filepath: (str) path to file with actions steps
    :param default_renderer: (str) shebang string to render file using ``slsutil.renderer``,
        default ``jinja|yaml``
    :param describe: (bool) if True, returns action content without executing it, default is False
    :param kwargs: (any) additional ``kwargs`` to use with actions steps, ``kwargs`` override
        ``kwargs`` dictionary defined within each step, for example, in command
        ``salt nrp1 nr.do configure_ntp FB="*core*"``, ``FB`` argument will override ``FB`` arguments
        defined within steps.
    :param tf: (bool) if True, ``ToFileProcessor`` saves each step results in file
        named after step name if no ``tf`` argument provided within step, default is False
    :param diff: (bool) if True, ``DiffProcessor`` runs diff for each step result using files
        named after step name if no ``diff`` argument provided within step, default is False
    :returns: dictionary with keys: ``failed`` bool, ``result`` list; ``result`` key contains
        a list of results for steps; If ``stop_on_error`` set to ``True`` and error happens, ``failed``
        key set to ``True``

    Special action names ``dir`` and ``dir_list`` used to list all actions available for
    proxy minion where ``dir`` returns table and ``dir_list`` produces a list of actions.

    .. note:: if ``filepath`` argument provided, actions defined in other places are ignored; file
       loaded using Saltstack ``slsutil.renderer`` execution module function, as a result
       file can contain any of Saltstack supported renderers content and can be located
       at any url supported by ``cp.get_url`` execution module function. File content must
       render to a dictionary keyed by actions names.

    Sample actions steps definition using proxy minion pillar::

        nornir:
          actions:
            awr:
              function: nr.cli
              args: ["wr"]
              kwargs: {"FO": {"platform": "arista_eos"}}
              description: "Save Arista devices configuration"
            configure_ntp:
              - function: nr.cfg
                args: ["ntp server 1.1.1.1"]
                kwargs: {"FB": "*"}
              - function: nr.cfg
                args: ["ntp server 1.1.1.2"]
                kwargs: {"FB": "*"}
              - function: nr.cli
                args: ["show run | inc ntp"]
                kwargs: {"FB": "*"}

    Sample actions steps definition using text file under ``filepath``::

        awr:
          function: nr.cli
          args: ["wr"]
          kwargs: {"FO": {"platform": "arista_eos"}}
          description: "Save Arista devices configuration"
        configure_ntp:
          - function: nr.cfg
            args: ["ntp server 1.1.1.1"]
            kwargs: {"FB": "*"}
            description: "1. Configure NTP server 1.1.1.1"
          - function: nr.cfg
            args: ["ntp server 1.1.1.2"]
            kwargs: {"FB": "*"}
            description: "2. Configure NTP server 1.1.1.2"
          - function: nr.cli
            args: ["show run | inc ntp"]
            kwargs: {"FB": "*"}
            description: "3. Collect ntp configuration"

    Action name ``awr`` has single step defined, while ``configure_ntp`` action has multiple
    steps defined, each executed in order.

    Multiple actions names can be supplied to ``nr.do`` call.

    .. warning:: having colon ``:`` as part of action name not permitted, as ``:`` used by
       Salt ``config.get`` execution module function to split arguments on path items.

    Sample usage::

        salt nrp1 nr.do dir
        salt nrp1 nr.do dir_list
        salt nrp1 nr.do awr
        salt nrp1 nr.do configure_ntp awr stop_on_error=False
        salt nrp1 nr.do configure_ntp FB="*core*" add_details=True
        salt nrp1 nr.do awr filepath="salt://actions/actions_file.txt"

    Sample Python API usage from Salt-Master::

        import salt.client
        client = salt.client.LocalClient()
        task_result = client.cmd(
            tgt="nrp1",
            fun="nr.do",
            arg=["configure_ntp", "awr"],
            kwarg={"FB": "R[12]"},
        )
    """
    ret = {"failed": False, "result": []}
    # Drop private dunder kwargs Salt injects into module calls (__pub_*, ...).
    kwargs = {k: v for k, v in kwargs.items() if not k.startswith("__")}
    stop_on_error = kwargs.pop("stop_on_error", True)
    filepath = kwargs.pop("filepath", None)
    default_renderer = kwargs.pop("default_renderer", "jinja|yaml")
    describe = kwargs.pop("describe", False)
    tf = kwargs.pop("tf", False)
    diff = kwargs.pop("diff", False)
    # load file if filepath provided
    if filepath:
        file_content_dict = __salt__["slsutil.renderer"](
            path=filepath,
            default_renderer=default_renderer,
        )
        if not file_content_dict:
            ret["failed"] = True
            ret["result"].append({filepath: "Failed loading filepath content."})
            return ret
    # check if need to list all actions
    if "dir" in args or "dir_list" in args:
        actions_config = (
            __salt__["config.get"](key="nornir:actions", merge="recurse")
            if not filepath
            else file_content_dict
        )
        # iterate over actions and form brief list of them
        for action_name, data in actions_config.items():
            ret["result"].append(
                {
                    "action name": action_name,
                    "description": data.get("description", "")
                    if isinstance(data, dict)
                    else "\n".join([i.get("description", "") for i in data]).strip(),
                }
            )
        if "dir" in args:
            ret["result"] = TabulateFormatter(
                ret["result"],
                tabulate={"tablefmt": "grid"},
                headers=["action name", "description"],
            )
        return ret
    # run actions
    for action_name in args:
        try:
            if filepath:
                action_config = file_content_dict.get(action_name)
            else:
                action_config = __salt__["config.get"](
                    key="nornir:actions:{}".format(action_name), merge="recurse"
                )
            if not action_config:
                raise CommandExecutionError(
                    "'{}' action not loaded, content: '{}'".format(
                        action_name, action_config
                    )
                )
            elif describe:
                ret["result"].append({action_name: action_config})
                continue
            elif isinstance(action_config, dict):
                action_config = [action_config]
            # run steps
            for step in action_config:
                # form step kwargs; copy first so that the CLI-supplied kwargs
                # do not mutate the cached action definition in place
                merged_kwargs = dict(step.get("kwargs", {}))
                merged_kwargs.update(kwargs)
                # add tf ToFileProcessor name if tf_each is True
                if tf is True:
                    merged_kwargs.setdefault("tf", action_name)
                # add diff for DiffProcessor
                if diff:
                    merged_kwargs.setdefault("diff", action_name)
                # get fun name, e.g. "nr.cli" -> "cli"
                fun_name = step["function"].split(".")[1].strip()
                # run step
                log.debug(
                    "salt_nornir:nr.do running step {}, args {}, kwargs {}".format(
                        fun_name, step.get("args", []), merged_kwargs
                    )
                )
                result = globals()[fun_name](*step.get("args", []), **merged_kwargs)
                ret["result"].append({action_name: result})
        except Exception:
            # previously a bare except; Exception keeps SystemExit and
            # KeyboardInterrupt fatal while still recording step failures
            tb = traceback.format_exc()
            log.error(
                "nr.do error while running '{}' action:\n{}".format(action_name, tb)
            )
            ret["result"].append({action_name: tb})
            if stop_on_error:
                ret["failed"] = True
                break
    return ret
def read_plaintext_inputs(path: str) -> List[str]:
    """Read input texts from a plain text file, one input per line."""
    with open(path, 'r', encoding='utf8') as handle:
        lines = handle.read().splitlines()
    print(f"Done loading {len(lines)} inputs from file '{path}'")
    return lines
def extract_tool_and_dsname_from_name(row):
    """
    Split a benchmark row name like "Basecall (MB1.6K)" into the tool name
    ("Basecall") and dataset tag ("MB1.6K"), and parse the row's resource
    usage columns.

    :param row: mapping with keys 'name', 'duration', 'realtime', '%cpu',
        'peak_rss', 'peak_vmem', 'rchar', 'wchar'
    :return: tuple (toolname, dsname, reads, duration, realtime, cpu,
        peak_rss, peak_vmem, rchar, wchar)
    """
    try:
        # "Basecall (MB1.6K)" -> toolname="Basecall", dsname="(MB1.6K)"
        toolname, dsname = row['name'].strip().split(' ')
        # Strip the surrounding parentheses.
        dsname = dsname[1:-1]
    except: # No tag process
        toolname = row['name'].strip()
        dsname = 'None'
    if not dsname.startswith('MB'): # not tagged with MB
        dsname = 'None'
        reads = 0
    else:
        # "MB1.6K" -> 1.6; NOTE(review): the trailing unit letter (e.g. 'K')
        # is dropped, not applied as a multiplier, although the docstring
        # example above mentions 1600 - confirm which is intended.
        reads = float(dsname[2:-1])
    duration = parse_string_time_to_seconds(row['duration'])
    realtime = parse_string_time_to_seconds(row['realtime'])
    # '%cpu' looks like "123.4%" - drop the trailing percent sign.
    cpu = float(row['%cpu'][:-1])
    peak_rss = parse_mem_str_to_gbsize(row['peak_rss'])
    peak_vmem = parse_mem_str_to_gbsize(row['peak_vmem'])
    rchar = parse_mem_str_to_gbsize(row['rchar'])
    wchar = parse_mem_str_to_gbsize(row['wchar'])
    return toolname, dsname, reads, duration, realtime, cpu, peak_rss, peak_vmem, rchar, wchar
def pyramid_pooling(inputs, layout='cna', filters=None, kernel_size=1, pool_op='mean', pyramid=(0, 1, 2, 3, 6),
                    flatten=False, name='psp', **kwargs):
    """ Pyramid Pooling module.

    Pools the input at several pyramid levels, projects each pooled map with a
    conv block, then either upsamples every level back to the input's spatial
    size and concatenates along the channel axis, or (``flatten=True``)
    reshapes each level to a fixed-size vector and concatenates those.

    Parameters
    ----------
    inputs : tf.Tensor
        input feature map, channels-last or channels-first per ``data_format``
    layout : str
        layout string forwarded to ``ConvBlock``
    filters : int or None
        feature maps per level; defaults to ``num_channels // len(pyramid)``
    kernel_size : int
        kernel size of the per-level conv block
    pool_op : str
        pooling operation name, e.g. 'mean'
    pyramid : tuple of int
        pooling levels; level 0 passes the input through unchanged.
        NOTE(review): with ``flatten=True`` a level of 0 would reshape to
        width 0 - presumably 0 is excluded in that mode; confirm with callers.
    flatten : bool
        if True, reshape each level to (-1, level*level*filters) instead of
        upsampling
    name : str
        variable scope name
    kwargs : dict
        forwarded to ``ConvBlock`` / ``Upsample``; may contain 'data_format'

    Returns
    -------
    tf.Tensor
        concatenation of all pyramid levels
    """
    shape = inputs.get_shape().as_list()
    data_format = kwargs.get('data_format', 'channels_last')
    # Static shape entries may be None for dims unknown at graph-build time.
    static_shape = np.array(shape[1: -1] if data_format == 'channels_last' else shape[2:])
    dynamic_shape = tf.shape(inputs)[1: -1] if data_format == 'channels_last' else tf.shape(inputs)[2:]
    axis = -1 if data_format == 'channels_last' else 1
    num_channels = shape[axis]
    if filters is None:
        filters = num_channels // len(pyramid)
    with tf.variable_scope(name):
        layers = []
        for level in pyramid:
            if level == 0:
                # Level 0 keeps the original feature map.
                x = inputs
            else:
                # Pooling: static variant when spatial dims are fully known,
                # dynamic variant otherwise.
                if None not in static_shape:
                    x = _static_pyramid_pooling(inputs, static_shape, level, pool_op, name='pool-%d' % level)
                    upsample_shape = static_shape
                else:
                    x = _dynamic_pyramid_pooling(inputs, level, pool_op, num_channels, data_format)
                    upsample_shape = dynamic_shape
            # Conv block to set number of feature maps
            x = ConvBlock(layout=layout, filters=filters, kernel_size=kernel_size,
                          name='conv-%d' % level, **kwargs)(x)
            # Output either vector with fixed size or tensor with fixed spatial dimensions
            if flatten:
                x = tf.reshape(x, shape=(-1, level*level*filters),
                               name='reshape-%d' % level)
                concat_axis = -1
            else:
                x = Upsample(layout='b', shape=upsample_shape, name='upsample-%d' % level, **kwargs)(x)
                concat_axis = axis
            layers.append(x)
        x = tf.concat(layers, axis=concat_axis, name='concat')
    return x
def flush():
    """Flush out the send buffers by delegating to the module-level MPI rank actor."""
    _MPI_RANK_ACTOR.flush()
def df(r, gamma):
    """
    divergence-free function: soft-threshold *r* at level *gamma*, then
    subtract the input scaled by the fraction of surviving entries.
    """
    thresholded = soft_threshold(r, gamma)
    correction = np.mean(thresholded != 0) * r
    return thresholded - correction
def main(logging_level: str) -> None:
    """noisy-moo CLI"""
    level_by_name = {
        "critical": logging.CRITICAL,
        "debug": logging.DEBUG,
        "error": logging.ERROR,
        "info": logging.INFO,
        "warning": logging.WARNING,
    }
    # Unknown level names silently fall back to INFO.
    logging.basicConfig(
        format="%(asctime)s [%(levelname)-8s] %(message)s",
        level=level_by_name.get(logging_level, logging.INFO),
    )
def clip_3d_liang_barsky(zmin, zmax, p0, p1):
    """Clips the three-dimensional line segment in the canonial view volume by
    the algorithm of Liang and Barsky. Adapted from James D. Foley, ed.,
    __Computer Graphics: Principles and Practice__ (Reading, Mass. [u.a.]:
    Addison-Wesley, 1998), 274 as well as
    http://www.eecs.berkeley.edu/Pubs/TechRpts/1992/CSD-92-688.pdf.

    Parameters
    ----------
    zmin, zmax : float
        near/far bounds on the z coordinate
    p0, p1 : array (size 3) of float
        the endpoints to be clipped (in-place operation)

    Returns
    -------
    is_visible : bool
        True if some part of the segment lies inside the volume; the
        endpoints are then clipped in place.
    """
    x0, y0, z0 = p0
    x1, y1, z1 = p1
    # test for a trivial reject: both endpoints outside the same boundary
    if (x0 > z0 and x1 > z1) or (y0 > z0 and y1 > z1) or \
       (x0 < -z0 and x1 < -z1) or (y0 < -z0 and y1 < -z1) or \
       (z0 < zmin) and (z1 < zmin) or (z0 > zmax and z1 > zmax):
        return False
    # tmin_tmax tracks the visible parameter interval along the segment.
    # NOTE(review): clip_t appears to narrow it in place and return False
    # when the interval vanishes - confirm against clip_t's definition.
    tmin_tmax = np.array((0.0, 1.0))
    dx = x1 - x0
    dz = z1 - z0
    if clip_t(-dx - dz, x0 + z0, tmin_tmax): # right side
        if clip_t(dx - dz, -x0 + z0, tmin_tmax): # left side
            # if we got this far, part of the line is in -z <= x <= z
            dy = y1 - y0
            if clip_t(dy - dz, -y0 + z0, tmin_tmax): # bottom
                if clip_t(-dy - dz, y0 + z0, tmin_tmax): # top
                    # line is in -z <= x <= z, -z <= y <= z
                    if clip_t(-dz, z0 - zmin, tmin_tmax): # front
                        if clip_t(dz, zmax - z0, tmin_tmax): # back
                            # part of the line is visible in -z <= x <= z,
                            # -z <= y <= z, zmin <= z <= zmax
                            tmin, tmax = tmin_tmax
                            if tmax < 1:
                                p1[0] = x0 + tmax * dx
                                p1[1] = y0 + tmax * dy
                                p1[2] = z0 + tmax * dz
                            if tmin > 0:
                                p0[0] += tmin * dx
                                p0[1] += tmin * dy
                                p0[2] += tmin * dz
                            return True
    return False
def RunLatencyTest(sending_vm, receiving_vm, use_internal_ip=True):
    """Run the psping latency test.

    Uses a TCP request-response time to measure latency.

    Args:
        sending_vm: the vm to send the tcp request.
        receiving_vm: the vm acting as the server.
        use_internal_ip: whether or not to use the private IP or the public IP.

    Returns:
        list of samples representing latency between the two VMs.
    """
    server_ip = (receiving_vm.internal_ip if use_internal_ip
                 else receiving_vm.ip_address)
    # Client: psping request-response mode (-i 0 no interval, -q quiet),
    # writing a latency histogram with FLAGS.psping_bucket_count buckets.
    client_command = (
        'cd {psping_exec_dir}; '
        'sleep 2;' # sleep to make sure the server starts first.
        '.\\psping.exe /accepteula -l {packet_size} -i 0 -q '
        '-n {rr_count} -h {bucket_count} {ip}:{port}'
        ' > {out_file}').format(
            psping_exec_dir=sending_vm.temp_dir,
            packet_size=FLAGS.psping_packet_size,
            rr_count=FLAGS.psping_rr_count,
            bucket_count=FLAGS.psping_bucket_count,
            ip=server_ip,
            port=TEST_PORT,
            out_file=PSPING_OUTPUT_FILE)
    # PSPing does not have a configurable timeout. To get around this, start the
    # server as a background job, then kill it after 10 seconds
    server_command = (
        '{psping_exec_dir}\\psping.exe /accepteula -s 0.0.0.0:{port};').format(
            psping_exec_dir=sending_vm.temp_dir,
            port=TEST_PORT)
    # Run server and client in parallel; server must be listed first so it is
    # up when the client (after its 2s sleep) connects.
    # NOTE(review): 200 and 1 look like the timeout seconds and retry count of
    # RunParallelProcesses - confirm against its signature.
    process_args = [(_RunPsping, (receiving_vm, server_command), {}),
                    (_RunPsping, (sending_vm, client_command), {})]
    background_tasks.RunParallelProcesses(process_args, 200, 1)
    # Read the client's output file back and parse it into samples.
    cat_command = 'cd {psping_exec_dir}; cat {out_file}'.format(
        psping_exec_dir=sending_vm.temp_dir,
        out_file=PSPING_OUTPUT_FILE)
    output, _ = sending_vm.RemoteCommand(cat_command)
    return ParsePspingResults(output, sending_vm, receiving_vm, use_internal_ip)
def get_descriptor_list(stackdriver):
    """Return a list of all the stackdriver custom metric descriptors.

    The list is ordered with the ``compare_descriptor_types`` comparison
    function.

    :param stackdriver: object exposing ``descriptor_manager`` and ``project``
    :return: sorted list of descriptor dicts
    """
    # Local import keeps the module's import block untouched.
    from functools import cmp_to_key
    type_map = stackdriver.descriptor_manager.fetch_all_custom_descriptors(
        stackdriver.project)
    # Python 3 compatibility: dict.values() is a view (no .sort()) and
    # list.sort() no longer accepts a bare comparison function, so use
    # sorted() with cmp_to_key (works on Python 2.7+ as well).
    return sorted(type_map.values(), key=cmp_to_key(compare_descriptor_types))
def dankerize(string: str, upper_case_ratio=0.2) -> str:
    """
    Lower-case a string, randomly upper-casing characters with probability
    *upper_case_ratio*, and return the result.

    string: the string to dankerize
    upper_case_ratio: the upper_case/letter ratio
    """
    chars = []
    for ch in string:
        # Draw once per character; uniform(0, 1.0) <= ratio flips it to upper.
        if uniform(0, 1.0) <= upper_case_ratio:
            chars.append(ch.upper())
        else:
            chars.append(ch.lower())
    return "".join(chars)
def distance_constraints_too_complex(wordConstraints):
    """
    Decide whether the constraints on distances between pairs of search terms
    are too complex, i.e. whether no single word is shared by every
    constrained pair.  If they are too complex and the "distance requirements
    are strict" flag is set, the query can return invalid results, so further
    (slow) post-filtering is needed.
    """
    if wordConstraints is None or len(wordConstraints) == 0:
        return False
    shared = None
    for pair in wordConstraints:
        members = set(pair)
        # Intersect the running common-term set with each pair.
        shared = members if shared is None else shared & members
        if not shared:
            return True
    return False
def greedy_search(model,
                  decoding_function,
                  initial_ids,
                  initial_memories,
                  int_dtype,
                  float_dtype,
                  max_prediction_length,
                  batch_size,
                  eos_id,
                  do_sample,
                  time_major):
    """ Greedily decodes the target sequence conditioned on the output of the encoder and the current output prefix.

    Builds a tf.while_loop that repeatedly calls *decoding_function* on the
    last emitted token, picking either the argmax token (greedy) or a sampled
    token (``do_sample=True``) each step, until every sequence has produced
    <EOS> or *max_prediction_length* is reached.

    Returns the pair (decoded_ids, log_scores): the generated token ids of
    shape [batch_size, decoded_length] and the per-sequence sum of token
    log-probabilities.
    """
    # Declare time-dimension
    time_dim = int(not time_major)  # i.e. 0 if time_major, 1 if batch_major
    # Define the 'body for the tf.while_loop() call
    def _decoding_step(current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories):
        """ Defines a single step of greedy decoding. """
        # Propagate through decoder
        step_logits, memories = decoding_function(next_ids, current_time_step, memories)
        step_logits = model.sampling_utils.adjust_logits(step_logits)
        # Calculate log probabilities for token prediction at current time-step
        step_scores = tf.nn.log_softmax(step_logits)
        # Determine next token to be generated, next_ids has shape [batch_size]
        if do_sample:
            next_ids = tf.squeeze(tf.multinomial(step_scores, num_samples=1, output_dtype=int_dtype), axis=1)
        else:
            # Greedy decoding
            next_ids = tf.argmax(step_scores, -1, output_type=int_dtype)
        # Collect scores associated with the selected tokens
        score_coordinates = tf.stack([tf.range(batch_size, dtype=int_dtype), next_ids], axis=1)
        decoded_score += tf.gather_nd(step_scores, score_coordinates)
        # Concatenate newly decoded token ID with the previously decoded ones
        decoded_ids = tf.concat([decoded_ids, tf.expand_dims(next_ids, 1)], 1)
        # Extend next_id's dimensions to be compatible with input dimensionality for the subsequent step
        next_ids = tf.expand_dims(next_ids, time_dim)
        # Check if generation has concluded with <EOS>
        # all_finished |= tf.equal(tf.squeeze(next_ids, axis=time_dim), eos_id)
        # NOTE(review): reduce_prod(decoded_ids - eos_id) is zero once any
        # token equals eos_id, yet it is compared against eos_id rather than
        # 0 - this only marks sequences finished if eos_id == 0; confirm.
        all_finished |= tf.equal(tf.reduce_prod(decoded_ids - eos_id, axis=time_dim), eos_id)
        return current_time_step + 1, all_finished, next_ids, decoded_ids, decoded_score, memories
    # Define the termination condition for the tf.while_loop() call
    def _continue_decoding(_current_time_step, _all_finished, *_):
        """ Returns 'False' if all of the sequences in the generated sequence batch exceeded the maximum specified
        length or terminated with <EOS>, upon which the while loop is exited. """
        continuation_check = \
            tf.logical_and(tf.less(_current_time_step, max_prediction_length),
                           tf.logical_not(tf.reduce_all(_all_finished)))
        return continuation_check
    # Initialize decoding-relevant variables and containers
    current_time_step = tf.constant(1)
    all_finished = tf.fill([batch_size], False)  # None of the sequences is marked as finished
    next_ids = initial_ids
    decoded_ids = tf.zeros([batch_size, 0], dtype=int_dtype)  # Sequence buffer is empty
    decoded_score = tf.zeros([batch_size], dtype=float_dtype)
    memories = initial_memories
    # Execute the auto-regressive decoding step via while loop
    _, _, _, decoded_ids, log_scores, memories = \
        tf.while_loop(cond=_continue_decoding,
                      body=_decoding_step,
                      loop_vars=[current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories],
                      shape_invariants=[tf.TensorShape([]),
                                        tf.TensorShape([None]),
                                        tf.TensorShape([None, None]),
                                        tf.TensorShape([None, None]),
                                        tf.TensorShape([None]),
                                        get_memory_invariants(memories)],
                      parallel_iterations=10,
                      swap_memory=False,
                      back_prop=False)
    # Should return logits also, for training
    return decoded_ids, log_scores
def database_mostcited(response: Response,
                       request: Request=Query(None, title=opasConfig.TITLE_REQUEST, description=opasConfig.DESCRIPTION_REQUEST),
                       morethan: int=Query(15, title=opasConfig.TITLE_CITED_MORETHAN, description=opasConfig.DESCRIPTION_CITED_MORETHAN),
                       period: str=Query('5', title="Period (5, 10, 20, or all)", description=opasConfig.DESCRIPTION_MOST_CITED_PERIOD),
                       pubperiod: int=Query(None, title=opasConfig.TITLE_PUBLICATION_PERIOD, description=opasConfig.DESCRIPTION_PUBLICATION_PERIOD),
                       author: str=Query(None, title=opasConfig.TITLE_AUTHOR, description=opasConfig.DESCRIPTION_AUTHOR),
                       title: str=Query(None, title=opasConfig.TITLE_TITLE, description=opasConfig.DESCRIPTION_TITLE),
                       sourcename: str=Query(None, title=opasConfig.TITLE_SOURCENAME, description=opasConfig.DESCRIPTION_SOURCENAME),
                       sourcecode: str=Query(None, title=opasConfig.TITLE_SOURCECODE, description=opasConfig.DESCRIPTION_SOURCECODE),
                       sourcetype: str=Query(None, title=opasConfig.TITLE_SOURCETYPE, description=opasConfig.DESCRIPTION_PARAM_SOURCETYPE),
                       abstract:bool=Query(False, title="Return an abstract with each match", description="True to return an abstract"),
                       stat:bool=Query(False, title="Return minimal information", description="True to return minimal information for statistical tables"),
                       limit: int=Query(10, title=opasConfig.TITLE_LIMIT, description=opasConfig.DESCRIPTION_LIMIT),
                       offset: int=Query(0, title=opasConfig.TITLE_OFFSET, description=opasConfig.DESCRIPTION_OFFSET)
                       ):
    """
    ## Function
    <b>Return a list of the most cited documents matching the filter
    parameters (period, author, title, source, etc.).</b>

    If you don't request abstracts returned, document permissions will not be checked or returned.
    This is intended to speed up retrieval, especially for returning large numbers of
    articles (e.g., for downloads.)

    Note: The GVPi implementation does not appear to support the limit and offset parameter

    ## Return Type
    models.DocumentList

    ## Status
    This endpoint is working.

    ## Sample Call
    /v1/Database/MostCited/

    ## Notes

    ## Potential Errors
    Raises HTTPException when the support library returns a models.ErrorReturn.
    """
    # NOTE(review): fixed delay on every request - presumably throttling;
    # confirm whether it is still needed.
    time.sleep(.25)
    ocd, session_info = opasAPISupportLib.get_session_info(request, response)
    # session_id = session_info.session_id
    #print ("in most cited")
    # return documentList
    ret_val, ret_status = opasAPISupportLib.database_get_most_cited( period=period,
                                                                     more_than=morethan,
                                                                     publication_period=pubperiod,
                                                                     author=author,
                                                                     title=title,
                                                                     source_name=sourcename,
                                                                     source_code=sourcecode,
                                                                     source_type=sourcetype, # see VALS_SOURCE_TYPE (norm_val applied in opasCenralDBLib)
                                                                     abstract_requested=abstract,
                                                                     req_url=request.url,
                                                                     limit=limit,
                                                                     offset=offset,
                                                                     session_info=session_info
                                                                     )
    if isinstance(ret_val, models.ErrorReturn):
        raise HTTPException(
            status_code=ret_val.httpcode,
            detail = ret_val.error + " - " + ret_val.error_description
        )
    else:
        status_message = opasCentralDBLib.API_STATUS_SUCCESS
        status_code = 200
        # Don't record in final build - (ok for now during testing)
        # NOTE(review): get_session_info is called a second time here -
        # looks redundant with the call above; confirm before removing.
        ocd, session_info = opasAPISupportLib.get_session_info(request, response)
        ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_DATABASE_MOSTCITED,
                                    session_info=session_info,
                                    params=request.url._url,
                                    return_status_code = status_code,
                                    status_message=status_message
                                    )
    #print ("out mostcited")
    return ret_val
def captures_ok(api, cfg, size, utils):
    """
    Returns normally if patterns in captured packets are as expected.

    Each captured frame must carry the expected sender/target hardware and
    protocol addresses at fixed byte offsets (cycling through five expected
    address sets) and must be exactly *size* bytes long.
    """
    sender_hardware_addr = [
        [0x00, 0x0C, 0x29, 0xE3, 0x53, 0xEA],
        [0x00, 0x0C, 0x29, 0xE3, 0x54, 0xEA],
        [0x00, 0x0C, 0x29, 0xE3, 0x55, 0xEA],
        [0x00, 0x0C, 0x29, 0xE3, 0x56, 0xEA],
        [0x00, 0x0C, 0x29, 0xE3, 0x57, 0xEA],
    ]
    target_hardware_addr = [
        [0x00, 0x0C, 0x30, 0xE3, 0x54, 0xEA],
        [0x00, 0x0C, 0x30, 0xE3, 0x53, 0xEA],
        [0x00, 0x0C, 0x30, 0xE3, 0x52, 0xEA],
        [0x00, 0x0C, 0x30, 0xE3, 0x51, 0xEA],
        [0x00, 0x0C, 0x30, 0xE3, 0x50, 0xEA],
    ]
    sender_protocol_addr = [
        [0x0a, 0x01, 0x01, 0x02],
        [0x0a, 0x01, 0x01, 0x03],
        [0x0a, 0x01, 0x01, 0x04],
        [0x0a, 0x01, 0x01, 0x05],
        [0x0a, 0x01, 0x01, 0x06],
    ]
    target_protocol_addr = [
        [0x14, 0x01, 0x01, 0x05],
        [0x14, 0x01, 0x01, 0x04],
        [0x14, 0x01, 0x01, 0x03],
        [0x14, 0x01, 0x01, 0x02],
        [0x14, 0x01, 0x01, 0x01],
    ]
    cap_dict = utils.get_all_captures(api, cfg)
    assert len(cap_dict) == 1
    for frames in cap_dict.values():
        # The expected address set cycles with period 5, restarting per capture.
        for idx, frame in enumerate(frames):
            j = idx % 5
            assert frame[22:28] == sender_hardware_addr[j]
            assert frame[28:32] == sender_protocol_addr[j]
            assert frame[32:38] == target_hardware_addr[j]
            assert frame[38:42] == target_protocol_addr[j]
            assert len(frame) == size
def format_event_leef(event):
    """Format an event as QRadar / LEEF.

    Consumes (pops) well-known keys from *event*; whatever remains afterwards
    is appended verbatim as extra attributes.  Relies on a module-level
    ``hostname`` for the syslog header.

    :param event: mutable dict of event data; modified in place
    :return: one LEEF 2.0 record string prefixed with a syslog header
    """
    syslog_header = f'<13>1 {event["actionTime"]} {hostname}'
    # The sixth LEEF 2.0 header field declares the attribute delimiter;
    # "xa6" presumably denotes the 0xA6 character used by the join below -
    # confirm against the LEEF 2.0 specification.
    leef_header = f'LEEF:2.0|TrinityCyber|PTI|1|{event.pop("id")}|xa6|'
    fields = dict()
    fields["devTime"] = event.pop("actionTime")
    fields[
        "devTimeFormat"
    ] = "yyyy-MM-dd'T'HH:mm:ss.SSSXXX"  # (e.g. 2022-04-25T00:01:19.109+00:00)
    # LEEF-standard fields
    if "source" in event:
        fields["src"] = event.pop("source")
    if "destination" in event:
        fields["dst"] = event.pop("destination")
    if "sourcePort" in event:
        fields["srcPort"] = event.pop("sourcePort")
    if "destinationPort" in event:
        fields["dstPort"] = event.pop("destinationPort")
    if "transportProtocol" in event:
        fields["proto"] = event.pop("transportProtocol")
    # Formula-related metadata
    formula_metadata = event.pop("formula")
    fields["tcFormulaId"] = formula_metadata["formulaId"]
    fields["tcFormulaTitle"] = formula_metadata["title"]
    for key, value in formula_metadata["tags"].items():
        # e.g. "threat level" -> "tcFormulaThreatLevel"
        key = "tcFormula" + key.title().replace(" ", "")
        fields[key] = value
    # Application / protocol related data
    for app_fields in event.pop("applicationData"):
        for key, value in app_fields.items():
            if value is None:
                continue
            if isinstance(value, str):
                # Escape delimiter
                value = value.replace("\xa6", "\\\xa6")
            fields[key] = value
    # Add any extra information from the query
    fields.update(event)
    fields_formatted = "\xa6".join([f"{key}={value}" for key, value in fields.items()])
    return f"{syslog_header} {leef_header}{fields_formatted}"
def _file_path(ctx, val):
    """Return the path of the given file object.

    Args:
        ctx: The context (unused here).
        val: The file object.

    Returns:
        The file object's ``path`` attribute.
    """
    return val.path
def Mapping_Third(Writelines, ThirdClassDict):
    """
    Expand third-party class entries in the lines to be written for an APK.

    :param Writelines: the method lines about to be written for the APK
    :param ThirdClassDict: per-APK dictionary mapping a third-party class
        name (the stripped line) to the list of lines it expands to
    :return: updated list of lines with third-party entries expanded
    """
    updated_lines = []
    for line in Writelines:
        key = line.strip()
        # Membership test directly on the dict - no need to materialize keys().
        if key in ThirdClassDict:
            updated_lines.extend(ThirdClassDict[key])
        else:
            updated_lines.append(line)
    return updated_lines
def rotate_points_around_origin(points, origin, angle):
    """
    Rotate a 2D array of points counterclockwise by a given angle around a
    given origin and return the rotated copy (the input is left untouched).
    The angle should be given in degrees.
    """
    theta = angle * np.pi / 180
    ox, oy = origin.tolist()
    # Offsets of every point from the rotation origin.
    dx = points[:, 0] - ox
    dy = points[:, 1] - oy
    rotated = np.copy(points)
    rotated[:, 0] = ox + np.cos(theta) * dx - np.sin(theta) * dy
    rotated[:, 1] = oy + np.sin(theta) * dx + np.cos(theta) * dy
    return rotated
def get_names_to_aliases(inp) -> list:
    """
    Returns pair,
    - out[0] = dictionary of names to sets of aliases
    - out[1] = errors from names_to_links, i.e., from file-reading

    @param inp: string vault directory or names_to_links pair
        if string then get_names_to_links method is used

    Note: the previous annotation said ``dict`` but the function has always
    returned a two-element list; the annotation now matches.
    """
    if isinstance(inp, str):
        inp = get_names_to_links(inp)
    # now inp must be a names_to_links pair
    names_to_aliases = {}
    # The per-file name is irrelevant here; only each file's link dict matters.
    for dict_links_to_aliases in inp[0].values():
        for link_filename, set_of_aliases in dict_links_to_aliases.items():
            # setdefault keeps the incoming alias sets unshared (fresh set).
            names_to_aliases.setdefault(link_filename, set()).update(set_of_aliases)
    return [names_to_aliases, inp[1]]
def main():
    """
    A simple test program to do PFASST runs for the heat equation.

    Configures the Pi-line problem, runs a single-process controller from
    t0 to Tend and dumps the collected statistics to 'piline.dat'.
    """
    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 1E-10
    level_params['dt'] = 0.25
    # initialize sweeper parameters
    sweeper_params = dict()
    sweeper_params['collocation_class'] = Collocation
    sweeper_params['node_type'] = 'LEGENDRE'
    sweeper_params['quad_type'] = 'LOBATTO'
    sweeper_params['num_nodes'] = 3
    # sweeper_params['QI'] = 'LU'  # For the IMEX sweeper, the LU-trick can be activated for the implicit part
    # initialize problem parameters
    problem_params = dict()
    problem_params['Vs'] = 100.0
    problem_params['Rs'] = 1.0
    problem_params['C1'] = 1.0
    problem_params['Rpi'] = 0.2
    problem_params['C2'] = 1.0
    problem_params['Lpi'] = 1.0
    problem_params['Rl'] = 5.0
    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = 50
    # initialize controller parameters
    controller_params = dict()
    controller_params['logger_level'] = 20
    controller_params['hook_class'] = log_data
    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_class'] = piline                # pass problem class
    description['problem_params'] = problem_params       # pass problem parameters
    description['sweeper_class'] = imex_1st_order        # pass sweeper
    description['sweeper_params'] = sweeper_params       # pass sweeper parameters
    description['level_params'] = level_params           # pass level parameters
    description['step_params'] = step_params             # pass step parameters
    # set time parameters
    t0 = 0.0
    Tend = 20
    # instantiate controller
    controller = controller_nonMPI(num_procs=1, controller_params=controller_params,
                                   description=description)
    # get initial values on finest level
    P = controller.MS[0].levels[0].prob
    uinit = P.u_exact(t0)
    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
    # Context manager guarantees the stats file is closed even if
    # dill.dump raises (the old open/close pair could leak the handle).
    fname = 'piline.dat'
    with open(fname, 'wb') as f:
        dill.dump(stats, f)
def main():
    """Interactively generate passwords until the user declines another run.

    Repeatedly asks for a length, prints a generated password, and exits
    via sys.exit() when the user answers anything other than 'y'.
    """
    # Loop instead of recursing: the old version called main() recursively
    # on every "y", growing the call stack without bound.
    while True:
        print("This is Password Generator.")
        length = int(input("\nEnter the length of password: "))
        pwd = pwd_generator(length)
        print("Password generated! -> ", pwd)
        try_decision = input("\nTry again? (y/n): ")
        if try_decision != "y":
            print("Have a good day!")
            sys.exit()
def controller_plots(model_dir, ds, ds_eval, groundtruth, prediction, communication):
    """
    Plot the regressor R^2 between output control and evidence.

    :param model_dir: directory containing the trained model
    :param ds: name of the dataset
    :param ds_eval: name of the dataset for the evaluation (usually the manual one)
    :param groundtruth: evidence
    :param prediction: output control
    :param communication: states if the communication is used by the network
    """
    model_img = '%s/images/' % model_dir
    utils.check_dir(model_img)
    # R^2 regression plot of prediction vs ground truth
    title = 'Regression %s vs %s' % (ds_eval, ds)
    file_name = 'regression-%svs%s' % (ds_eval, ds)
    if not communication:
        # Without communication the signals are plain arrays; flatten them
        # so the regressor sees 1-D vectors.
        groundtruth = np.array(groundtruth).flatten()
        prediction = np.array(prediction).flatten()
    my_plots.plot_regressor(groundtruth, prediction, 'groundtruth', 'prediction',
                            model_img, title, file_name)
def length(vec):
    """
    Length of a given vector; scalars count as length 1.

    Parameters
    ----------
    vec: scalar or arr
        Input vector

    Returns
    -------
    length: int
        ``len(vec)`` for array-likes, 1 for scalars.
    """
    # np.ndim is 0 exactly for scalar inputs.
    return 1 if np.ndim(vec) == 0 else len(vec)
def help_(ctx):
    """Show this delightful help message and exit.

    Echoes the *parent* command's help text, so invoking this subcommand
    behaves like calling the parent with --help.
    """
    click.echo(ctx.parent.get_help())
def test_fetch_genes_to_hpo_to_disease(hpo_genes_file):
    """Test fetching the genes_to_phenotype resource with a mocked HTTP response."""
    # GIVEN the URL of the HPO genes_to_phenotype resource
    url = scout_requests.HPO_URL.format("genes_to_phenotype.txt")
    with open(hpo_genes_file, "r") as hpo_file:
        content = hpo_file.read()
    # Register a canned 200 response so no real network call happens
    responses.add(
        responses.GET,
        url,
        body=content,
        status=200,
    )
    # WHEN fetching the resource
    data = scout_requests.fetch_genes_to_hpo_to_disease()
    # THEN assert that the HPO header is there
    assert "#Format: entrez" in data[0]
def build_eval_infeeds(params):
    """Create the TPU infeed ops.

    Builds one enqueue while-loop per TPU host: each host runs its own shard
    of the eval dataset and feeds the TPU cores assigned to it.

    Args:
        params: experiment config; must provide eval_batch_size, num_replicas,
            device_assignment and the XLA-sharding options used below.

    Returns:
        (infeed_ops, infeed_graphs, eval_size): the per-host while-loop ops,
        the tf.Graph each op lives in, and the total number of eval examples.
    """
    eval_size = get_eval_size(params)
    num_eval_steps = eval_size // params.eval_batch_size
    dev_assign = params.device_assignment
    # Map each TPU host device to the ordinals of the cores it feeds.
    host_to_tpus = {}
    for replica_id in range(params.num_replicas):
        host_device = dev_assign.host_device(replica=replica_id, logical_core=0)
        tpu_ordinal = dev_assign.tpu_ordinal(replica=replica_id, logical_core=0)
        if host_device not in host_to_tpus:
            host_to_tpus[host_device] = [tpu_ordinal]
        else:
            assert tpu_ordinal not in host_to_tpus[host_device]
            host_to_tpus[host_device].append(tpu_ordinal)
    infeed_ops = []
    infeed_graphs = []
    num_inputs = len(host_to_tpus)
    for i, (host, tpus) in enumerate(host_to_tpus.items()):
        # Each host gets its own graph so its enqueue loop can run standalone.
        infeed_graph = tf.Graph()
        infeed_graphs.append(infeed_graph)
        with infeed_graph.as_default():
            def enqueue_fn(host_device=host, input_index=i, device_ordinals=tpus):
                """Build the enqueue ops for one host (one eval step)."""
                worker_infeed_ops = []
                with tf.device(host_device):
                    # Each host reads its own dataset shard.
                    dataset = build_eval_dataset(
                        params,
                        batch_size=params.eval_batch_size // num_inputs,
                        num_workers=num_inputs,
                        worker_index=input_index)
                    inputs = tf.data.make_one_shot_iterator(dataset).get_next()
                    if params.use_xla_sharding and params.num_cores_per_replica > 1:
                        # XLA sharding path: pad, split across this host's
                        # cores and enqueue via a partitioned infeed queue.
                        inputs, partition_dims = pad_inputs_for_xla_sharding(params, inputs)
                        num_splits = len(device_ordinals)
                        if len(device_ordinals) > 1:
                            inputs = [tf.split(v, num_splits, 0) for v in inputs]
                        else:
                            inputs = [[v] for v in inputs]
                        q = tpu_feed._PartitionedInfeedQueue(
                            number_of_tuple_elements=len(inputs),
                            host_id=int(host_device.split('/task:')[-1].split('/')[0]),
                            input_partition_dims=partition_dims,
                            device_assignment=dev_assign)
                        inputs = [[v[i] for v in inputs] for i in range(num_splits)]
                        worker_infeed_ops.extend(q.generate_enqueue_ops(inputs))
                    else:
                        # Plain path: split the host batch evenly across cores.
                        num_splits = len(device_ordinals)
                        if len(device_ordinals) > 1:
                            inputs = [tf.split(v, num_splits, 0) for v in inputs]
                        else:
                            inputs = [[v] for v in inputs]
                        input_shapes = [v[0].shape for v in inputs]
                        for j, device_ordinal in enumerate(device_ordinals):
                            worker_infeed_ops.append(tf.raw_ops.InfeedEnqueueTuple(
                                inputs=[v[j] for v in inputs],
                                shapes=input_shapes,
                                device_ordinal=device_ordinal))
                return worker_infeed_ops
            def _body(i):
                # One loop iteration enqueues one eval step worth of batches.
                with tf.control_dependencies(enqueue_fn()):
                    return i+1
            # Serial while-loop driving num_eval_steps enqueues.
            infeed_op = tf.while_loop(
                lambda step: tf.less(step, tf.cast(num_eval_steps, step.dtype)),
                _body, [0], parallel_iterations=1, name='eval_infeed').op
            infeed_ops.append(infeed_op)
    return infeed_ops, infeed_graphs, eval_size
def add_auth(opts):
    """Add authorization entry.

    If all options are None, then use interactive 'wizard.'

    :param opts: parsed CLI options; must expose a live ``connection`` plus
        the per-field auth options referenced below.
    """
    conn = opts.connection
    if not conn:
        _stderr.write(
            "VOLTTRON is not running. This command "
            "requires VOLTTRON platform to be running\n"
        )
        return
    fields = {
        "domain": opts.domain,
        "address": opts.address,
        "mechanism": opts.mechanism,
        "credentials": opts.credentials,
        "user_id": opts.user_id,
        "identity": opts.user_id,
        "groups": _comma_split(opts.groups),
        "roles": _comma_split(opts.roles),
        "capabilities": _parse_capabilities(opts.capabilities),
        "rpc_method_authorizations": None,
        "comments": opts.comments,
    }
    if any(fields.values()):
        # Remove unspecified options so the default parameters are used
        fields = {k: v for k, v in fields.items() if v}
        fields["enabled"] = not opts.disabled
        entry = fields
    else:
        # No options were specified, use interactive wizard
        responses = _ask_for_auth_fields()
        responses["rpc_method_authorizations"] = None
        entry = responses
    if opts.add_known_host:
        # Recording the peer's server key needs both address and credentials.
        if entry["address"] is None:
            raise ValueError(
                "host (--address) is required when " "--add-known-host is "
                "specified"
            )
        if entry["credentials"] is None:
            raise ValueError(
                "serverkey (--credentials) is required when "
                "--add-known-host is specified"
            )
        opts.host = entry["address"]
        opts.serverkey = entry["credentials"]
        add_server_key(opts)
    # Hand the entry to the AUTH agent over VIP RPC (4s timeout).
    try:
        conn.server.vip.rpc.call(AUTH, "auth_file.add", entry).get(timeout=4)
        _stdout.write("added entry {}\n".format(entry))
    except AuthException as err:
        _stderr.write("ERROR: %s\n" % str(err))
def save_training_config(args_dict: Dict[str, Any], model_out_path: Path):
    """Saves training_config to a file.

    :param args_dict: dictionary with all training parameters.
    :param model_out_path: directory in which training_config.json is stored.
    """
    _, training_args = load_config(schema_version=CURRENT_SCHEMA_VERSION, return_unused_kwargs=True, **args_dict)
    training_config = TrainingConfig(**training_args)
    # exist_ok avoids the check-then-create race of the old
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(model_out_path, exist_ok=True)
    training_config.save(model_out_path)
def get_featured_parks(request):
    """ Returns recommended parks as JSON

    Serializes every Park flagged as featured (images prefetched to avoid
    N+1 queries) into {'featured_parks': [...]}.

    NOTE(review): HttpResponse's ``mimetype`` kwarg was removed in Django 1.7
    (renamed ``content_type``) — confirm the Django version in use.
    """
    featured_parks = Park.objects.filter(featured=True).prefetch_related('images')
    response = {
        'featured_parks': [{'id': n.pk, 'name': n.name, 'image': n.thumbnail} for n in featured_parks]
    }
    return HttpResponse(json.dumps(response), mimetype='application/json')
def plot_accuracy(raw_all_grids_df, option=None):
    """
    Input: raw condition df
    facets: None, 'subjects',
    Output: figure(s) that visualize the difference in accuracy btw. el and pl

    Fix: in the 'final_figure' branch the theme restore used to sit *after*
    the return statement and never executed; it now runs before returning.
    """
    # Rearrange columns for better readability in temporal order of the experiment
    condition_list = raw_all_grids_df['condition'].value_counts().index.tolist()
    condition_list_sorted = ['GRID', 'SMALLGRID_BEFORE', 'SMALLGRID_AFTER']
    if not set(condition_list).issubset(set(condition_list_sorted)) :
        raise ValueError('are you sure everything is okay with the condition reordering?')
    # Create a categorical type
    condition_cat = CategoricalDtype(categories=condition_list_sorted, ordered=True)
    # Cast the existing categories into the new category. Due to a bug in pandas we need to do this via a string.
    raw_all_grids_df['condition'] = raw_all_grids_df['condition'].astype(str).astype(condition_cat)
    # Get the x and y position of the 13 elements shown in the small Grid condition
    element_pairs = raw_all_grids_df.query('condition=="SMALLGRID_BEFORE"').loc[:,['posx', 'posy']]
    only_13_elements = pd.merge(raw_all_grids_df, element_pairs, on=['posx', 'posy'], how='inner')
    # specify aggregators for different levels
    # element level - - block level - - (subject level)
    # mean median (mean)
    agg_level=[winmean, winmean]
    # aggregate data of only_13_elements
    mean_over_elements = only_13_elements.groupby(['condition', 'block','subject','et'], as_index=False).agg(agg_level[0])
    winmean_over_elements_winmean_over_blocks = mean_over_elements.groupby(['condition', 'subject','et'], as_index=False).agg(agg_level[1])
    if option is None:
        # compare accuracy values btw eyetrackers. Taking the mean over the subjects
        (ggplot(winmean_over_elements_winmean_over_blocks, aes(x='et', y='accuracy',color='condition')) +
             # TODO or points or violins??
             geom_boxplot(data=winmean_over_elements_winmean_over_blocks, position=position_dodge(width=0.9)) +
             ggtitle('Comparing accuracy of conditions')).draw()
    elif option == 'facet_subjects':
        # plot mean accuracy over all blocks for each subject
        (ggplot(winmean_over_elements_winmean_over_blocks, aes(x='et', y='accuracy',color='condition')) +
             geom_point(alpha=0.1,data=winmean_over_elements_winmean_over_blocks, position=position_dodge(width=0.5)) +
             geom_point(position=position_dodge(width=0.5)) +
             geom_line(aes(group='condition'),alpha=0.6, position=position_dodge(width=0.5)) +
             facet_grid('.~subject') +
             ggtitle('Comparing accuracy of conditions')).draw()
    elif option == 'show_variance_for_blocks':
        # plot mean accuracy over all blocks for each subject and show range by plotting the mean accuracy for each block
        (ggplot(winmean_over_elements_winmean_over_blocks, aes(x='et', y='accuracy',color='condition')) +
             # get the mean for each block
             geom_point(alpha=0.1,data=raw_all_grids_df.groupby(['et', 'subject','condition','block']).mean().reset_index(level=['et','subject','condition','block']),position=position_dodge(width=0.5)) +
             geom_point(position=position_dodge(width=0.5))+
             geom_line(aes(group='condition'),position=position_dodge(width=0.5)) +
             facet_grid('.~subject') +
             ggtitle('Comparing accuracy of conditions')).draw()
    elif option == 'final_figure':
        # save old theme and set the one for fixation plotting
        old_theme = theme_get()
        theme_set(mythemes.before_after_grid_theme)
        # simple: eyetracker vs mean accuracy over all blocks and subjects
        fig = (ggplot(winmean_over_elements_winmean_over_blocks,aes(x='condition', y='accuracy', fill='et',group='et', color='et')) +
               stat_summary(fun_y=winmean, geom='line',position=position_dodge(width=0.1)) +
               # pointrange makes a 0.95 bootstrap CI
               stat_summary(fun_data=winmean_cl_boot, geom='pointrange', position=position_dodge(width=0.1)) +
               #geom_point(aes(group="subject"),data=winmean_over_elements_winmean_over_blocks.query("et=='Pupil Labs'"),alpha=0.5,color='blue')+
               #geom_point(aes(group="subject"),data=winmean_over_elements_winmean_over_blocks.query("et=='EyeLink'"),alpha=0.5,color='red')+
               ylab("Accuracy [$^\circ$]") +
               labs(title='Course of Accuracy'))
        # BUG FIX: restore the theme *before* returning (was unreachable code
        # placed after the return statement).
        theme_set(old_theme)
        return fig
    elif option == 'subjectvariance':
        mean_over_elements.loc[:,'group'] = mean_over_elements.et + mean_over_elements.block
        return (ggplot(mean_over_elements,aes(x='condition', y='accuracy', fill='et',group='group', color='et')) +
                geom_point(alpha=0.5)+
                geom_line()+
                ylab("Accuracy [$^\circ$]") +
                labs(title='Course of Accuracy'))+facet_wrap('subject',scales='free')
    else:
        raise ValueError('You must set facets to a valid option. See documentation.')
def locate_references(path: Union[Path, str], encoding: str = DEFAULT_ENCODING):
    """Locates add_reference markers in *path*.

    Recursively scans every ``*.py`` file under a directory (or just the
    single file given), taking note of the module, line, short_purpose and
    actual reference string.

    Returns
        None
    """
    target = Path(path)
    # A directory is expanded to its sorted recursive *.py contents.
    files = sorted(target.rglob("*.py")) if target.is_dir() else [target]
    for source_file in files:
        locate_references_in_file(source_file, encoding=encoding)
def make_data_output(structures: Sequence[Artefact[bytes]]) -> Artefact[list[Any]]:
    """Take xyz structures from xtb, parse them and collect them by energy."""
    def xyz_to_record(xyz: bytes) -> dict[str, Any]:
        # The second line of an xtb xyz file carries the energy metadata.
        text = xyz.decode().strip()
        energy = float(text.splitlines()[1].split()[1])
        return {"structure": text, "energy": energy}

    def by_ascending_energy(*records: dict[str, Any]) -> list[dict[str, Any]]:
        return sorted(records, key=lambda rec: rec["energy"])  # type:ignore

    # Morph each structure into a dict artefact, then reduce to a sorted list.
    parsed = [f.morph(xyz_to_record, s, out=Encoding.json) for s in structures]
    return f.reduce(by_ascending_energy, *parsed)
def total_angular_momentum(particles):
    """
    Returns the total angular momentum of the particles set,
    i.e. the sum over particles of m * (r x v).

    >>> from amuse.datamodel import Particles
    >>> particles = Particles(2)
    >>> particles.x = [-1.0, 1.0] | units.m
    >>> particles.y = [0.0, 0.0] | units.m
    >>> particles.z = [0.0, 0.0] | units.m
    >>> particles.vx = [0.0, 0.0] | units.ms
    >>> particles.vy = [-1.0, 1.0] | units.ms
    >>> particles.vz = [0.0, 0.0] | units.ms
    >>> particles.mass = [1.0, .5] | units.kg
    >>> particles.total_angular_momentum()
    quantity<[0.0, 0.0, 1.5] m**2 * kg * s**-1>
    """
    # equivalent to:
    # lx=(m*(y*vz-z*vy)).sum()
    # ly=(m*(z*vx-x*vz)).sum()
    # lz=(m*(x*vy-y*vx)).sum()
    # mass is reshaped to (N, 1) so it broadcasts over the xyz components.
    return (particles.mass.reshape((-1,1)) *particles.position.cross(particles.velocity)).sum(axis=0)
def svn_repos_get_logs3(*args):
    """
    svn_repos_get_logs3(svn_repos_t repos, apr_array_header_t paths, svn_revnum_t start,
        svn_revnum_t end, int limit, svn_boolean_t discover_changed_paths,
        svn_boolean_t strict_node_history,
        svn_repos_authz_func_t authz_read_func,
        svn_log_message_receiver_t receiver,
        apr_pool_t pool) -> svn_error_t
    """
    # Thin SWIG wrapper: forwards all positional arguments straight to the
    # native Subversion C implementation.
    return _repos.svn_repos_get_logs3(*args)
def insertTimerOnOutput (signal, type):
    """
    Plug the signal sout of the return entity instead of `signal` to
    input signal to enable the timer.

    - param signal an output signal.
    - return an Timer entity.
    """
    timer_class = getTimerType(type)
    # One timer entity per wrapped signal, named after it.
    timer_entity = timer_class("timer_of_" + signal.name)
    plug(signal, timer_entity.sin)
    return timer_entity
def anomaly_metrics(contended_task_id: TaskId, contending_task_ids: List[TaskId]):
    """Helper method to create metric based on anomaly.

    uuid is used if provided.

    :param contended_task_id: task suffering the contention
    :param contending_task_ids: tasks causing it (one metric per task)
    :return: list of 'anomaly' counter Metrics
    """
    # The uuid depends only on the full set of task ids, so compute it once —
    # the old code recomputed the identical value on every loop iteration.
    uuid = _create_uuid_from_tasks_ids(contending_task_ids + [contended_task_id])
    metrics = []
    for task_id in contending_task_ids:
        metrics.append(Metric(
            name='anomaly',
            value=1,
            labels=dict(
                contended_task_id=contended_task_id, contending_task_id=task_id,
                resource=ContendedResource.MEMORY_BW, uuid=uuid, type='contention'
            ),
            type=MetricType.COUNTER
        ))
    return metrics
def alarm():
    """Flask endpoint: forward a POSTed alarm payload to the 'SIP-alarms' Kafka topic.

    Returns an acknowledgement dict for POST requests and an empty string
    otherwise.
    """
    if request.method == 'POST':
        response = {'message': 'POST Accepted'}
        logging.info('alarm POSTED!')
        data = request.data
        logging.info(data)
        # NOTE(review): request.data is bytes on Python 3 and
        # json.dumps(bytes) raises TypeError — confirm the intended runtime
        # and payload type.
        string = json.dumps(data)
        producer.send('SIP-alarms', string.encode())
        return response
    return ""
def train_net(solver_prototxt, roidb, output_dir,
              pretrained_model=None, detection_pretrained_model =None, max_iters=40000):
    """Train a TD-CNN network.

    Filters degenerate roidb entries, wraps the Caffe solver and trains for
    max_iters iterations, returning the snapshot paths produced.
    (Python 2 module — note the print statements.)
    """
    roidb = filter_roidb(roidb)
    sw = SolverWrapper(solver_prototxt, roidb, output_dir, pretrained_model=pretrained_model, detection_pretrained_model=detection_pretrained_model)
    print 'Solving...'
    model_paths = sw.train_model(max_iters)
    print 'done solving'
    return model_paths
def get_druminst_order(x):
    """helper function to determine order of drum instruments

    relies on standard sequence defined in settings
    """
    # Translate the note value into an instrument name, then rank it by its
    # position in the standard print sequence.
    inst_name = shared.get_inst_name(x + shared.octave_length + shared.note2drums)
    return shared.standard_printseq.index(inst_name)
def polyTransfer(*args, **kwargs):
    """
    Transfer information from one polygonal object to another one. Both objects must have identical topology, that is same
    vertex, edge, and face numbering. The flags specify which of the vertices, UV sets or vertex colors will be copied.

    Flags:
    - alternateObject : ao (unicode) [create,query,edit]
        Name of the alternate object.
    - caching : cch (bool) [create,edit]
        Toggle caching for all attributes so that no recomputation is needed.
    - constructionHistory : ch (bool) [create,query]
        Turn the construction history on or off (where applicable). If construction history is on then the corresponding node
        will be inserted into the history chain for the mesh. If construction history is off then the operation will be
        performed directly on the object. Note:If the object already has construction history then this flag is ignored and the
        node will always be inserted into the history chain.
    - frozen : fzn (bool) []
    - name : n (unicode) [create]
        Give a name to the resulting node.
    - nodeState : nds (int) [create,query,edit]
        Defines how to evaluate the node. 0: Normal1: PassThrough2: Blocking3: Internally disabled. Will return to Normal state
        when enabled4: Internally disabled. Will return to PassThrough state when enabled5: Internally disabled. Will return to
        Blocking state when enabledFlag can have multiple arguments, passed either as a tuple or a list.
    - uvSets : uv (bool) [create,query,edit]
        When true, the UV sets are copied from the alternate object. C: Default is on.
    - vertexColor : vc (bool) [create,query,edit]
        When true, the colors per vertex are copied from the alternate object. C: Default is off.
    - vertices : v (bool) [create,query,edit]
        When true, the vertices positions are copied from the alternate object. C: Default is off. Common flags

    Derived from mel command `maya.cmds.polyTransfer`
    """
    # Documentation stub: the real implementation is supplied by Maya at
    # runtime; this body intentionally does nothing.
    pass
def scalbnf(x, y):
    """
    See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_scalbnf.html

    :param x: Argument.
    :type x: float32
    :param y: Argument.
    :type y: int32
    :rtype: float32
    """
    # Intentionally empty stub: the body is provided by the CUDA libdevice
    # target at compile time; this Python function only carries the
    # signature and documentation.
def find_nearest_idx(array, value):
    """
    Locate the entry of an array closest to a value and return its index.

    :param np.ndarray array: Array of values in which to look
    :param float value: Value for which the index of the closest value in
        `array` is desired.
    :rtype: int
    :return: The index of the item in `array` nearest to `value`
    """
    distances = np.abs(array - value)
    return distances.argmin()
def hard_tanh(x):
    """Saturating tanh activation: clip(1.05 * tanh(x), -1, 1).

    Arguments:
        x: Input tensor

    Scaling tanh by 1.05 lets the output actually reach ±1 (plain tanh only
    approaches them asymptotically) before clipping.
    NOTE(review): the previous doc described piecewise thresholds at ±2,
    which only approximates what this implementation computes.

    returns value according to the saturating tanh above
    """
    return tf.maximum(
        tf.cast(-1, tf.float32), tf.minimum(tf.cast(1, tf.float32), tf.cast(keras.backend.tanh(x) * 1.05, tf.float32))
    )
async def get_publications(publication_id: str, embedded: bool = False):
    """
    Given a Publication ID, get the Publication record from metadata store.

    :param publication_id: ID of the publication to retrieve
    :param embedded: if True, resolve embedded/linked sub-records as well
    :return: the publication record returned by get_publication
    """
    publication = await get_publication(publication_id, embedded)
    return publication
def RandomNormal(inp):
    """
    Random normally distributed weight initialization.

    :param inp: number of weights to draw
    :return: 1-D numpy array of N(0, 1) samples
    """
    weights = np.random.randn(inp)
    return weights
async def notes_active(svd):
    """ For .notes command, list all of the notes saved in a chat. """
    try:
        from userbot.modules.sql_helper.notes_sql import get_notes
    except AttributeError:
        await svd.edit("`Running on Non-SQL mode!`")
        return
    notes = get_notes(svd.chat_id)
    if notes:
        # Build the header once, then one line per keyword — the old code
        # special-cased the first note with a duplicated branch in the loop.
        message = "Notes saved in this chat:\n"
        for note in notes:
            message += "`#{}`\n".format(note.keyword)
    else:
        message = "`There are no saved notes in this chat`"
    await svd.edit(message)
def get_transceiver_description(sfp_type, if_alias):
    """
    Build the human-readable transceiver description string.

    :param sfp_type: SFP type of transceiver
    :param if_alias: Port alias name
    :return: Transceiver description
    """
    description = "{} for {}".format(sfp_type, if_alias)
    return description
def find_contiguous_set(target_sum: int, values: list[int]) -> list[int]:
    """Returns a run of at least 2 contiguous values that add to target_sum.

    Returns [] when no such run exists. Uses a sliding window (assumes
    non-negative values — TODO confirm with callers), so the run may start
    anywhere; the previous version only examined runs beginning at index 0
    and raised IndexError when no run existed.
    """
    lo = 0
    running = 0
    for hi, value in enumerate(values):
        running += value
        # Shrink from the left while the window overshoots the target.
        while running > target_sum and lo < hi:
            running -= values[lo]
            lo += 1
        if running == target_sum and hi - lo + 1 >= 2:
            return values[lo:hi + 1]
    return []
def disconnect(filename="cache.json"):
    """
    Connect to the local cache, so no internet connection is required.

    :param filename: path of the JSON cache produced by _save_cache()
    :returns: void

    Side effects: populates the module-level _CACHE from the file, zeroes
    every _CACHE_COUNTER entry and switches _CONNECTED off.
    """
    global _CONNECTED, _CACHE
    try:
        with open(filename, 'r') as f:
            # Normalize any unicode keys/values to str for py2 compatibility.
            _CACHE = _recursively_convert_unicode_to_str(json.load(f))['data']
    except FileNotFoundError:
        raise RedditException("""The cache file '{0}' was not found, and I cannot disconnect without one. If you have not been given a cache.json file, then you can create a new one:
    >>> from reddit import reddit
    >>> reddit.connect()
    >>> reddit._start_editing()
    ...
    >>> reddit.get_posts("askreddit")
    ...
    >>> reddit._save_cache('{0}')
""".format(filename))
    for key in _CACHE.keys():
        _CACHE_COUNTER[key] = 0
    _CONNECTED = False
def init(args):
    """
    Initialise a new repo in your pyhome.

    :param args: parsed CLI arguments; only ``args.name`` is used.
    """
    # exist_ok avoids the race between the existence check and creation
    # present in the old `if not os.path.exists: os.makedirs` pattern.
    os.makedirs(settings.PYHOME_REPO, exist_ok=True)
    print('Initialising repo ...')
    git.init(settings.PYHOME_REPO, args.name)
def nesoni_report_to_JSON(reportified):
    """
    Convert a nesoni nway.any file that has been reportified to JSON

    See: tables.rst for info on what is stored in RethinkDB

    :param reportified: the reportified nway.any file (been through
                        nway_reportify()). This is essentially a list of tuples

    :returns: a pair (list of JSON dicts, dict of per-strain variant counts)
    """
    stats = {}
    parsed_list = []
    for position in reportified:
        for elem in position:
            skip = False
            ref_id, pos, strain, old, ftype, new, evidence, cons, uncalled = elem
            # Drop the trailing version suffix from the reference id.
            ref_id = '.'.join(ref_id.split('.')[:-1])
            # Initialise the stats...
            if strain not in stats:
                stats[strain] = 0
            if new == old:
                # Have no change
                #dat = ["conserved"]+[None]*9
                skip = True
            elif new == 'N':
                # Have an uncalled base
                #dat = ["uncalled"]+[None]*9
                skip = True
            # Check for mixtures... nesoni's reported feature type can hide a
            # different variant class, so reclassify from the allele itself.
            elif ftype == "substitution" and new.find('-') != -1:
                # Deletion hidden in substitution
                ftype = 'deletion'
                dat = extract_consequences(cons, ftype)
                stats[strain] = stats[strain]+1
            elif ftype == "substitution" and len(new) > 1:
                # Insertion hidden in substitution
                ftype = 'insertion'
                dat = extract_consequences(cons, ftype)
                stats[strain] = stats[strain]+1
            elif ftype == "deletion" and new.find('-') == -1 and len(new) == 1:
                # Substitution hidden in deletions
                ftype = 'substitution'
                dat = extract_consequences(cons, ftype)
                stats[strain] = stats[strain]+1
            elif ftype == "deletion" and new.find('-') == -1 and len(new) > 1:
                # Insertion hidden in deletions
                ftype = 'insertion'
                dat = extract_consequences(cons, ftype)
                stats[strain] = stats[strain]+1
            elif ftype == "insertion" and new.find('-') != -1:
                # Deletion hidden in insertions
                ftype = 'deletion'
                dat = extract_consequences(cons, ftype)
                stats[strain] = stats[strain]+1
            elif ftype == "insertion" and new.find('-') == -1 and len(new) == 1:
                # Substitution hidden in insertions
                ftype = 'substitution'
                dat = extract_consequences(cons, ftype)
                stats[strain] = stats[strain]+1
            # We have the same change state across all strains
            else:
                dat = extract_consequences(cons, ftype)
                stats[strain] = stats[strain]+1
            obs_count = parsers.parse_evidence(evidence)
            # Some simple tests
            the_classes = ['insertion', 'deletion', 'substitution']
            # skip guards the use of dat: it is only bound on non-skipped
            # branches above.
            if not skip:
                assert dat[0] in the_classes
                json = {"id": strain+'_'+ref_id+'_'+str(pos),
                        "StrainID": strain,
                        "Position": pos,
                        "LocusTag": dat[2],
                        "Class": dat[0],
                        "SubClass": dat[1],
                        "RefBase": old,
                        "ChangeBase": new,
                        "CDSBaseNum": dat[3],
                        "CDSAANum": dat[4],
                        "CDSRegion": dat[5],
                        "RefAA": dat[6],
                        "ChangeAA": dat[7],
                        "Product": dat[8],
                        "CorrelatedChange": dat[9],
                        "Evidence": obs_count,
                        "UncalledBlock": uncalled
                        }
                parsed_list.append(json)
    return parsed_list, stats
def DeferredLightInfoEnd(builder):
    """This method is deprecated. Please switch to End."""
    # Backwards-compatible alias kept for callers of the old
    # flatbuffers-generated API; simply forwards to End().
    return End(builder)
def qiita_get_config():
    """設定ファイルを読む (read the ~/.qiita.ini settings file).

    :return: the populated ConfigParser instance
    """
    config = configparser.ConfigParser()
    path = Path(os.getenv('HOME'), '.qiita.ini')
    # Context manager closes the handle — the old code passed a bare open()
    # into read_file and leaked the file object.
    with open(path) as config_file:
        config.read_file(config_file)
    return config
def move_character(character: dict, direction_index=None, available_directions=None) -> tuple:
    """
    Change character's coordinates.

    :param character: a dictionary
    :param direction_index: a non-negative integer, optional
    :param available_directions: a list of strings, optional
    :precondition: character keys must contain "X-coordinate" and "Y-coordinate"
    :precondition: character values must be integers
    :precondition: direction_index must be a non-negative integer validated by validate_option function or None
    :precondition: availabe_directions each item must be either "north", "south", "east" or "west", or None
    :postcondition: updates character X or Y coordinate based on direction choice if availabe_directions is not None
    :postcondition: makes character X or Y coordinate be equal to the previous coordinates
    :return: new character's coordinates as a tuple

    >>> protagonist = {"Y-coordinate": 1, "X-coordinate": 1, "Previous coordinates": (0, 1)}
    >>> move_character(protagonist, 0, ["south", "west"])
    (2, 1)
    >>> protagonist = {"Y-coordinate": 1, "X-coordinate": 1, "Previous coordinates": (0, 1)}
    >>> move_character(protagonist)
    (0, 1)
    """
    if available_directions is None:
        # No direction: step back to the previously stored position.
        character["Y-coordinate"], character["X-coordinate"] = character["Previous coordinates"]
    else:
        offsets = {"north": -1, "south": 1, "west": -1, "east": 1}
        chosen = available_directions[direction_index]
        character["Previous coordinates"] = (character["Y-coordinate"], character["X-coordinate"])
        # north/south move along Y, east/west along X.
        axis = "Y-coordinate" if chosen in "north south" else "X-coordinate"
        character[axis] += offsets[chosen]
    return character["Y-coordinate"], character["X-coordinate"]
def expandednodeid_to_str(exnode):
    """SOPC_ExpandedNodeId or SOPC_ExpandedNodeId* to its str representation in the OPC-UA XML syntax.

    Format: optional 'srv=<index>;' and 'nsu=<uri>;' prefixes followed by the
    NodeId string produced by the C library.
    """
    a = ''
    if exnode.ServerIndex:
        a += 'srv={};'.format(exnode.ServerIndex)
    nsu = string_to_str(ffi.addressof(exnode.NamespaceUri))
    if nsu:
        a += 'nsu={};'.format(nsu)
    # Delegate NodeId formatting to the native SOPC helper.
    b = ffi.string(libsub.SOPC_NodeId_ToCString(ffi.addressof(exnode.NodeId))).decode()
    return a + b
def scrap_insta_description(inst) -> str:
    """
    Scrap description from instagram account HTML.

    :param inst: parsed profile page (BeautifulSoup-style tree — TODO confirm)
    :return: the profile description text
    """
    # NOTE(review): relies on a fixed position in Instagram's markup
    # (5th div, first span) — brittle if the page layout changes; verify the
    # selector against a current page before trusting this.
    description = inst.body.div.section.main.div.header.section.find_all(
        'div')[4].span.get_text()
    return description
def merge_by_sim(track_sim_list, track_data_dic, track_list, reid_th):
    """
    Merge by sim.

    Clusters tracks by average-linkage agglomerative clustering on a
    precomputed cost matrix, then keeps only clusters that span more than
    one camera ("valid global ids").

    Ref: https://stackoverflow.com/questions/30089675/clustering-cosine-similarity-matrix

    :param track_sim_list: pairwise track similarities
    :param track_data_dic: per-track data used to build the cost matrix
    :param track_list: track records; index positions match the cost matrix
    :param reid_th: distance threshold that stops cluster merging
    :return: list of track groups spanning multiple cameras
    """
    print('start clustering')
    merge_start_time = time.time()
    cost_matrix = get_cost_matrix(track_sim_list, track_data_dic, track_list)
    # distance_threshold (not n_clusters) controls how many clusters emerge.
    cluster_labels = AgglomerativeClustering(n_clusters=None, distance_threshold=reid_th, affinity='precomputed',
                                             linkage='average').fit_predict(cluster_labels := cluster_labels if False else cost_matrix)
    cluster_labels = get_match(cluster_labels)
    # print(merged_index_list)
    print('we have %d global tracks after merge, time for merge %.4f s' % (len(cluster_labels), time.time()-merge_start_time))
    # get real data
    valid_global_list = []
    valid_count = 0
    for person_track_list in cluster_labels:
        temp = []
        for index in person_track_list:
            record_name = track_list[index]
            temp.append(record_name)
        if len(temp) > 1:
            # A cluster only counts when its tracks come from >1 camera
            # (track records start with the camera id).
            cameras = set([t[0] for t in temp])
            if len(cameras) > 1:
                valid_count += 1
                valid_global_list.append(temp)
    #clustered_list.append(temp)
    print(f'after merge, %d valid global ids are created: {valid_global_list}' % valid_count)
    return valid_global_list
def get_ros_hostname():
    """ Try to get ROS_HOSTNAME environment variable.

    returns: a ROS compatible hostname, or None.
    """
    candidate = os.environ.get('ROS_HOSTNAME')
    # Reject anything that is not a legal ROS name (including unset/None).
    if is_legal_name(candidate):
        return candidate
    return None
def cycle(*args, **kargs):
    """
    Returns the next cycle of the given list

    Everytime ``cycle`` is called, the value returned will be the next item
    in the list passed to it. This list is reset on every request, but can
    also be reset by calling ``reset_cycle()``.

    You may specify the list as either arguments, or as a single list argument.

    This can be used to alternate classes for table rows::

        # In Myghty...
        % for item in items:
        <tr class="<% cycle("even", "odd") %>">
        ... use item ...
        </tr>
        % #endfor

    You can use named cycles to prevent clashes in nested loops. You'll
    have to reset the inner cycle, manually::

        % for item in items:
        <tr class="<% cycle("even", "odd", name="row_class") %>
        <td>
        % for value in item.values:
        <span style="color:'<% cycle("red", "green", "blue",
        name="colors") %>'">
        item
        </span>
        % #endfor
        <% reset_cycle("colors") %>
        </td>
        </tr>
        % #endfor
    """
    # Multiple positional args form the item list; a single arg IS the list.
    if len(args) > 1:
        items = args
    else:
        items = args[0]
    name = kargs.get('name', 'default')
    # Cycle state lives on the request environ, so it resets per request.
    cycles = request_config().environ.setdefault('railshelpers.cycles', {})
    cycle = cycles.setdefault(name, iterdict(items))
    # A changed item list replaces the stored cycle for this name.
    if cycles[name].get('items') != items:
        cycle = cycles[name] = iterdict(items)
    # NOTE(review): .next() is the Python 2 iterator protocol; this module
    # predates Python 3 (where it would be next(cycle['iter'])).
    return cycle['iter'].next()
def calc_rmsd(struct1, struct2):
    """
    Basic rmsd calculator for molecules and molecular clusters.

    Atoms are matched between the two structures by solving the linear sum
    assignment problem on the pairwise distance matrix, so atom ordering
    does not have to agree.

    NOTE(review): the returned value is the *mean* per-atom distance after
    matching, not a true root-mean-square deviation — confirm downstream
    callers expect this definition before renaming/changing it.
    """
    geo1 = struct1.get_geo_array()
    geo2 = struct2.get_geo_array()
    # (The old version also fetched element labels but never used them.)
    dist = cdist(geo1, geo2)
    idx1, idx2 = linear_sum_assignment(dist)
    geo1 = geo1[idx1]
    geo2 = geo2[idx2]
    rmsd = np.mean(np.linalg.norm(geo1 - geo2, axis=-1))
    return rmsd
def validate_flat_dimension(d):
    """Return true if a 'key:value' dimension is valid."""
    # partition splits on the first ':' only; text after it is the value.
    key, _, value = d.partition(':')
    return validate_dimension_value(value) and validate_dimension_key(key)
def file_size(f):
    """
    Returns size of file in bytes.

    Accepts either a path (string) or an open file object; for file objects
    the current position is saved and restored around the size probe.
    """
    if isinstance(f, (six.string_types, six.text_type)):
        return os.path.getsize(f)
    position = f.tell()
    f.seek(0, 2)  # seek to end-of-file
    size = f.tell()
    f.seek(position)
    return size
def _check_fill_arg(kwargs):
    """
    Check if kwargs contains key ``fill``.

    Raises AssertionError when the caller forgot to pass ``fill``.
    Note: asserts are stripped under ``python -O``; this guard assumes the
    code is not run optimized.
    """
    assert "fill" in kwargs, "Need to have fill in kwargs."
def __get_from_imports(import_tuples):
    """ Returns import names and fromlist

    import_tuples are specified as
    (name, fromlist, ispackage)

    Only tuples whose fromlist is a non-empty sequence are kept.
    """
    # Truthiness covers both the `is not None` and the `len(...) > 0` checks
    # the previous version spelled out separately.
    from_imports = [(tup[0], tup[1]) for tup in import_tuples if tup[1]]
    return from_imports
def test_no_error_correction_with_two_logical_qubits():
    """Checks that an error is thrown when a circuit is set up for extra ancilla with two logical qubits"""
    # The matched message pins down *why* construction must fail.
    with raises(ValueError, match = "Can't set up extra ancilla with two logical qubits due to memory size restrictions"):
        SteaneCodeLogicalQubit(2, parity_check_matrix, codewords, extend_ancilla = True)
def logging(on : bool, *, dest : TextIO = sys.stderr) -> None:
    """Enable or disable logging of received and transmitted JSON.

    :param on: True to start logging, False to stop.
    :param dest: stream log lines are written to (stderr by default).

    NOTE: this function shadows the stdlib ``logging`` module name inside
    this module; it simply forwards to the designated connection.
    """
    __get_designated_connection().logging(on=on,dest=dest) | 29,773 |
def change_dt_utc_to_local(dt):
    """
    Convert a UTC timestamp string to the Europe/Paris time zone.

    :param dt: timestamp formatted as '%Y%m%dT%H%M%SZ' (UTC).
    :return: the same instant formatted as '%Y%m%dT%H%M%S' in Europe/Paris.
    """
    return convert_utctime_to_timezone(dt,'%Y%m%dT%H%M%SZ','Europe/Paris','%Y%m%dT%H%M%S') | 29,774 |
def sarimax_ACO_PDQ_search(endo_var, exog_var_matrix, PDQS, searchSpace, options_ACO, low_memory=False, verbose=False):
    """
    Search SARIMAX (p, d, q) parameters with ant colony optimization.

    endo_var: the endogenous (target) series.
    exog_var_matrix: matrix of exogenous variables.
    PDQS: fixed seasonal order [P, D, Q, s]. EG: [1, 1, 1, 24].
    searchSpace: ranges explored for p, d and q. E.G.:
                p = d = q = range(0, 2)
                searchSpace = [p, d, q]
    options_ACO: parametrization for the ACO algorithm. E.G.:
        {'antNumber':2, 'antTours':1, 'alpha':2, 'beta':2, 'rho':0.5, 'Q':2}
    low_memory: forwarded to SARIMAX.fit() during the search phase.
    verbose: True to log progress.

    Returns (AICc of the refitted best model, best (p, d, q) found).
    """
    def SARIMAX_AICc(X, *args):
        # ACO objective: fit SARIMAX with candidate order X and score it
        # by AICc (lower is better).
        endo = args[0][0]
        exog = args[0][1]
        param_seasonal = args[0][2]
        param = X[0:3]
        if param_seasonal[-1] < 0:
            param_seasonal[-1] = 1
        mod = SARIMAX(endo, exog=exog, order=param, seasonal_order=param_seasonal,
                      enforce_stationarity=False, enforce_invertibility=False)
        aicc = np.inf
        try:
            results = mod.fit(disp=False, low_memory=low_memory)
            aicc = results.aicc
        except:
            # Failed/non-converged fits score as +inf so the ACO avoids them.
            pass
        return aicc
    antNumber = options_ACO['antNumber']
    antTours = options_ACO['antTours']
    alpha = options_ACO['alpha']
    beta = options_ACO['beta']
    rho = options_ACO['rho']
    Q = options_ACO['Q']
    if verbose:
        logging.info("Original search Space: {0}".format(searchSpace))
    warnings.filterwarnings("ignore") # specify to ignore warning messages
    ACOsearch = ACO(alpha, beta, rho, Q)
    best_result, _ = ACOsearch.optimize(antNumber, antTours, dimentionsRanges=searchSpace, function=SARIMAX_AICc,
                                        functionArgs=[endo_var, exog_var_matrix, PDQS], verbose=verbose)
    logging.info("BEST result: {0}.".format(best_result))
    # Refit the winning order once more to report its AICc.
    param = best_result
    param_seasonal = PDQS
    mod = SARIMAX(endo_var, exog=exog_var_matrix, order=param, seasonal_order=param_seasonal,
                  enforce_stationarity=False, enforce_invertibility=False)
    results = mod.fit(disp=False)
    return results.aicc, best_result | 29,775 |
def convert_vue_i18n_format(locale: str, po_content: Any) -> str:
    """
    Render PO entries as a vue-i18n javascript messages block.

    Builds the ``const internet_nl_messages = {...}`` structure for the
    given *locale*, emitting one ``key: 'value'`` line per PO entry.
    Entries whose msgid ends with ``content`` are skipped on purpose: the
    bulky page content from internet.nl is not duplicated here, callers
    point to that content instead.

    Note that vue translations are keyed by javascript properties
    (unquoted), hence the msgid/msgstr escaping helpers.

    :return: the javascript source as a single string
    """
    parts = [_vue_format_start(), _vue_format_locale_start(locale)]
    for entry in po_content:
        # Page-content entries are intentionally omitted (see docstring).
        if entry.msgid.endswith('content'):
            continue
        parts.append(f"    {_js_safe_msgid(entry.msgid)}: '{_js_safe_msgstr(entry.msgstr)}',\n")
    parts.append(_vue_format_locale_end())
    parts.append(_vue_format_end())
    return ''.join(parts)
def p_on(p):
    """
    on : ON columnlist
    """
    # NOTE: the docstring above is the PLY/yacc grammar rule and is read by
    # the parser generator -- do not treat or edit it as documentation.
    # The 'on' clause reduces to its column list.
    p[0] = p[2] | 29,777 |
def get_index(x, value, closest=True):
    """Return the index in *x* corresponding to *value*.

    Args:
        x: sequence or array of numeric values to search.
        value: the value to locate.
        closest: when True (default), return the index of the element
            nearest to *value*; when False, require an exact match.

    Returns:
        int: index into *x*.

    Raises:
        ValueError: if ``closest`` is False and *value* is not in *x*.
    """
    if closest:
        # argmin is O(n) and returns the first occurrence on ties, unlike
        # the previous argsort()[0], which sorted the whole array.
        return int(np.abs(np.asarray(x) - value).argmin())
    return list(x).index(value)
def process_spawn(window, args):
    """
    Spawns a child process with its stdin/out/err wired to a PTY in `window`.

    `args` should be a list where the first item is the executable and the
    remaining will be passed to it as command line arguments.

    Returns a process object.

    NOTE(review): this is a coroutine-style trap -- it yields the request
    tuple (Trap.PROCESS_SPAWN, window, args) to the scheduler and returns
    whatever the scheduler sends back (presumably the process object;
    confirm against the trap handler).
    """
    return (yield Trap.PROCESS_SPAWN, window, args) | 29,779 |
def insertTaskParams(taskParams, verbose=False, properErrorCode=False, parent_tid=None):
    """Insert task parameters
    args:
       taskParams: a dictionary of task parameters
       verbose: True to see verbose messages
       properErrorCode: True to get a detailed error code
       parent_tid: ID of the parent task
    returns:
       status code
             0: communication succeeded to the panda server
           255: communication failure
       tuple of return code, message from the server, and taskID if successful, or error message if failed
             0: request is processed
             1: duplication in DEFT
             2: duplication in JEDI
             3: accepted for incremental execution
             4: server error
    """
    # serialize the task parameters for transport
    taskParamsStr = json.dumps(taskParams)
    # instantiate curl with the grid certificate for SSL auth
    curl = _Curl()
    curl.sslCert = _x509()
    curl.sslKey = _x509()
    curl.verbose = verbose
    # execute the request against the server
    url = baseURLSSL + '/insertTaskParams'
    data = {'taskParams':taskParamsStr,
            'properErrorCode':properErrorCode}
    if parent_tid:
        data['parent_tid'] = parent_tid
    status,output = curl.post(url,data)
    try:
        loaded_output = list(pickle_loads(output))
        # extract taskID from the trailing server message, e.g. 'jediTaskID=123'
        try:
            m = re.search('jediTaskID=(\d+)', loaded_output[-1])
            taskID = int(m.group(1))
        except Exception:
            # no task ID in the message (e.g. the request was rejected)
            taskID = None
        loaded_output.append(taskID)
        return status, loaded_output
    except Exception as e:
        # unpickling failed: return raw output together with a dumped error log
        errStr = dump_log("insertTaskParams", e, output)
        return EC_Failed, output+'\n'+errStr | 29,780 |
def pref_infos():
    """
    Flask view (Python 2): show (GET) and update (POST) the current
    user's profile information on the preferences page.

    POST: validates the form, rejects a missing user or an email already
    owned by another account, otherwise persists the changes to Mongo and
    re-logs the user in.
    GET: pre-populates the form from ``current_user`` and renders the page.
    """
    form = UserParametersForm()
    # print current_user
    if request.method == 'POST' :
        # NOTE(review): stray bare Python-2 print -- emits a blank line;
        # looks like debug leftover.
        print
        log_cis.info("updating an user - POST \n")
        # for debugging purposes
        for f_field in form :
            log_cis.info( "form name : %s / form data : %s ", f_field.name, f_field.data )
        if form.validate_on_submit():
            existing_user = mongo_users.find_one({"_id" : ObjectId(form.userOID.data)} )
            log_cis.debug("existing_user : %s", pformat(existing_user) )
            ### check if new email is already used by someone else
            # NOTE(review): existing_user["_id"] is read below BEFORE the
            # `existing_user is None` guard; a POST with an unknown OID and
            # a known email would raise TypeError here -- confirm and reorder.
            is_new_email_taken = False
            existing_email = mongo_users.find_one( {"userEmail" : form.userEmail.data} )
            log_cis.debug("existing_email : %s", pformat(existing_email) )
            if existing_email is not None :
                if existing_user["_id"] != existing_email["_id"] :
                    is_new_email_taken = True
            if existing_user is None :
                flash(u"Erreur : utilisateur inexistant", category='warning')
                return redirect(url_for('pref_infos'))
            if is_new_email_taken :
                flash(u"Erreur : cet email déjà utilisé", category='warning')
                return redirect(url_for('pref_infos'))
            else :
                ### saving updated infos in user
                user_obj = User()
                user_obj.populate_from_dict(dict_input=existing_user)
                # # update visitor to user in db --> function from ModelMixin
                log_cis.warning("updating new_user in mongo_users" )
                user_obj.populate_from_form( form=form )
                user_obj.update_document_in_mongo( document=existing_user, coll=mongo_users )
                ### relog user so the session reflects the new data
                login_user( user_obj, remember=existing_user['userRememberMe'] )
                flash(u"Vos informations ont bien été mises à jour", category='primary')
                return redirect(url_for('pref_infos'))
        else :
            log_cis.error("form was not validated : form.errors : %s", form.errors )
            flash(u"Erreur : formulaire invalide", category='warning')
            return redirect(url_for('pref_infos'))
    elif request.method == 'GET' :
        log_cis.info("updating an user - GET \n")
        print current_user.__dict__
        # prepopulate input fields
        form.userOID.data = str(current_user.userOID)
        form.userName.data = current_user.userName.capitalize()
        form.userSurname.data = current_user.userSurname.capitalize()
        form.userEmail.data = current_user.userEmail
        form.userOtherStructure.data = current_user.userOtherStructure
        # prepopulate select fields
        form.userProfile.process_data(current_user.userProfile)
        form.userPartnerStructure.process_data(current_user.userPartnerStructure)
        form.userStructureProfile.process_data(current_user.userStructureProfile)
        # prepopulate boolean fields
        form.userHaveProjects.process_data(current_user.userHaveProjects)
        form.userJoinCollective.process_data(current_user.userJoinCollective)
        form.userNewsletter.process_data(current_user.userNewsletter)
    return render_template('user_preferences/user_parameters.html',
        config_name = config_name, # prod or default...
        app_metas = app_metas,
        language = "fr" ,
        languages_dict = app_languages_dict ,
        site_section = "preferences",
        site_subsection = "infos",
        form = form,
        user_infos = current_user.get_public_infos # cf model_user.py
    ) | 29,781 |
async def test_missing_optional_config(hass, calls):
    """Test: missing optional template is ok."""
    # A template select configured without its optional attributes must
    # still set up exactly one 'template' component.
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "select": {
                        "state": "{{ 'a' }}",
                        "select_option": {"service": "script.select_option"},
                        "options": "{{ ['a', 'b'] }}",
                    }
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # Entity should report state 'a' with options ['a', 'b'].
    _verify(hass, "a", ["a", "b"]) | 29,782 |
def tree_cc(flag, width, mbl='-', xmin='-', xmax='-', ymin='-', ymax='-', logpath=None):
    """
    | Phase unwrapping tree generation with low correlation search (modified ARW algorithm)
    | Copyright 2014, Gamma Remote Sensing, v2.9 20-Jan-2014 clw/uw

    Parameters
    ----------
    flag:
        (input) phase unwrapping flag file
    width:
        number of samples/row
    mbl:
        maximum branch length (default=32, maximum=64)
    xmin:
        starting range pixel offset (default = 0)
    xmax:
        last range pixel offset (default = width-1)
    ymin:
        starting azimuth row, relative to start (default = 0)
    ymax:
        last azimuth row, relative to start (default = nlines-1)
    logpath: str or None
        a directory to write command logfiles to

    Notes
    -----
    The '-' placeholders keep the positional argument order; presumably the
    Gamma binary substitutes its own documented default for '-' arguments --
    confirm against the Gamma ISP documentation.
    """
    # Thin wrapper: delegate to the Gamma ISP 'tree_cc' executable.
    process(['/cluster/GAMMA_SOFTWARE-20161207/ISP/bin/tree_cc', flag, width, mbl, xmin, xmax, ymin, ymax], logpath=logpath) | 29,783 |
def read_domains(file_name):
    """
    Read the domains to probe from a stored domain file and derive their
    registered (main) domains.

    Lines that do not parse into a valid public suffix plus registered
    domain are discarded with a warning.

    :param file_name: file under ./unverified_domain_data/ with one domain
        per line.
    :return: (domains, main_domains) -- the raw domains kept, and their
        corresponding 'registered.tld' forms (same order).
    """
    domains = []
    main_domains = []
    # suffix_list_urls=None keeps tldextract offline (bundled suffix list).
    no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
    file_path = './unverified_domain_data/'
    with open(file_path+file_name,'r') as fp:
        for d in fp.readlines():
            domain_tld = no_fetch_extract(d.strip())
            tld, reg_domain = domain_tld.suffix, domain_tld.domain  # split off the public suffix and the registered part
            if tld and reg_domain:
                main_domains.append(reg_domain+'.'+tld)
                domains.append(d.strip())
            else:
                # message reads: "domain %s is malformed, skipping probe"
                logger.logger.warning('域名%s不符合规范,不进行探测' % d.strip())
    return domains, main_domains | 29,784 |
def collectUserInput():
    """Prompt the user repeatedly until 'q' is entered.

    Input is lower-cased before the check (so 'Q' also quits); every
    other entry starts a game round via initGame().  Prints the thanks
    message on exit.
    """
    while True:
        choice = input(MSG_START).lower()
        if choice == 'q':
            break
        initGame(choice)
    print(MSG_THANKS)
def determine_last_contact_incomplete(end_time_skyfield, events, times, antenna):
    """
    Return the last contact, completed up to the end of the analysis period,
    and the number of events belonging to that incomplete trailing sequence.

    :param end_time_skyfield: skyfield time (end of the analysis window)
    :param events: array of int (skyfield event codes)
    :param times: array of skyfield times
    :param antenna: Antenna object
    :return: Contact object, int
    """
    incomplete_event_sequence_end = list()
    # Let the last contact end at the end of the analysis period.
    incomplete_event_sequence_end.append(end_time_skyfield)
    # If the last event is a culmination, include it.
    if events[-1] == int(SkyfieldEventTypes.Culminate):
        incomplete_event_sequence_end.append(times[-1])
    # If the event preceding the collected ones is a rise, include it too.
    if events[-1 - len(incomplete_event_sequence_end)] == int(SkyfieldEventTypes.Rise):
        incomplete_event_sequence_end.append(times[-1 - len(incomplete_event_sequence_end)])
    # Reverse the list to restore chronological order.
    incomplete_event_sequence_end.reverse()
    incomplete_contact_end = Contact(antenna)
    for time in incomplete_event_sequence_end:
        incomplete_contact_end.add_relative_position_by_skyfield_time(time)
    return incomplete_contact_end, len(incomplete_event_sequence_end) | 29,786 |
def lam_est(data, J, B, Q, L = 3,
            paras = [3, 20], n_trees = 200, include_reward = 0, fixed_state_comp = None, method = "QRF"):
    """
    Construct the pointwise cov lam (for both test stat and c.v.), by
    combining the two parts (estimated and observed).

    NOTE(review): `paras` uses a mutable default list; safe only as long
    as no caller mutates it.  When `paras == "CV_once"` the function
    instead returns the cross-validated parameters directly.

    Returns
    -------
    lam: (Q-1)-len list of four lam matrices (n * T-q * B)
    """
    # State/action dimensionalities from the first trajectory.
    dx, da = data[0][0].shape[1], data[0][1].shape[1]
    if fixed_state_comp is not None:
        dx += 1
    # generate uv: fixed random projections (seeded for reproducibility)
    rseed(0); npseed(0)
    if include_reward:
        uv = [randn(B, dx + 1), randn(B, dx + da)]
    else:
        uv = [randn(B, dx), randn(B, dx + da)]
    # estimate characteristic values (cross-fitting): phi_R, psi_R, phi_I,
    # psi_I
    estimated = cond_char_vaule_est(data = data, uv = uv,
                                    paras = paras, n_trees = n_trees, L = L,
                                    J = J,
                                    include_reward = include_reward, fixed_state_comp = fixed_state_comp,
                                    method = method) # ,obs_ys
    if paras == "CV_once":
        CV_paras = estimated
        return CV_paras
    else:
        estimated_cond_char = estimated
        # cos and sin in batch. (n*T*dx) * (dx* B) = n * T * B:
        # c_X,s_X,c_XA,s_XA
        observed_cond_char = obs_char(data = data, uv = uv,
                                      include_reward = include_reward, fixed_state_comp = fixed_state_comp)
        # combine the above two parts to get cond. corr. estimation.
        lam = lam_formula(estimated_cond_char, observed_cond_char, J, Q)
        return lam | 29,787 |
def cli(ctx, **kwargs):
    """A command-line tool for non-regression testing of RESTful APIs.
    Helps you get better REST!
    """
    # NOTE: the docstring above doubles as the click help text; keep it
    # user-facing.  Stash the parsed options on the click context and
    # configure daiquiri logging from them.
    ctx.obj = ResortOptions(**kwargs)
    daiquiri.setup(program_name=constants.APP_NAME, level=ctx.obj.loglevel) | 29,788 |
def func(x, kw=3.0):
    """Placeholder with an unfilled documentation template; currently a
    no-op that returns None.

    Args:
        x: unused -- purpose not yet defined.
        kw: unused numeric option, defaults to 3.0.

    NOTE(review): the original template claimed ``Raises: Exception`` but
    the body raises nothing; implement the function or remove this stub.
    """ | 29,789 |
def read_training_data(rootpath):
    """
    Read the traffic-sign training images and their annotations.

    :param rootpath: path to the traffic sign data; one subdirectory per
        class, each containing the images plus a ``GT-<class>.csv``
        annotations file (';'-separated, with a header row).
    :return: (images, img_info) where images is a list of image arrays and
        img_info holds [width, height, class, track] per image.
    """
    images = []  # image arrays
    img_info = []  # corresponding labels
    # loop over all classes
    for img_class in os.listdir(rootpath):
        prefix = rootpath + '/' + img_class + '/'  # subdirectory for class
        # Context manager guarantees the CSV is closed even if a malformed
        # row raises while parsing (the original leaked the handle then).
        with open(prefix + 'GT-' + img_class + '.csv') as gt_file:
            gt_reader = csv.reader(gt_file, delimiter=';')  # csv parser for annotations file
            next(gt_reader)  # skip header
            # loop over all images in the current annotations file
            for row in gt_reader:
                images.append(plt.imread(prefix + row[0]))  # array representation of image
                # width, height, class label, track id (first 5 chars of filename)
                img_info.append([int(row[1]), int(row[2]),
                                 img_class, row[0][:5]])
    return images, img_info
def initialize_all(y0, t0, t1, n):
    """Initialize the solution array, time grid, and step size shared by
    the ODE solvers in this lab.

    Args:
        y0: initial condition; scalar or ndarray.
        t0: start time.
        t1: end time.
        n: number of grid points.

    Returns:
        (Y, T, h): solution array with Y[0] = y0, the linspace of times,
        and the uniform step size (t1 - t0) / (n - 1).
    """
    if isinstance(y0, np.ndarray):
        # Bug fix: the original referenced an undefined name ``y`` here
        # (NameError for every ndarray input); it must be ``y0``.
        Y = np.empty((n, y0.size)).squeeze()
    else:
        Y = np.empty(n)
    Y[0] = y0
    T = np.linspace(t0, t1, n)
    h = float(t1 - t0) / (n - 1)
    return Y, T, h
def _sequence_event(values, length, verb):
    """Returns sequence (finite product) event.

    Args:
        values: List of values to sample from.
        length: Length of the sequence to generate.
        verb: Verb in infinitive form (kept for interface parity; unused).

    Returns:
        Instance of `probability.FiniteProductEvent`, together with a text
        description.
    """
    del verb # unused
    # Draw one value per position, then take the product of the
    # corresponding single-outcome events.
    samples = [random.choice(values) for _ in range(length)]
    events = [probability.DiscreteEvent([sample]) for sample in samples]
    event = probability.FiniteProductEvent(events)
    # Human-readable description, e.g. 'sequence abba'.
    sequence = ''.join(str(sample) for sample in samples)
    event_description = 'sequence {sequence}'.format(sequence=sequence)
    return event, event_description | 29,792 |
def ndarrayToQImage(img):
    """Convert an 8-bit numpy array image to a QImage.

    :param img: HxW (grayscale) or HxWx3 (RGB) array of dtype uint8.
    :return: QtGui.QImage built over the array's buffer (the array must
        outlive the QImage, which does not copy the data).
    :raises ValueError: for non-uint8 data or unsupported dimensionality.
    """
    if img.dtype != 'uint8':
        raise ValueError('Only support 8U data')
    # Bug fix: numpy arrays expose ``ndim``; the original ``img.dim``
    # raised AttributeError for every input.
    if img.ndim == 3:
        t = QtGui.QImage.Format_RGB888
    elif img.ndim == 2:
        t = QtGui.QImage.Format_Grayscale8
    else:
        raise ValueError('Only support 1 and 3 channel image')
    qimage = QtGui.QImage(img.data,
                          img.shape[1], img.shape[0],
                          img.strides[0], t)
    return qimage
def create_release_html(filename='index.html'):
    """
    Create a html file with the links to the sphinx and doxygen
    documentations of the supported releases.

    Rows are generated for (in order): full releases, light releases, and
    the development build; only the newest entry carries the
    '(recommended)' suffix within each group.

    Parameters:
      filename (str): The name of the html file.
    """
    # Page template; %s below is replaced by the generated table rows.
    page = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<meta name="keywords" content="Belle II, basf2, documentation, doxygen, sphinx" />
<meta name="description" content="The documentation of the Belle II software." />
<title>Documentation of the Belle II software</title>
<style type="text/css" media="all">
  @import "https://b2-master.belle2.org/build_style/index_documentation.css";
</style>
</head>
<body>
<div id="wrap">
  <div id="header">
    <div id="headerleft"></div>
    <div id="headerright"></div>
    <div id="topnavigation">
      <ul>
        <li><a href="http://www.belle2.org">Belle II</a></li>
        <li><a href="https://confluence.desy.de/display/BI/Belle+II">Wiki</a></li>
        <li><a href="https://stash.desy.de/projects/B2/repos/software/browse">Git</a></li>
        <li><a href="https://agira.desy.de/projects/BII">Issues</a></li>
        <li><a href="https://b2-master.belle2.org/development_build/index.html">Development Build</a></li>
      </ul>
    </div>
  </div>
  <div id="content">
    <br/>
    <br/>
    <div id="resulttable">
      <div class="roundbox">
        <h2></h2>
        <table>
          <tbody>
            <tr class="odd">
              <th>Sphinx documentation</th>
              <th>Doxygen documentation</th>
            %s
          </tbody>
        </table>
      </div>
      <br/>
    </div>
  </div>
</div>
<div id="footer">
Copyright 2018-2019 Belle II software group <br />
Uses icons from the gnome-colors package under the <a href="http://www.gnu.org/licenses/gpl.html">GNU GENERAL PUBLIC LICENSE</a>
and from http://www.famfamfam.com under the <a href="http://creativecommons.org/licenses/by/2.5/">Creative Commons Attribution 2.5 License</a>
</div>
</body>
</html>
"""
    table = ""
    # Only the first (newest) full release gets the '(recommended)' tag.
    recommended = ' (recommended)'
    for supported in reversed(_supported_releases):
        table += ('<tr class="even">\n<td><a href="sphinx/%s/index.html"><b>%s%s</b></a></td>\n<td><a href="%s/index.html"><b>%s</b></a></td>\n</tr>\n' % (supported, supported, recommended, supported, supported))
        recommended = ''
    # Same scheme for light releases: newest one is tagged recommended.
    recommended = ' (recommended)'
    for supported in reversed(_supported_light_releases):
        table += ('<tr class="odd">\n<td><a href="sphinx/%s/index.html"><b>%s%s</b></a></td>\n<td><a href="%s/index.html"><b>%s</b></a></td>\n</tr>\n' % (supported, supported, recommended, supported, supported))
        recommended = ''
    # Finally the development build row.
    table += '<tr class="even">\n<td><a href="development/sphinx/index.html"><b>development</b></a></td>\n<td><a href="development/index.html"><b>development</b></a></td>\n</tr>\n'
    with open(filename, 'w') as htmlfile:
        htmlfile.write(page % table) | 29,794 |
def object_hook(dct, compile_re=False, ensure_tzinfo=True, encoding=None):
    """
    Object hook used by hoplite_loads (Python 2).  This object hook can
    encode the dictionary in the right text format.  For example,
    json.loads by default will decode '{'hey':'hey'}' into {u'hey':u'hey'}
    rather than {'hey':'hey'}.  If encoding is set to utf-8, this
    object_hook makes it decode to {'hey':'hey'}.  It also decodes
    extended-JSON (MongoDB) types such as ObjectId and datetime objects;
    datetimes can optionally be decoded with timezone information.

    :param dct: Dictionary this object hook is to operate on.
    :param compile_re: True to return "$regex" entries as compiled
        re patterns; False to return bson Regex wrappers.
    :param ensure_tzinfo: Boolean deciding if timezone info should be added
        to decoded datetime objects.
    :param encoding: choice of text decoding (unicode/utf-8, perhaps others)
    :return: the decoded value (dict, or a BSON/Python type for extended
        JSON markers)
    """
    if encoding:
        # Converts all keys and unicode values in the top layer of the current
        # dictionary to the desired encoding type.
        new_dct = {}
        for key, value in dct.iteritems():
            if isinstance(key, unicode):
                key = key.encode(encoding)
            if isinstance(value, unicode):
                value = value.encode(encoding)
            new_dct[key] = value
        dct = new_dct
    # Each "$..." marker below is MongoDB extended JSON; the first match wins.
    if "$oid" in dct:
        return ObjectId(str(dct["$oid"]))
    if "$ref" in dct:
        return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None))
    if "$date" in dct:
        # Stored as milliseconds since the epoch.
        secs = float(dct["$date"]) / 1000.0
        if ensure_tzinfo:
            return EPOCH_AWARE + datetime.timedelta(seconds=secs)
        else:
            # Avoid adding time zone info by default, unlike
            # bson.json_util.loads. If the developer really wants this, they
            # will have to specify it.
            return EPOCH_NAIVE + datetime.timedelta(seconds=secs)
    if "$regex" in dct:
        flags = 0
        # PyMongo always adds $options but some other tools may not.
        for opt in dct.get("$options", ""):
            flags |= _RE_OPT_TABLE.get(opt, 0)
        if compile_re:
            return re.compile(dct["$regex"], flags)
        else:
            return Regex(dct["$regex"], flags)
    if "$minKey" in dct:
        return MinKey()
    if "$maxKey" in dct:
        return MaxKey()
    if "$binary" in dct:
        if isinstance(dct["$type"], int):
            dct["$type"] = "%02x" % dct["$type"]
        subtype = int(dct["$type"], 16)
        if subtype >= 0xffffff80:  # Handle mongoexport values
            subtype = int(dct["$type"][6:], 16)
        return Binary(base64.b64decode(dct["$binary"].encode()), subtype)
    if "$code" in dct:
        return Code(dct["$code"], dct.get("$scope"))
    if "$uuid" in dct:
        return uuid.UUID(dct["$uuid"])
    if "$undefined" in dct:
        return None
    if "$numberLong" in dct:
        return Int64(dct["$numberLong"])
    if "$timestamp" in dct:
        tsp = dct["$timestamp"]
        return Timestamp(tsp["t"], tsp["i"])
    # No extended-JSON marker: return the (possibly re-encoded) dict as-is.
    return dct | 29,795 |
def grade_submissions(course_name, assignment_name):
    """Grade all submissions for a particular assignment.

    A .zip archive should be uploaded as part of the POST request to this
    endpoint.  The archive should contain a single directory 'Submissions',
    which should contain a directory for each student's submission.

    See the grade_assignment module for the implementation of the actual
    testing logic - this function is merely the endpoint to receive the
    request and return the response.  The response returned has an
    identifier to allow the client to request a CSV of grade results, as
    well as some JSON of results to be displayed to the user on the page.
    """
    try:
        course = find_course(course_name)
        assignment = course.assignments.filter_by(name=assignment_name).one()
    except NoResultFound:
        # Unknown course or assignment -> 404.
        return abort(404)
    submissions_archive = request.files['submissions']
    # Random id under which the grades CSV is stored for later download.
    csv_id= str(uuid.uuid4())
    csvfile, final_results, unittest_stats = grade_assignment(assignment, submissions_archive)
    os.makedirs(assignment.grades_dir, exist_ok=True)
    shutil.copy(csvfile.name, os.path.join(assignment.grades_dir, csv_id))
    csvfile.close()
    grade_response = {'csv_id': csv_id, 'results': final_results, 'stats': unittest_stats}
    if assignment.problems:
        # Per-problem maxima, and the total as their sum.
        grade_response["problems_max"] = {p.problem_name: p.score for p in assignment.problems}
        grade_response["max_score"] = sum(p.score for p in assignment.problems)
    else:
        grade_response["max_score"] = assignment.max_score
    return jsonify(grade_response) | 29,796 |
def print_costs(costs, start):
    """
    Print the costs of every vertex except the unused zeroth entry and
    the start vertex itself, space-separated on one line.

    Unreachable vertices are stored as None (infinite cost) and are
    printed as -1 instead.
    """
    remaining = costs[1:start] + costs[(start + 1):]
    rendered = [cost if cost is not None else -1 for cost in remaining]
    print(*rendered)
def scrape_sp500_tickers():
    """Scrape the Wikipedia page for the latest list of S&P 500 companies.

    Also caches the result in ./sp500tickers.pickle so later runs can skip
    the network round-trip.

    Returns:
        list[str]: the ticker symbols from the first column of the table
        (as scraped, including the trailing newline from the table cell).
    """
    # Fetch and parse Wikipedia's list of S&P 500 companies.
    resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = []
    # First <td> of each data row holds the ticker symbol; skip the header row.
    for row in table.findAll('tr')[1:]:
        ticker = row.findAll('td')[0].text
        tickers.append(ticker)
    # Cache to pickle to speed up subsequent runs.
    with open("sp500tickers.pickle","wb") as f:
        pickle.dump(tickers,f)
    print ('Scraping Complete')
    return tickers | 29,798 |
def get_top_k_recs(user_reps, item_reps, k):
    """
    Compute the k most relevant items for every user.

    Args:
        user_reps (dict): representations for all unique users; each
            entry holds an 'embed' latent vector.
        item_reps (:obj:`np.array`): (n, d) latent features for all n items.
        k (int): number of items to recommend per user.

    Returns:
        list[list[int]]: per-user item indices ordered by descending
        relevance (dot product of item and user embeddings).
    """
    recommendations = []
    for user_id in range(len(user_reps)):
        scores = np.dot(item_reps, user_reps[user_id]['embed'])
        ranked = np.argsort(scores)[::-1]
        recommendations.append(list(ranked[:k]))
    return recommendations
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.