content (string, lengths 22–815k) | id (int64, 0–4.91M)
|---|---|
def get_ammr_version(folder=None):
"""Return the AMMR version if possible.
The function will walk up a directory tree looking
for a ammr_verion.any file to parse.
"""
folder = folder or os.getcwd()
any_version_file = "AMMR.version.any"
xml_version_file = "AMMR.version.xml"
files = os.listdir(folder)
if any_version_file in files:
return ammr_any_version(os.path.join(folder, any_version_file))
elif xml_version_file in files:
return amm_xml_version(os.path.join(folder, xml_version_file))
else:
return ""
| 5,339,200
|
def env():
"""check local Environment"""
valid_icon = "\U00002714"
failed_icon = "\U0000274C"
click.echo(f"Current Environment:")
# viur-cli
if shutil.which("viur-cli"):
app_server_version = subprocess.check_output(['viur-cli', '-v']).decode("utf-8")
click.echo(f"{valid_icon} {app_server_version}")
else:
click.echo(f"{failed_icon} ViUR-CLI")
# app_server
if shutil.which("app_server"):
app_server_version = subprocess.check_output(['app_server', '-V']).decode("utf-8")
click.echo(f"{valid_icon} {app_server_version}")
else:
click.echo(f"{failed_icon} app_server")
# python3
if shutil.which("python3"):
python3_version = subprocess.check_output(['python3', '-V']).decode("utf-8")
click.echo(f"{valid_icon} python3 > {python3_version}")
else:
click.echo(f"{failed_icon} python3")
# python
if shutil.which("python"):
python_version = subprocess.check_output(['python', '-V']).decode("utf-8")
click.echo(f"{valid_icon} python > {python_version}")
else:
click.echo(f"{failed_icon} python")
# npm
if shutil.which("npm"):
npm_version = subprocess.check_output(['npm', '-v']).decode("utf-8")
click.echo(f"{valid_icon} npm {npm_version}")
else:
click.echo(f"{failed_icon} npm")
# node
if shutil.which("node"):
node_version = subprocess.check_output(['node', '-v']).decode("utf-8")
click.echo(f"{valid_icon} node {node_version}")
else:
click.echo(f"{failed_icon} node")
# pnpm
if shutil.which("pnpm"):
pnpm_version = subprocess.check_output(['pnpm', '-v']).decode("utf-8")
click.echo(f"{valid_icon} pnpm {pnpm_version}")
else:
click.echo(f"{failed_icon} pnpm (optional)")
# gcloud
if shutil.which("gcloud"):
gcloud_version = subprocess.check_output(['gcloud', '-v']).decode("utf-8").split("\n\n")[0]
versionList = []
for line in gcloud_version.split("\n"):
if not line:
continue
if not line.startswith("Google Cloud SDK"):
line = " - " + line
versionList.append(line)
versionString = '\n'.join(versionList)
click.echo(f"{valid_icon} {versionString}")
else:
click.echo(f"{failed_icon} gcloud")
| 5,339,201
|
def to_vector(texto,model,idf):
""" Receives a sentence string along with a word embedding model and
returns the vector representation of the sentence"""
tokens = normalizer(texto).split() # splits the text by space and returns a list of words
vec = np.zeros(300) # creates an empty vector of 300 dimensions
for word in tokens: # iterates over the sentence
if (word in model) and (word in idf): # checks if the word is in both the word embedding and the tf-idf model
vec += model[word]*idf[word] # adds every word embedding to the vector
if np.linalg.norm(vec) > 0:
return vec / np.linalg.norm(vec) # divides the vector by its norm (L2 normalization)
else:
return vec
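For orientation, here is a minimal self-contained sketch of the same idf-weighted averaging idea, using plain dictionaries in place of the embedding model and of the normalizer referenced above; the names and numbers are illustrative assumptions, not part of the original code.

import numpy as np

def sentence_vector(text, embeddings, idf, dim=3):
    """Sum idf-weighted word vectors and L2-normalize the result (illustrative sketch)."""
    vec = np.zeros(dim)
    for word in text.lower().split():          # stand-in for normalizer()
        if word in embeddings and word in idf:
            vec += embeddings[word] * idf[word]
    norm = np.linalg.norm(vec)
    return vec / norm if norm > 0 else vec

embeddings = {"cat": np.array([1.0, 0.0, 0.0]), "dog": np.array([0.0, 1.0, 0.0])}
idf = {"cat": 1.5, "dog": 0.5}
print(sentence_vector("cat dog", embeddings, idf))  # unit-length weighted combination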
| 5,339,202
|
def evaluate_interval_detection(labels, predictions, event_val, def_val, seq_length, other_vals=[]):
"""Evaluate interval detection for sequences by calculating
tp, fp, and fn.
Extends the metric outlined by Kyritsis et al. (2019) in
Modeling wrist micromovements to measure in-meal eating behavior from
inertial sensor data
https://ieeexplore.ieee.org/abstract/document/8606156/
by introducing additional possible events.
Args:
labels: The ground truth [batch_size, seq_length], encoding relevant
sequences using the vals given in parameters.
predictions: The predictions [batch_size, seq_length], encoding relevant
sequences using the vals given in parameters.
event_val: The value for true events.
def_val: The default value for non-events.
seq_length: The sequence length.
other_vals: List or 1-D tensor of vals for other events.
Returns:
tp: True positives (number of true sequences of event_vals predicted
with at least one predicting event_val) - scalar
fp_1: False positives type 1 (number of excess predicting event_vals
matching a true sequence of event_val in excess) - scalar
fp_2: False positives type 2 (number of predicting event_vals matching
def_val instead of event_val) - scalar
fp_3: False positives type 3 (number of predicting event_vals matching
other_vals instead of event_val) - 1D tensor with value for each
element in other_vals
fn: False negatives (number of true sequences of event_vals not matched
by at least one predicting event_val)
"""
def sequence_masks(labels, event_val, def_val, batch_size, seq_length):
"""Generate masks [labels, max_seq_count, seq_length] for all event sequences in the labels"""
# Mask non-event elements as False and event elements as True
event_mask = tf.equal(labels, event_val)
# Mask elements that are not equal to previous elements
diff_mask = tf.not_equal(event_mask[:, 1:], event_mask[:, :-1])
prev_mask = tf.concat([tf.ones_like(labels[:, :1], tf.bool), diff_mask], axis=1)
next_mask = tf.concat([diff_mask, tf.ones_like(labels[:, :1], tf.bool)], axis=1)
# Test if there are no sequences
empty = tf.equal(tf.reduce_sum(tf.cast(event_mask, tf.int32)), 0)
# Mask sequence starts and ends
seq_start_mask = tf.logical_and(prev_mask, event_mask)
seq_end_mask = tf.logical_and(next_mask, event_mask)
# Scatter seq_val
seq_count_per_batch = tf.reduce_sum(tf.cast(seq_start_mask, tf.int32), axis=[1])
max_seq_count = tf.reduce_max(seq_count_per_batch)
seq_val_idx_mask = tf.reshape(tf.sequence_mask(seq_count_per_batch, maxlen=max_seq_count), [-1])
seq_val_idx = tf.boolean_mask(tf.range(tf.size(seq_val_idx_mask)), seq_val_idx_mask)
seq_vals = tf.boolean_mask(labels, seq_start_mask)
seq_val = tf.scatter_nd(
indices=tf.expand_dims(seq_val_idx, axis=1),
updates=seq_vals,
shape=tf.shape(seq_val_idx_mask))
seq_val = tf.reshape(seq_val, [batch_size, max_seq_count])
# Set elements of seq_val that are not event_val to def_val
seq_val = tf.where(
tf.not_equal(seq_val, tf.fill(tf.shape(seq_val), event_val)),
x=tf.fill(tf.shape(seq_val), def_val), y=seq_val)
# Scatter seq_start
seq_start_idx = tf.where(seq_start_mask)[:,1]
seq_start = tf.scatter_nd(
indices=tf.expand_dims(seq_val_idx, axis=1),
updates=seq_start_idx,
shape=tf.shape(seq_val_idx_mask))
seq_start = tf.reshape(seq_start, [batch_size, max_seq_count])
# Scatter seq_end
seq_end_idx = tf.where(seq_end_mask)[:,1]
seq_end = tf.scatter_nd(
indices=tf.expand_dims(seq_val_idx, axis=1),
updates=seq_end_idx,
shape=tf.shape(seq_val_idx_mask))
seq_end = tf.reshape(seq_end, [batch_size, max_seq_count])
def batch_seq_masks(starts, ends, length, vals, def_val):
"""Return seq masks for one batch"""
def seq_mask(start, end, length, val, def_val):
"""Return one seq mask"""
return tf.concat([
tf.fill([start], def_val),
tf.fill([end-start+1], val),
tf.fill([length-end-1], def_val)], axis=0)
return tf.map_fn(
fn=lambda x: seq_mask(x[0], x[1], length, x[2], def_val),
elems=(starts, ends, vals),
dtype=tf.int32)
seq_masks = tf.cond(empty,
lambda: tf.fill([batch_size, 0, seq_length], def_val),
lambda: tf.map_fn(
fn=lambda x: batch_seq_masks(x[0], x[1], seq_length, x[2], def_val),
elems=(seq_start, seq_end, seq_val),
dtype=tf.int32))
return seq_masks, max_seq_count
labels = tf.cast(labels, dtype=tf.int32)
predictions = tf.cast(predictions, dtype=tf.int32)
def_val = tf.cast(def_val, dtype=tf.int32)
event_val = tf.cast(event_val, dtype=tf.int32)
# Dimensions
batch_size = labels.get_shape()[0]
# Compute whether labels are empty (no event_val sequences)
event_mask = tf.equal(labels, event_val)
empty = tf.equal(tf.reduce_sum(tf.cast(event_mask, tf.int32)), 0)
# Derive positive ground truth mask; reshape to [n_gt_seq, seq_length]
pos_mask, max_seq_count = sequence_masks(labels, event_val=event_val,
def_val=def_val, batch_size=batch_size, seq_length=seq_length)
pos_mask = tf.reshape(pos_mask, [-1, seq_length])
# Mask of default events
def_mask = tf.equal(labels, def_val)
# Masks for other events
other_masks = tf.map_fn(fn=lambda x: tf.equal(labels, x),
elems=tf.convert_to_tensor(other_vals, dtype=tf.int32), dtype=tf.bool)
# Retain only event_val in predictions
predictions = tf.where(
tf.not_equal(predictions, tf.fill(tf.shape(predictions), event_val)),
x=tf.fill(tf.shape(predictions), def_val), y=predictions)
# Stack predictions accordingly
pred_stacked = tf.reshape(tf.tile(tf.expand_dims(predictions, axis=1), [1, max_seq_count, 1]), [-1, seq_length])
# Remove empty masks and according preds
keep_mask = tf.greater(tf.reduce_sum(tf.cast(tf.not_equal(pos_mask, def_val), tf.int32), axis=1), 0)
pos_mask = tf.cond(empty,
lambda: pos_mask,
lambda: tf.boolean_mask(pos_mask, keep_mask))
pred_stacked = tf.cond(empty,
lambda: pred_stacked,
lambda: tf.boolean_mask(pred_stacked, keep_mask))
# Calculate number predictions per pos sequence
# Reduce predictions to elements in pos_mask that equal event_val, then count them
pred_sums = tf.map_fn(
fn=lambda x: tf.reduce_sum(tf.cast(tf.equal(tf.boolean_mask(x[0], tf.equal(x[1], event_val)), event_val), tf.int32)),
elems=(pred_stacked, pos_mask), dtype=tf.int32)
# Calculate true positive, false positive and false negative count
tp = tf.reduce_sum(tf.map_fn(lambda count: tf.cond(count > 0, lambda: 1, lambda: 0), pred_sums))
fn = tf.reduce_sum(tf.map_fn(lambda count: tf.cond(count > 0, lambda: 0, lambda: 1), pred_sums))
fp_1 = tf.cond(empty,
lambda: 0,
lambda: tf.reduce_sum(tf.map_fn(lambda count: tf.cond(count > 1, lambda: count-1, lambda: 0), pred_sums)))
# False positives of type 2 are any detections on default events
fp_2 = tf.reduce_sum(tf.cast(tf.equal(tf.boolean_mask(predictions, def_mask), event_val), tf.int32))
# False positives of type 3 are any detections on other events
fp_3 = tf.map_fn(
fn=lambda x: tf.reduce_sum(tf.cast(tf.equal(tf.boolean_mask(predictions, x), event_val), tf.int32)),
elems=other_masks, dtype=tf.int32)
tp = tf.cast(tp, tf.float32)
fp_1 = tf.cast(fp_1, tf.float32)
fp_2 = tf.cast(fp_2, tf.float32)
fp_3 = tf.cast(fp_3, tf.float32)
fn = tf.cast(fn, tf.float32)
return tp, fp_1, fp_2, fp_3, fn
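As a rough illustration of the counting scheme the docstring describes, here is a plain-Python sketch for a single 1-D sequence, covering only tp, fp_1, fp_2 and fn; it is not the TensorFlow implementation above, and the example values are made up.

import numpy as np

def interval_counts(labels, predictions, event_val=1, def_val=0):
    """Count tp, fp_1, fp_2, fn for one sequence, following the scheme above."""
    labels = np.asarray(labels)
    predictions = np.asarray(predictions)
    pred_event = predictions == event_val
    # fp_2: predicted events that fall on default (non-event) ground truth
    fp_2 = int(np.sum(pred_event & (labels == def_val)))
    # walk over contiguous runs of event_val in the ground truth
    tp = fp_1 = fn = 0
    in_run = False
    hits = 0
    for lab, hit in zip(labels == event_val, pred_event):
        if lab:
            in_run = True
            hits += int(hit)
        elif in_run:                      # a ground-truth run just ended
            tp += hits > 0
            fn += hits == 0
            fp_1 += max(hits - 1, 0)      # excess detections within one run
            in_run, hits = False, 0
    if in_run:                            # run reaching the end of the sequence
        tp += hits > 0
        fn += hits == 0
        fp_1 += max(hits - 1, 0)
    return tp, fp_1, fp_2, fn

print(interval_counts([0, 1, 1, 0, 1, 1, 0], [0, 1, 1, 0, 0, 0, 1]))  # (1, 1, 1, 1)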
| 5,339,203
|
def commit_datasource(ctx,
info,
datasource_id,
message,
schema,
orchestration_backend,
orchestration_args,
processing_backend,
processing_args,
force):
"""Creates a commit for a datasource"""
api = ce_api.DatasourcesApi(api_client(info))
if not force:
confirmation('Committing will trigger a pipeline that will create a '
"snapshot of your datasource's current state. "
'This might take a while. '
'Are you sure you wish to continue?', abort=True)
# find closest, this a heavy call for now
all_ds = api_call(api.get_datasources_api_v1_datasources_get)
ds_uuid = find_closest_uuid(datasource_id, all_ds)
if schema:
try:
with open(schema, 'rt', encoding='utf8') as f:
schema_dict = yaml.safe_load(f)
except (OSError, yaml.YAMLError):
error('Badly formatted YAML!')
schema_dict = dict()
else:
schema_dict = dict()
commit = api_call(
api.create_datasource_commit_api_v1_datasources_ds_id_commits_post,
DatasourceCommitCreate(
message=message,
used_schema=schema_dict,
orchestration_backend=orchestration_backend,
orchestration_args=orchestration_args,
processing_backend=processing_backend,
processing_args=processing_args,
),
ds_id=ds_uuid,
)
declare('Commit successful: {}'.format(format_uuid(commit.id)))
active_commit = '{datasource_id}:{commit_id}'.format(datasource_id=ds_uuid,
commit_id=commit.id)
user = info[constants.ACTIVE_USER]
info[user][constants.ACTIVE_DATASOURCE_COMMIT] = active_commit
info.save()
declare('Active datasource commit set to: {}'.format(
format_uuid(active_commit)))
| 5,339,204
|
def _div(v):
"""Pure spatial divergence"""
return _div_id(np.vstack((v, [np.zeros_like(v[0])])), l1_ratio=0.)
| 5,339,205
|
async def delete_share_handler(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Handle unshare-container endpoint query."""
try:
await request.app["db_conn"].delete_share(
request.match_info["owner"],
request.match_info["container"],
request.query["user"].split(","),
)
except KeyError:
# If can't find user from query, the client wants a bulk unshare
return await delete_container_shares_handler(request)
MODULE_LOGGER.log(
logging.DEBUG,
"Deleted following shared containers: %s",
str(request.match_info["container"]),
)
return aiohttp.web.Response(status=204, body="OK")
| 5,339,206
|
def iterate_news(
session: requests.Session, page: int, categories_by_id: Dict[int, str],
categories_by_slug: Dict[str, str], api_url: str
) -> Iterable[NewsItem]:
"""Fetch news items using API and iterate over them."""
params = {
'offset': 1,
'page': page
}
r = session.get(api_url, params=params)
data = r.json()
for element in data:
title_str = element['post_title']
if not isinstance(title_str, str):
raise ValueError()
name_str = element['post_name']
if not isinstance(name_str, str):
raise ValueError()
date_str = element['post_date'][:-1]
if not isinstance(date_str, str):
raise ValueError()
tags_str = element['tax']
if not isinstance(tags_str, str):
raise ValueError()
date = datetime.datetime.fromisoformat(date_str)
tags = json.loads('[' + tags_str + ']')
category_id: Optional[int] = None
category_slug: Optional[str] = None
category_title: Optional[str] = None
tag_titles: List[str] = []
for tag in tags:
if 'category' in tag:
category_id = int(tag['category']['id'])
category_slug = tag['category']['slug']
category_title = tag['category']['name']
elif 'post_tag' in tag:
tag_titles.append(tag['post_tag']['name'])
if category_id is None:
raise ValueError()
if category_slug is None:
raise ValueError()
if category_title is None:
raise ValueError()
yield NewsItem.initialize(
name_str, title_str, category_id, category_slug, category_title,
date, categories_by_id, categories_by_slug, tag_titles
)
| 5,339,207
|
def _always_run(*args, **kwargs) -> bool:
""" This returns False to indicate that the step is not already completed. """
return False
| 5,339,208
|
def generate_urls(search):
"""Generates a URLS in the correct format that brings to Google Image seearch page"""
return [(BASE_URL+quote(word)+GOOGLE_PICTURE_ID) for word in search]
| 5,339,209
|
def delete_record_from_index(person):
"""
Deletes person record from index.
Args:
person: Person who should be removed
Raises:
search.Error: An error occurred when the index name is unknown
or the query has a syntax error.
"""
doc_id = person.repo + ':' + person.record_id
person_location_index = appengine_search.Index(
name=PERSON_LOCATION_FULL_TEXT_INDEX_NAME)
person_location_index.delete(doc_id)
| 5,339,210
|
def _delete_sys_path_0():
"""Delete the first entry on `sys.path`, but only if this routine has not deleted it
already."""
global deleted_sys_path_0_value
if deleted_sys_path_0_value is None:
deleted_sys_path_0_value = sys.path[0]
del sys.path[0]
| 5,339,211
|
def batch_provider(data, batch_size, processor=None, worker_count=1, queue_size=16, report_progress=True):
""" Return an object that produces a sequence of batches from input data
Input data is split into batches of size :attr:`batch_size` which are processed with function :attr:`processor`
Data is split and processed by separate threads and dumped into a queue allowing continuous
provision of data. The main purpose of this primitive is to provide an easy-to-use tool for parallel batch
processing/generation in the background while the main thread runs the main algorithm.
Batches are processed in parallel, allowing better utilization of CPU cores and disk that may improve
GPU utilization for DL tasks with Storage/IO bottleneck.
This primitive can be used in various ways. For small datasets, the input :attr:`data` list may contain actual
dataset, while :attr:`processor` function does from small to no data processing. For larger datasets, :attr:`data`
list may contain just filenames or keys while :attr:`processor` function reads data from disk or db.
There are many purposes that function :attr:`processor` can be used for, depending on your use case.
- Reading data from disk or db
- Data decoding, e.g. from JPEG.
- Augmenting data: flipping, rotating, adding noise, etc.
- Concatenation of data, stacking to single ndarray, conversion to a tensor, uploading to GPU.
- Data generation.
Note:
Sequential order of batches is guaranteed only if number of workers is 1 (Default), otherwise batches might
be supplied out of order.
Args:
data (list): Input data, each entry in the list should be a separate data point.
batch_size (int): Size of a batch. If size of data is not divisible by :attr:`batch_size`, then
the last batch will have smaller size.
processor (Callable[[list], Any], optional): Function for processing batches. Receives slice of the :attr:`data`
list as input. Can return object of any type. Defaults to None.
worker_count (int, optional): Number of workers, should be greater or equal to one. To process data in parallel
and fully load CPU :attr:`worker_count` should be close to the number of CPU cores. Defaults to one.
queue_size (int, optional): Maximum size of the queue, which is number of batches to buffer. Should be larger
than :attr:`worker_count`. Typically, one would want this to be as large as possible to amortize all disk
IO and computational costs. Downside of large value is increased RAM consumption. Defaults to 16.
report_progress (bool, optional): Print a progress bar similar to `tqdm`. You still may use `tqdm` if you set
:attr:`report_progress` to False. To use `tqdm` just do
::
for x in tqdm(batch_provider(...)):
...
Defaults to True.
Returns:
Iterator: An object that produces a sequence of batches. The :meth:`next()` method of the iterator returns
the object produced by the :attr:`processor` function.
Raises:
StopIteration: When all data was iterated through. Stops the for loop.
Example:
::
def process(batch):
images = [misc.imread(x[0]) for x in batch]
images = np.asarray(images, dtype=np.float32)
images = images.transpose((0, 3, 1, 2))
labels = [x[1] for x in batch]
labels = np.asarray(labels, np.int64)
return torch.from_numpy(images) / 255.0, torch.from_numpy(labels)
data = [('some_list.jpg', 1), ('of_filenames.jpg', 2), ('etc.jpg', 4), ...] # filenames and labels
batches = dlutils.batch_provider(data, 32, process)
for images, labels in batches:
result = model(images)
loss = F.nll_loss(result, labels)
loss.backward()
optimizer.step()
"""
class State:
def __init__(self):
self.current_batch = 0
self.lock = Lock()
self.data_len = len(data)
self.batch_count = self.data_len // batch_size + (1 if self.data_len % batch_size != 0 else 0)
self.quit_event = Event()
self.queue = Queue(queue_size)
self.batches_done_count = 0
self.progress_bar = None
if report_progress:
self.progress_bar = ProgressBar(self.batch_count)
def get_next_batch_it(self):
try:
self.lock.acquire()
if self.quit_event.is_set() or self.current_batch == self.batch_count:
raise StopIteration
cb = self.current_batch
self.current_batch += 1
return cb
finally:
self.lock.release()
def push_done_batch(self, batch):
try:
self.lock.acquire()
self.queue.put(batch)
self.batches_done_count += 1
finally:
self.lock.release()
def all_done(self):
return self.batches_done_count == self.batch_count and self.queue.empty()
if processor is None:
def processor(x):
return x
def _worker(state):
while not state.quit_event.is_set():
try:
cb = state.get_next_batch_it()
data_slice = data[cb * batch_size:min((cb + 1) * batch_size, state.data_len)]
b = processor(data_slice)
state.push_done_batch(b)
except StopIteration:
break
class Iterator:
def __init__(self):
self.state = State()
self.workers = []
for i in range(worker_count):
worker = Thread(target=_worker, args=(self.state, ))
worker.daemon = True
worker.start()
self.workers.append(worker)
def __len__(self):
return self.state.batch_count
def __iter__(self):
return self
def __next__(self):
if not self.state.quit_event.is_set() and not self.state.all_done():
item = self.state.queue.get()
self.state.queue.task_done()
if self.state.progress_bar is not None:
self.state.progress_bar.increment()
return item
else:
self.state.quit_event.set()
raise StopIteration
def __del__(self):
self.state.quit_event.set()
while not self.state.queue.empty():
self.state.queue.get(False)
self.state.queue.task_done()
for worker in self.workers:
worker.join()
return Iterator()
| 5,339,212
|
def simplex(key, log_L_constraint, live_points_U,
loglikelihood_from_constrained,
prior_transform, sampler_state, replace_id):
"""
Samples from the prior restricted to the likelihood constraint.
This undoes the shrinkage at each step to approximate a bound on the contours.
First it does a scaling on each dimension.
Args:
key:
log_L_constraint:
live_points_U:
loglikelihood_from_constrained:
Returns:
"""
N,D = live_points_U.shape
key, width_key = random.split(key, 2)
def body(state):
(key, i, u_test, x_test, log_L_test) = state
key, sample_key, select_key, R_key = random.split(key, 4)
i = random.randint(select_key, shape=(), minval=0, maxval=N + 1)
# M,M
R = random_ortho_matrix(R_key, D)
# initial L, R for each direction
# t_R[i] = max_(k) (points[k,j] - spawn_point_U[j]) @ R[j,i]
# t_L[i] = max_(k) (points[k,j] - spawn_point_U[j]) @ -R[j,i]
# N, M
dx = live_points_U[sampler_state.knn_indices[i, :], :] - live_points_U[i, :]
# [N, M]
t = dx @ R
# [M]
t_R = jnp.maximum(jnp.max(t, axis=0), 0.)
t_L = jnp.minimum(jnp.min(t, axis=0), 0.)
u_test = live_points_U[i,:] + R @ random.uniform(sample_key, shape=[D], minval=t_L, maxval=t_R)
u_test = jnp.clip(u_test, 0., 1.)
x_test = prior_transform(u_test)
log_L_test = loglikelihood_from_constrained(**x_test)
return (key, i + 1, u_test, x_test, log_L_test)
(key, num_likelihood_evaluations, u_new, x_new, log_L_new) = while_loop(lambda state: state[-1] <= log_L_constraint,
body,
(key, 0, live_points_U[0, :],
prior_transform(live_points_U[0, :]),
log_L_constraint))
new_dist = jnp.linalg.norm(u_new - dynamic_update_slice(live_points_U, u_new[None, :], [replace_id,0]), axis=1)
new_dist = jnp.where(new_dist == 0., jnp.inf, new_dist)
new_indices = jnp.argsort(new_dist)[:D+1]
knn_indices = dynamic_update_slice(sampler_state.knn_indices,
new_indices[None, :],
[replace_id, 0])
sampler_state = sampler_state._replace(knn_indices=knn_indices)
CubesResults = namedtuple('CubesResults',
['key', 'num_likelihood_evaluations', 'u_new', 'x_new', 'log_L_new',
'sampler_state'])
return CubesResults(key, num_likelihood_evaluations, u_new, x_new, log_L_new, sampler_state)
| 5,339,213
|
def has_anonymous_link(node, auth):
"""check if the node is anonymous to the user
:param Node node: Node which the user wants to visit
:param str link: any view-only link in the current url
:return bool anonymous: Whether the node is anonymous to the user or not
"""
if auth.private_link:
return auth.private_link.anonymous
return False
| 5,339,214
|
def lstm(c_prev, x):
"""Long Short-Term Memory units as an activation function.
This function implements LSTM units with forget gates. Let the previous
cell state :math:`c_{\\text{prev}}` and the incoming signal :math:`x`.
First, the incoming signal :math:`x` is split into four arrays
:math:`a, i, f, o` of the same shapes along the second axis.
It means that :math:`x` 's second axis must have 4 times the length of
:math:`c_{\\text{prev}}`.
The split input signals correspond to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`f` : sources of forget gate
- :math:`o` : sources of output gate
Second, it computes outputs as:
.. math::
c &= \\tanh(a) \\text{sigmoid}(i)
+ c_{\\text{prev}} \\text{sigmoid}(f), \\\\
h &= \\tanh(c) \\text{sigmoid}(o).
These are returned as a tuple of two variables.
Args:
c_prev (~chainer.Variable): Variable that holds the previous cell
state. The cell state should be a zero array or the output of the
previous call of LSTM.
x (~chainer.Variable): Variable that holds the incoming signal. Its
second dimension must be four times that of the cell state.
Returns:
tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``. ``c`` is
the updated cell state. ``h`` indicates the outgoing signal.
See the original paper proposing LSTM with forget gates:
`Long Short-Term Memory in Recurrent Neural Networks \
<http://www.felixgers.de/papers/phd.pdf>`_.
.. admonition:: Example
Assuming ``y`` is the current input signal, ``c`` is the previous cell
state, and ``h`` is the previous output signal from an ``lstm``
function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Most typical preparation of ``x`` is:
>>> model = FunctionSet(w=F.Linear(n_units, 4 * n_units),
... v=F.Linear(n_units, 4 * n_units),
... ...)
>>> x = model.w(y) + model.v(h)
>>> c, h = F.lstm(c, x)
It corresponds to calculate the input sources :math:`a, i, f, o` from
the current input ``y`` and the previous output ``h``. Different
parameters are used for different kind of input sources.
"""
return LSTM()(c_prev, x)
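A small NumPy sketch of the cell update described in the docstring, purely for illustration; the actual computation happens inside the LSTM() function object.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_forward(c_prev, x):
    """c_prev: (batch, n); x: (batch, 4*n) holding a, i, f, o along axis 1."""
    a, i, f, o = np.split(x, 4, axis=1)
    c = np.tanh(a) * sigmoid(i) + c_prev * sigmoid(f)
    h = np.tanh(c) * sigmoid(o)
    return c, h

c_prev = np.zeros((2, 3))
x = np.random.randn(2, 12)
c, h = lstm_forward(c_prev, x)
print(c.shape, h.shape)  # (2, 3) (2, 3)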
| 5,339,215
|
def hals(video,
video_factorization,
maxiter_hals=30,
nnt=False,
verbose=False,
indent='',
device='cuda',
**kwargs):
"""Perform maxiter HALS updates To Temporal & Spatial Components
Parameter:
video: LowRankVideo class object
video_factorization: localized NMF factors
maxiter_hals: maximum number of iterations to tune hals
nnt: whether or not temporal components should be constrained to be nonnegative
verbose: whether or not print status update
indent: previous identation for printing status update
device: computation device
**kwargs: optional additional input arguments
Return:
hals iteration counter
"""
for itr in range(maxiter_hals):
if verbose:
if device=='cuda': torch.cuda.synchronize()
print(indent + '|--v HALS Iteration {:g}'.format(itr+1))
itr_t0 = time()
step_t0 = itr_t0
# Spatial Update Step
video_factorization.update_spatial(video)
if verbose:
if device=='cuda': torch.cuda.synchronize()
print(indent + '| |--> Spatial update took {:g} seconds'.format(time()-step_t0))
step_t0 = itr_t0
# Remove Empty Components
video_factorization.prune_empty_components()
video_factorization.normalize_spatial()
if verbose:
if device=='cuda': torch.cuda.synchronize()
print(indent + '| |--> Component prune after spatial update took {:g} seconds'.format(time()-step_t0))
step_t0 = itr_t0
# Temporal Update Step
video_factorization.update_temporal(video, nonnegative=nnt)
if verbose:
if device=='cuda': torch.cuda.synchronize()
print(indent + '| |--> Temporal update took {:g} seconds'.format(time()-step_t0))
print(indent + '| \'-total : {:g} seconds'.format(time()-itr_t0))
# Remove Empty Components
video_factorization.prune_empty_components()
if verbose:
if device=='cuda': torch.cuda.synchronize()
print(indent + '| |--> Component prune after temporal update took {:g} seconds'.format(time()-step_t0))
step_t0 = itr_t0
return itr + 1
| 5,339,216
|
def test_patch_array_operations_order(sut: SystemUnderTest):
"""Perform tests for Assertion.REQ_PATCH_ARRAY_OPERATIONS_ORDER."""
# TODO(bdodd): Need more thought on how to test this
| 5,339,217
|
def generate_random_sd(error, seq = None):
""" generates random sd with error% error rate
If seq is specified, random sd is generated from a substring of it."""
if seq is None:
seq1 = randSeq(rand(minLen, maxLen))
else:
length = rand(minLen, maxLen)
start = rand(0, len(seq) - length - 1)
seq1 = seq[start:start + length]
sED = rand(max(0, error - maxLED),min(maxSED, error))
seq2 = makeSmall(seq1, sED)[0]
seq2 = makeLarge(seq2, error-sED)[0]
return seq1, seq2, sED
| 5,339,218
|
def is_successful(gsm_log):
"""
Success is defined as having converged to a transition state.
"""
with open(gsm_log) as f:
for line in reversed(f.readlines()):
if '-XTS-' in line or '-TS-' in line:
return True
return False
| 5,339,219
|
def indicator(function_array_to_be_indicated, its_domain, barrier):
"""the indicator influences the function argument, not value. So here it iterates through x-domain and cuts any
values of function with an argument less than H"""
indicated = []
for index in range(len(its_domain)):
if its_domain[index] > barrier:
indicated.append(function_array_to_be_indicated[index])
else:
indicated.append(0)
return indicated
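The same cut can be written as a single vectorized expression; a brief sketch assuming the inputs are array-like:

import numpy as np

def indicator_vectorized(values, domain, barrier):
    """Zero out values whose domain point is not above the barrier."""
    values = np.asarray(values)
    domain = np.asarray(domain)
    return np.where(domain > barrier, values, 0)

print(indicator_vectorized([5, 6, 7], [0.1, 0.5, 0.9], barrier=0.4))  # [0 6 7]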
| 5,339,220
|
def test_elemwise_collapse():
""" Test when all inputs have one(and the same) broadcastable dimension """
shape = (4,5,60)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),dtype='float32'))
a = theano._asarray(numpy.random.rand(*shape),dtype='float32')
a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle(0,'x',1,2)
b = tcn.CudaNdarrayType((False, True, False, False))()
c = a3+b
f = pfunc([b], [c], mode=mode_with_gpu)
v = theano._asarray(numpy.random.rand(shape[0],1,*shape[1:]),dtype='float32')
v=cuda_ndarray.CudaNdarray(v)
if False:
for id,n in enumerate(f.maker.env.toposort()):
print(id, n)
#let debugmode catch errors
out=f(v)[0]
assert numpy.allclose(out,a.reshape(shape[0],1,*shape[1:])+v)
print "Expected collapse of all dimensions"
| 5,339,221
|
def list_calendars(service):
"""
Given a google 'service' object, return a list of
calendars. Each calendar is represented by a dict, so that
it can be stored in the session object and converted to
json for cookies. The returned list is sorted to have
the primary calendar first, and selected (that is, displayed in
Google Calendars web app) calendars before unselected calendars.
"""
app.logger.debug("Entering list_calendars")
calendar_list = service.calendarList().list().execute()["items"]
result = []
for cal in calendar_list:
kind = cal["kind"]
id = cal["id"]
if "description" in cal:
desc = cal["description"]
else:
desc = "(no description)"
summary = cal["summary"]
# Optional binary attributes with False as default
selected = ("selected" in cal) and cal["selected"]
primary = ("primary" in cal) and cal["primary"]
result.append(
{ "kind": kind,
"id": id,
"summary": summary,
"selected": selected,
"primary": primary
})
return sorted(result, key=cal_sort_key)
| 5,339,222
|
def _causes_name_clash(candidate, path_list, allowed_occurences=1):
"""Determine if candidate leads to a name clash.
Args:
candidate (tuple): Tuple with parts of a path.
path_list (list): List of pathlib.Paths.
allowed_occurences (int): How often a name can occur before we call it a clash.
Returns:
bool
"""
duplicate_counter = -allowed_occurences
for path in path_list:
parts = tuple(reversed(path.parts))
if len(parts) >= len(candidate) and parts[: len(candidate)] == candidate:
duplicate_counter += 1
return duplicate_counter > 0
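A brief usage sketch of the reversed-parts matching, with hypothetical paths, showing when a clash is and is not reported:

from pathlib import Path

paths = [Path("a/task.py"), Path("b/task.py"), Path("b/other.py")]
print(_causes_name_clash(("task.py",), paths))        # True: two paths end in task.py
print(_causes_name_clash(("task.py", "a"), paths))    # False: only a/task.py matches
print(_causes_name_clash(("other.py",), paths))       # False: occurs only once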
| 5,339,223
|
def run_eqm(results: Results, options: Options, state: PromisedObject) -> dict:
"""Run the eqm jobs."""
# set user-defined values
results['job_opts_eqm'] = edit_calculator_options(
options, ['eqm', 'xtpdft', 'esp2multipole'])
cmd_eqm_write = create_promise_command(
"xtp_parallel -e eqm -o {} -f {} -s 0 -j write", results['job_opts_eqm']['eqm'],
state)
results['job_setup_eqm'] = call_xtp_cmd(
cmd_eqm_write, options.scratch_dir,
expected_output={"eqm_jobs": "eqm.jobs"})
# Select the number of jobs to run based on the input provided by the user
results['job_select_eqm_jobs'] = edit_jobs_file(
results['job_setup_eqm']['eqm_jobs'],
options.eqm_jobs)
jobs_eqm = distribute_eqm_jobs(results, options, state)
# Finally move all the OR_FILES to the same folder in the scratch_dir
names = ('molecule_orb', 'dft_orb', 'mps_file')
return move_results_to_workdir(jobs_eqm, names, options.scratch_dir)
| 5,339,224
|
def main():
"""Main
"""
logging.basicConfig(level=logging.INFO)
session_pool = ph.HSessionManager.get_or_create_session_pool()
# run producer and consumer forever
session_pool.run_on_task_producer(producer)
| 5,339,225
|
def is_within_boundary(boundary_right_most_x, boundary_top_most_y,
boundary_left_most_x, boundary_bottom_most_y,
cursor):
"""
Checks if cursor is within given boundary
:param boundary_right_most_x:
:param boundary_top_most_y:
:param boundary_left_most_x:
:param boundary_bottom_most_y:
:param cursor:
:return: boolean
"""
if cursor.y < boundary_top_most_y:
return False
elif cursor.y > boundary_bottom_most_y:
return False
elif cursor.x < boundary_left_most_x:
return False
elif cursor.x > boundary_right_most_x:
return False
return True
| 5,339,226
|
def clean_novel(id_or_url: str, content_only: bool):
"""Remove accessory fields from novel
Removes all except vital information related to novel, this includes chapters, metadata, and assets.
If content_only flag is specified, only the chapter content is deleted, keeping the chapter entries as is.
"""
if content_only:
delete_downloaded_content(id_or_url)
else:
delete_associations(id_or_url)
| 5,339,227
|
def mixrng(numbytes, port='COM4'):
"""Returns bitwise xor of an inbuilt and hardware CSRNG"""
internal = os.urandom(numbytes)
external = extrng(numbytes, port)
return xorbytes(internal, external)
| 5,339,228
|
def show_quantization(X, X_c):
"""
Visualise a vector quantization.
show_quantization(X, X_c)
Inputs:
- X: numpy.ndarray containing the instances (p x 2)
- X_c: numpy.ndarray containing the centroids (q x 2)
A figure is created and the p instances are shown as dots,
whereas the q centroids are shown as diamonds.
The instances' colors show which centroid (drawn in the same color)
they are assigned to.
Authors: Cyril de Bodt (2016) - cyril.debodt@uclouvain.be
Antoine Vanderschueren (2019) - antoine.vanderschueren@uclouvain.be
Version: 08-10-2019
"""
# Checking the arguments
if not (isinstance(X, np.ndarray) and isinstance(X_c, np.ndarray)):
raise ValueError("""X and X_c must be numpy.ndarray""")
if not ((len(X.shape) == 2) and (len(X_c.shape) == 2)):
raise ValueError("""X and X_c must be numpy.ndarray with 2 dimensions""")
if not ((X.shape[1] == 2) and (X_c.shape[1] == 2)):
raise ValueError("""X and X_c must be numpy.ndarray with 2 dimensions and 2 columns""")
# Finding the index of the nearest centroid in X_c for each point in X
n_centroids = len(X_c)
n_points = len(X)
distances = np.sum((np.repeat(X, n_centroids, axis=0).reshape(n_points, n_centroids, 2) - X_c)**2, axis=-1)
closest = np.argmin(distances, axis=-1)
# Showing the vector quantization
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.scatter(X[:,0], X[:,1], c=closest, alpha=0.6)
plt.scatter(X_c[:,0], X_c[:,1], c=range(n_centroids), marker='d', edgecolor='black', s=150, alpha=0.8)
plt.grid()
ax.set_title("Vector quantization ("+str(n_points)+" samples - "+str(n_centroids)+" centroids)", fontsize=15)
ax.set_xlabel("$X_1$", fontsize=15)
ax.set_ylabel("$X_2$", fontsize=15)
plt.show()
plt.close()
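For reference, the nearest-centroid assignment computed above with repeat/reshape broadcasting can also be written with scipy's cdist; a short equivalent sketch with random illustrative data:

import numpy as np
from scipy.spatial.distance import cdist

X = np.random.rand(100, 2)    # instances
X_c = np.random.rand(5, 2)    # centroids
closest = np.argmin(cdist(X, X_c, metric="sqeuclidean"), axis=1)
print(closest.shape)          # (100,) index of the nearest centroid per instance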
| 5,339,229
|
async def hello(ctx: discord.ApplicationContext):
"""Say hello to the bot""" # The command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Please note that you MUST respond with ctx.respond(), ctx.defer(), or any other
# interaction response within 3 seconds in your slash command code, otherwise the
# interaction will fail.
| 5,339,230
|
def write(df, filename):
"""Write a dataframe to the data directory."""
df.to_pickle(os.path.join(TEST_DATA_DIR, '{0}.pkl'.format(filename)))
| 5,339,231
|
def save_dict_to_json(json_path, save_dict):
"""
将字典保存成JSON文件
"""
json_str = json.dumps(save_dict, indent=2, ensure_ascii=False)
with open(json_path, 'w', encoding='utf-8') as json_file:
json_file.write(json_str)
| 5,339,232
|
def save_matchmaking_auth_key(auth_key: str) -> bool:
"""Register a new matchmaking auth key. !This will overwrite the existing matchmaking key for this chain!
Args:
auth_key: auth_key to add for matchmaking
Returns:
Boolean if successful
"""
try:
redis.set_sync(MATCHMAKING_KEY_LOCATION, auth_key)
return True
except Exception:
return False
| 5,339,233
|
def get_game_page(url):
"""
Get the HTML for a given URL, where the URL is a game's page in the Xbox
Store
"""
try:
response = requests.get(url)
except (requests.exceptions.MissingSchema, ConnectionError):
return None
game_page = BeautifulSoup(response.content, "html.parser")
return game_page
| 5,339,234
|
async def generate_spam_round_tx_xdrs(pool, prioritizers: List[Keypair], prioritized_builders, unprioritized_builders, rnd):
"""Generate transaction XDRs for a single spam round (ledger) according to given builders.
Some of the generated transactions are prioritized using given prioritizer seeds,
and some are unprioritized and not signed by a prioritizer account.
All prioritized transactions are expected to be included in the next ledger.
Only one out of all unprioritized transactions is expected to be included in the next ledger.
Return a metadata dictionary with the generated XDRs along with additional information.
"""
logging.info('generating transaction xdrs for round %d', rnd)
# make a cyclic list of builders.
# we will use this list to fetch a destination address for each payment tx,
# making all builders send a tx to the next builder right after them
# in line in a cyclic manner. this is done in order to cycle through
destination addresses instead of sending all txs to a single destination
# account.
cycl = itertools.cycle(unprioritized_builders)
next(cycl) # make sure the next cycle call will return the next builder after the current one
# generate unprioritized payment transactions
# we generate them first, thus will submit them first,
# because we want to test if prioritized transactions actually get priority over them
loop = asyncio.get_running_loop()
futurs = []
for builder in unprioritized_builders:
dest_address = next(cycl).keypair.address().decode()
f = loop.run_in_executor(
pool, build_and_sign,
builder, dest_address, PAYMENT_AMOUNT, None)
futurs.append(f)
if not futurs:
raise RuntimeError('no futures to gather')
tx_metadata = {}
for tx_hash, tx_xdr in await asyncio.gather(*futurs):
tx_metadata[tx_hash] = {'round': rnd, 'prioritized': False, 'xdr': tx_xdr}
# generate prioritized transactions
futurs = []
cycl = itertools.cycle(prioritized_builders)
for builder, prioritizer in zip(prioritized_builders, prioritizers):
dest_address = next(cycl).keypair.address().decode()
f = loop.run_in_executor(
pool, build_and_sign,
builder, dest_address, PAYMENT_AMOUNT, prioritizer.secret_seed)
futurs.append(f)
if not futurs:
raise RuntimeError('no futures to gather')
for tx_hash, tx_xdr in await asyncio.gather(*futurs):
tx_metadata[tx_hash] = {'round': rnd, 'prioritized': True, 'xdr': tx_xdr}
return tx_metadata
| 5,339,235
|
async def cleanup_device_registry(
hass: HomeAssistant, device_manager: TuyaDeviceManager
) -> None:
"""Remove deleted device registry entry if there are no remaining entities."""
device_registry = dr.async_get(hass)
for dev_id, device_entry in list(device_registry.devices.items()):
for item in device_entry.identifiers:
if DOMAIN == item[0] and item[1] not in device_manager.device_map:
device_registry.async_remove_device(dev_id)
break
| 5,339,236
|
def test_unsupported_upstream_entity_type():
"""
Checks to see how invalid types work in the upstream node.
If validation is working correctly, it should raise a ConfigurationError
"""
with pytest.raises(ConfigurationError):
unsupported_upstream_entity_type_mcp()
| 5,339,237
|
def test_decode_open_order_account_layout():
"""Test decode event queue."""
with open(OPEN_ORDER_ACCOUNT_BIN_PATH, "r") as input_file:
base64_res = input_file.read()
data = base64.decodebytes(base64_res.encode("ascii"))
open_order_account = OPEN_ORDERS_LAYOUT.parse(data)
assert open_order_account.account_flags.open_orders
assert open_order_account.account_flags.initialized
assert PublicKey(open_order_account.market) == PublicKey("4r5Bw3HxmxAzPQ2ATUvgF2nFe3B6G1Z2Nq2Nwu77wWc2")
assert PublicKey(open_order_account.owner) == PublicKey("7hJx7QMiVfjZSSADQ18oNKzqifJPMu18djYLkh4aYh5Q")
# if there are no orders, the bytes returned here will be all 0. In this case we have three orders.
assert len([order for order in open_order_account.orders if int.from_bytes(order, "little") != 0]) == 3
# the first three orders are bid orders
assert int.from_bytes(open_order_account.is_bid_bits, "little") == 0b111
| 5,339,238
|
def json_custom_parser(obj):
"""
A custom json parser to handle json.dumps calls properly for Decimal and Datetime data types.
"""
if isinstance(obj, Decimal):
return float(obj)
elif not isinstance(obj, str) and isinstance(obj, collections.abc.Iterable):  # Python 3: str replaces basestring; ABCs live in collections.abc
return list(obj)
elif isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date):
dot_ix = 19 # 'YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM'.find('.')
return obj.isoformat()[:dot_ix]
else:
raise TypeError(obj)
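A parser like this is meant to be passed to json.dumps through its default= hook, which is only called for objects the encoder cannot serialize on its own; a minimal usage sketch:

import json
import datetime
from decimal import Decimal

payload = {"price": Decimal("9.99"), "when": datetime.datetime(2020, 1, 2, 3, 4, 5)}
print(json.dumps(payload, default=json_custom_parser))
# {"price": 9.99, "when": "2020-01-02T03:04:05"}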
| 5,339,239
|
def _renew_re_entrant_lock(conn: Redis, lock_name: str, identifier: str, lock_timeout: int):
"""
为锁续约
:param conn:redis链接
:param lock_name:lock_key
:param identifier:lock_唯一id
:param lock_timeout: 上锁超时时间ms
:return:
"""
while _renew_re_entrant_lock_lua(conn, [lock_name], [lock_timeout, identifier],force_eval=True):
# if a renewal attempt fails, exit immediately; otherwise sleep a third of the timeout and renew again
time.sleep(lock_timeout / 3000)
| 5,339,240
|
def process(arguments: list = None) -> str:
"""
Run the process
Parameters
----------
arguments : list
Injectable arguments to execute
Returns
-------
str
The name of the library command to execute
"""
if not arguments:
arguments = []
__prepare()
args = __parse_arguments(arguments if arguments else sys.argv[1:])
if not __validate_arguments(args):
print_error("Argument combination not valid!")
sys.exit(1)
__check_devices(args)
return __dispatch_command(args)
| 5,339,241
|
def fs_open(path, flag, mode=default_file_mode):
"""
Open a file, potentially creating it. Return the new fd's id or else -1 if
file can not be opened (or potentially created)
"""
# Check if the file should be created if it doesn't exist
O_CREAT = 64
create = flag & O_CREAT
# If requested, try to create the file
if create:
try:
filesys.add_file(path, mode, 0)
except AlreadyExistsError:
# File may already exist, which is ok with O_CREAT
pass
except Exception:
return -1
# Call the virtual fs to open the file
try:
inodeid = filesys.open_file(path)
except DoesNotExistError:
return -1
# Add an fd for this file to the open files state
return fstate.create_fd(inodeid)
| 5,339,242
|
def printMat(inMat, thresh=0.8):
"""
打印矩阵
"""
for i in range(32):
for k in range(32):
if float(inMat[i, k]) > thresh:
print(1,end='')
else:
print(0,end='')
print('')
| 5,339,243
|
def new_user():
"""
Create Instance of User class to be used by the module
"""
user_details = ['Daudi', 'Jesee', 'dj@mail.com', 'password']
user = Users(user_details)
return user
| 5,339,244
|
def normalise_angle(angle: float) -> float:
"""Normalises the angle in the range (-pi, pi].
args:
angle (rad): The angle to normalise.
return:
angle (rad): The normalised angle.
"""
while angle > math.pi:
angle -= 2 * math.pi
while angle <= -math.pi:
angle += 2 * math.pi
return angle
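The same wrap into (-pi, pi] can be written in closed form, which is handy for array inputs; a sketch (exact multiples of pi are subject to the usual floating-point caveats):

import math

def normalise_angle_closed_form(angle: float) -> float:
    """Equivalent closed-form wrap of an angle into (-pi, pi]."""
    return -((-angle + math.pi) % (2 * math.pi) - math.pi)

assert math.isclose(normalise_angle_closed_form(4.0), 4.0 - 2 * math.pi)
assert math.isclose(normalise_angle_closed_form(-4.0), -4.0 + 2 * math.pi)
assert math.isclose(normalise_angle_closed_form(1.0), 1.0)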
| 5,339,245
|
def test_constraints1():
"""
simplest senario
"""
pm = ParameterHelper(
species=["O", "C", "H"],
kernels={
"twobody": [["*", "*"], ["O", "O"]],
"threebody": [["*", "*", "*"], ["O", "O", "O"]],
},
parameters={
"twobody0": [1, 0.5],
"twobody1": [2, 0.2],
"threebody0": [1, 0.5],
"threebody1": [2, 0.2],
"cutoff_twobody": 2,
"cutoff_threebody": 1,
},
constraints={
"twobody0": [True, False],
"threebody0": [False, True],
"noise": False,
},
verbose="DEBUG",
)
hm = pm.as_dict()
Parameters.check_instantiation(hm["hyps"], hm["cutoffs"], hm["kernels"], hm)
assert hm["train_noise"] is False
hyps = hm["hyps"]
assert len(hyps) == 6
assert hyps[0] == 1
assert hyps[1] == 2
assert hyps[2] == 0.2
assert hyps[3] == 2
assert hyps[4] == 0.5
assert hyps[5] == 0.2
| 5,339,246
|
def get_hdf_len(*path):
"""
Returns the number of rows in an hdf file as an int.
"""
path = construct_path(*path)
with pd.HDFStore(path) as store:
numrows = store.get_storer('data').nrows
return numrows
| 5,339,247
|
def test_fastparcel_water_content_scalar():
"""Test FastParcel.water_content for scalar input."""
height = 2000*units.meter
z_init = 3000*units.meter
q_initial = 0.004751707262581661*units.dimensionless
l_initial = 2e-3*units.dimensionless
rate = 0.5/units.km
actual = sydneyfast.water_content(
z_init, q_initial, l_initial, rate)(height)
truth = 0.00450332*units.dimensionless
assert_almost_equal(actual, truth, 6)
assert not hasattr(actual, 'size')
| 5,339,248
|
def matmul(A, B, transpose_A=False, transpose_B=False, master='/gpu:0'):
"""
distributed matrix multiplication.
A: DistMat,
B: single tensor or a list of tensors.
Note: returns a single tensor or a list of tensors, Not a DistMat.
"""
if isinstance(A, tf.Tensor) or isinstance(A, tf.Variable):
if isinstance(B, tf.Tensor) or isinstance(B, tf.Variable):
return tf.matmul(A, B)
else:
raise NotImplementedError
if transpose_B:
raise NotImplementedError
else:
if transpose_A: # distributed dim is inner axis
if isinstance(B, tf.Tensor) or isinstance(B, tf.Variable):
# broadcast
partial_sums = []
for i, t in enumerate(A.tensors):
with tf.device(t.device):
partial_sums.append(tf.matmul(t, B[A.partition[i]:A.partition[i+1],:], transpose_a=True))
with tf.device(master):
return tf.add_n(partial_sums)
else:
partial_sums = []
for t_A, t_B in zip(A.tensors, B.tensors):
#print(t_A.device)
#print(t_B.device)
#assert t_A.device == t_B.device
with tf.device(t_A.device):
partial_sums.append(tf.matmul(t_A, t_B, transpose_a=True))
with tf.device(master):
return tf.add_n(partial_sums)
# distributed computation necessary
#return tf.add_n([tf.matmul(Apart, Bpart) for Apart, Bpart in zip(A.tensors, B.tensors)])
else: # non-distributed dim is inner axis. merely broadcast B.
if isinstance(B, tf.Tensor) or isinstance(B, tf.Variable):
slices = []
for t in A.tensors:
with tf.device(t.device):
slices.append(tf.matmul(t, B))
return distmat.DistMat(slices)
else:
raise NotImplementedError
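To make the transpose_A branch concrete: each device multiplies its row shard of A (transposed) by the matching row shard of B, and the partial products are summed on the master device. A plain NumPy sketch of that reduction, with made-up shard boundaries:

import numpy as np

A = np.random.rand(10, 4)           # distributed along axis 0
B = np.random.rand(10, 3)
partition = [0, 4, 7, 10]           # row ranges owned by each "device"

partial_sums = [
    A[lo:hi].T @ B[lo:hi]
    for lo, hi in zip(partition[:-1], partition[1:])
]
assert np.allclose(sum(partial_sums), A.T @ B)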
| 5,339,249
|
def celerybeat_started():
"""
Returns true/false depending on whether the celerybeat service is started or not
"""
if is_systemd():
running = 'active' in fabric.api.sudo('systemctl is-active %s' % celerybeat_service_name())
return running
return fabtools.service.is_running(celerybeat_service_name())
| 5,339,250
|
def highest_greedy_score(board, disks):
"""
Compute the highest possible score that can be obtained by dropping each
of the given disks on the given board in a greedy way.
- The disks must be dropped in the order in which they appear in the
given list of disks. Each disk is dropped in the best column as
computed by the function best_drop_for_disk.
- Upon exit from the function, the board reflects the state obtained from
dropping the disks. If not all the given disks can be dropped because
the board gets completely filled, the function only drops the disks it can
drop.
- The function returns a tuple of (1) the highest score followed by (2) a tuple
of columns in which the successive disks have been dropped.
- Upon return, the given list of disks only stores disks that have not been
dropped on the board.
- The function will not take into account possible raises of level while
dropping disks, i.e. the resulting score only reflects scores obtained
from dropping disks as computed by the function drop_disk_at.
- This function must be implemented in a RECURSIVE way.
ASSUMPTIONS
- The given board is a playable board, and each of the given disks is a
proper disk for the given board.
- None of the given disks is cracked.
"""
score = 0
columns = ()
if len(disks) == 0 or Board.is_full(board): # No more disks to drop
return score, columns
else:
disk_to_drop = disks[0]
column_best_drop, score_best_drop = best_drop_for_disk(board, disk_to_drop)
del disks[0]
score, columns = highest_greedy_score(board, disks)
columns = (column_best_drop,) + columns
score += score_best_drop
return score, columns
| 5,339,251
|
def normalize_and_discard(df: pd.DataFrame) -> pd.DataFrame:
"""
Normalize numeric values between 0 and 1 and discard records that are out of bounds.
"""
# ## 2. Discard values out of range of x and y
df_cleaned = df[(df.x >= 0) & (df.x <= 120) & (df.y >= 0) & (df.y <= (160 / 3))]
print(f'Shape difference {df.shape[0] - df_cleaned.shape[0]}')
# ## 3. Normalize x, y , s, a, dis, o, dir on scale 0-1
# thresholds are determined by examining data from all weeks
df_cleaned.x = df_cleaned.x / df.x.max()
df_cleaned.y = df_cleaned.y / df.y.max()
df_cleaned.s = df_cleaned.s / SPEED_MAX_THRESHOLD
df_cleaned.a = df_cleaned.a / ACCELERATION_MAX_THRESHOLD
df_cleaned.dis = df_cleaned.dis / DISTANCE_MAX_THRESHOLD
df_cleaned.o = df_cleaned.o / 360
df_cleaned.dir = df_cleaned.dir / 360
df_n2 = df_cleaned[[
'time', 'x', 'y', 's', 'a', 'dis', 'o', 'dir', 'event', 'frameId', 'team', 'gameId',
'playId', 'quarter', 'homeHasPossession',
'down', 'playType', 'defendersInTheBox',
'numberOfPassRushers', 'passResult', 'isDefensivePI'
]]
df_n2.quarter /= 5.0 # max quarters
df_n2.down /= 4.0 # max downs
df_n2.defendersInTheBox /= 11.0
df_n2.numberOfPassRushers /= 11.0
return df_n2
| 5,339,252
|
def gravatar_url(email, size=16):
"""Return the gravatar image for the given email address."""
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(email.strip().lower().encode('utf-8')).hexdigest(), size)
| 5,339,253
|
def x2bin(v):
"""
convert a value into a binary string
v: int, bytes, bytearray
bytes, bytearray must be in *big* endian.
"""
if isinstance(v, int):
bits = bin(v)
size = 8
elif isinstance(v, (bytes,bytearray)):
bits = bin(int.from_bytes(v, "big"))
size = len(v)*8
return bits[2:].zfill(size)
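Two brief usage examples for the accepted input types:

print(x2bin(5))            # '00000101'  -> an int is padded to 8 bits
print(x2bin(b"\x01\x02"))  # '0000000100000010' -> big-endian bytes, 8 bits each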
| 5,339,254
|
def run_server():
"""
This is the main runtime function of the server.
It creates a server socket and waits for a connection.
According to the received message, it will either send experiment values or receive experiment results.
In the case of experiment results retrieving, this function will download the file, make a verification of the downloaded file
by using md5sum, then will unzip the .tar.gz and then will create the coresponding graphs out the data received, by using a child process
for that time costly grpah creation operation procedure.
The jobs(experiment values) are in exp_values_together list and are pop-ing out to clients and to exp_values_pending.
In case of md5sum match, the values are getting removed from exp_values_pending.
In case of md5sum mismatch, the values are getting back in queue, with priority number one
"""
init_progress_file()
global exp_values_together
global exp_values_pending
global address_port
## print("DEBUG: " + str(exp_values_together))
# for el in exp_values_together: # debug
# print(el) # debug
print("INFO: address_port<" + str(address_port) + ">")
try:
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create a socket object
except socket.error as serr:
print("ERROR: Error while trying to create socket object")
try:
serversocket.bind((str(address_port[0]), int(address_port[1]))) # bind to the port
except socket.error as error:
print("ERROR: Error while creating socket:<" + str(error) + ">")
if re.search("Errno 98", str(error)):
my_regex = r"([0-9]{1,3}\.?){1,4}:[0-9]{1,5}" # Matches IPv4:PORT
address_port = input("Please provide a diferrent <IP:PORT> pair$")
while not re.search(my_regex, address_port):
input("ERROR: There is something wrong with IP:PORT input")
address_port = input("Please provide a diferrent <IP:PORT> pair$")
address_port = address_port.split(':')
run_server_loop()
serversocket.listen() # act like host
my_inactive_dict = []
run_java_tracking() # runs java gui
# while exp_values_together or exp_values_pending:
while True:
if not exp_values_together and not exp_values_pending:
break
#print("DEBUG: exp_values_together[0] is:<" + exp_values_together[0] + ">")
with open(sys_live_track_file, 'r') as file:
fileContent = file.readlines()
for d in fileContent:
if d[0] == "#": # if its a comment line proceed.
continue
elif len(d) > 2: # if its smaller than two, its a space line.
keyvalue = d.split(":") # keyvalue[0] has the 1@rps100,nmMem10240,.... and keyvalue[1] has the status
keyvalue[1] = keyvalue[1].strip()
#print("DEBUG DEUBG: keyvalue[1] is:<" + str(keyvalue[1]) + ">")
if keyvalue[1] == "Fail":
if keyvalue[0] in my_inactive_dict: # if it is already back to queue
continue
else:
#print("DEBUG DEBUG: exp_values_together0 is <" + str(exp_values_together[0]) + ">")
my_inactive_dict.append(keyvalue[0]) # check it as added to queue
values = keyvalue[0].split('@')
values[1] = values[1].strip()
exp_values_together.insert(0, values[1]) # put it back at the top of the queue
exp_values_pending.remove(values[1]) # remove it from the pending experiments
#print("DEBUG DEBUG: values[1] is <" + str(values[1]) + ">")
#print("DEBUG DEBUG: exp_values_together0 is <" + str(exp_values_together[0]) + ">")
print("INFO: Waiting for connection ==============>")
clientsocket, addr = serversocket.accept() # Establish a connection. Blocking function
print("INFO: Got a connection from %s" % str(addr))
msg_rcv = clientsocket.recv(16) # Status of worker (GiveMeWork or ResultsIncoming)
print("INFO: Message received: <" + str(msg_rcv.decode('ascii')) + ">")
if re.search("GiveMeWork", msg_rcv.decode('ascii')) and exp_values_together: # if true, worker is waiting for job
exp_values_pending.append(exp_values_together[0])
msg = "SLS_Experiment_On_Values:" + str(exp_values_together.pop(0)) # 25
try:
clientsocket.send(msg.encode('ascii'))
print("INFO: Successfully send experiment values to client")
except socket.error as serr:
print("ERROR: Error sending Experiment Values: <" + str(serr) + ">")
elif re.search("ResultsIncoming", msg_rcv.decode('ascii')):
try:
print("INFO: About to send Ack")
clientsocket.send("Ack".encode('ascii'))
print("INFO: Ack sent!")
except socket.error as serr:
print("ERROR: Error sending Ack: <" + str(serr) + ">")
try:
print("INFO: About to receive file name and md5sum")
msg_rcv = clientsocket.recv(1024)
print("INFO: Received file and md5sum:<" + str(msg_rcv.decode('ascii')) + ">")
except socket.error as serr:
print("ERROR: Error receiving file name and md5sum: <" + str(serr) + ">")
msg_rcv = msg_rcv.decode('ascii')
msg_rcv = msg_rcv.split(",")
msg_rcv[0] = re.sub(r'^(.*:)', '', msg_rcv[0]) # Transform SendingFile: <file>, into <file>
msg_rcv[1] = re.sub(r'^(.*:)', '', msg_rcv[1]) # Transform WithMD5Hash: <hash>, into <hash>
print("INFO: About to receive file:<" + str(msg_rcv[0]) + ">")
print("INFO: ...with md5sum of:<" + str(msg_rcv[1]) + ">")
try:
os.makedirs(sys_output_directory + msg_rcv[0] + "/")
except OSError as oser:
print("TERMINAL ERROR: Cannot create directory:\n" + str(oser))
downloaded_file = open(sys_output_directory + msg_rcv[0] + "/" + msg_rcv[0] + ".tar.gz", "wb")
try:
print("INFO: About to send StartTransfer")
clientsocket.send("StartTransfer".encode('ascii'))
print("INFO: Successfully send StartTransfer")
except socket.error as serr:
print("ERROR: Error while sending StartTransfer acknowledgment")
print("=========================")
print("INFO: About to start download")
transfered_data = clientsocket.recv(1024)
while transfered_data:
downloaded_file.write(transfered_data)
transfered_data = clientsocket.recv(1024)
downloaded_file.close()
print("INFO: Downloaded file successfully!")
print("=========================")
process = subprocess.Popen("md5sum " + sys_output_directory + msg_rcv[0] + "/" + msg_rcv[0] + ".tar.gz",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
results = str(stdout)
print("INFO: md5sum of sent folder at localhost is:<" + results[:34] + ">") # md5 is first 34 digits
print("INFO: md5sum of sent folder at client is:<" + str(msg_rcv[1]) + ">")
remove_value = msg_rcv[0].replace("_", ",")
if results[:30] == msg_rcv[1][:30]: # Comparing the first 30 characters of md5sums. Rest 4 digits are not taken into consideration because of possible loss of them in the network buffer
print("INFO: md5sum match! Experiment completed successfully!")
try:
exp_values_pending.remove(remove_value)
print("INFO: Removed values from the pending list")
print("INFO: Starting automatic creation of graphs")
unzip_process = subprocess.Popen("tar -zxvf " + sys_output_directory + msg_rcv[0] + "/" + msg_rcv[0] + ".tar.gz -C " +
sys_output_directory + msg_rcv[0],
shell=True,
stdout=subprocess.PIPE)
unzip_process.wait()
another_child = os.fork()
if another_child == 0: # child code
os.chdir(sys_output_directory + msg_rcv[0] + "/")
create_graphs(sys_output_directory + msg_rcv[0] + "/")
sys.exit(1)
except ValueError as verr:
print("ERROR: Error removing values from the pending list:" + str(verr) + "")
else:
print("ERROR: md5sum mismatch! Values for this experiment will be assigned to a client again")
exp_values_together.insert(0, remove_value)
exp_values_pending.remove(remove_value)
clientsocket.close()
| 5,339,255
|
def test_ll3_access_clearing_manager(test_client,test_single_sample_request_nonadmin,test_single_sample_request_ahoag,test_login_ll3):
""" Test that Laura (ll3, a clearing admin) can access the clearing task manager
and see entries made by multiple users """
response = test_client.get(url_for('clearing.clearing_manager')
, follow_redirects=True)
assert b'Clearing management GUI' in response.data
assert b'admin_request' in response.data
assert b'nonadmin_request' in response.data
| 5,339,256
|
def upload_to_remote(local_path):
"""upload data onto S3"""
remote_key_name = local_path.replace(root + '/./', '')
s3.meta.client.upload_file(Filename=local_path,
Bucket=BUCKETNAME,
Key=remote_key_name)
| 5,339,257
|
def normalize_url(url):
"""Function to normalize the url. It will be used as document id value.
Returns:
the normalized url string.
"""
norm_url = re.sub(r'http://', '', url)
norm_url = re.sub(r'https://', '', norm_url)
norm_url = re.sub(r'/', '__', norm_url)
return norm_url
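
# Illustrative usage sketch; the URLs below are made up, and it assumes `re` is
# imported alongside normalize_url.
print(normalize_url("https://example.com/a/b"))  # -> "example.com__a__b"
print(normalize_url("http://example.com/"))      # -> "example.com__"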
| 5,339,258
|
def test_bubble_sort_empty_list():
"""Test that bbs properly handles an empty list."""
assert bubble_sort([]) == []
| 5,339,259
|
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Validates if filesystem has remote protocol.
Args:
fs (``fsspec.spec.AbstractFileSystem``): An abstract super-class for pythonic file-systems, e.g. :code:`fsspec.filesystem(\'file\')` or :class:`datasets.filesystems.S3FileSystem`
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
| 5,339,260
|
def remove_gate(gate: List[Block], screen: pygame.Surface) -> None:
"""
After the snake has passed the gate, remove it
Args:
gate (List[Block]): The gate
screen (pygame.Surface): The screen to remove the gate from
Returns:
None
"""
update_rects = []
for block in gate:
update_rects.append(erase_block(block, screen))
pygame.display.update(update_rects)
| 5,339,261
|
def get_element_attribute_or_empty(element, attribute_name):
"""
Args:
element (element): The xib's element.
attribute_name (str): The desired attribute's name.
Returns:
The attribute's value, or an empty str if none exists.
"""
return element.attributes[attribute_name].value if element.hasAttribute(attribute_name) else ""
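
# A minimal sketch using xml.dom.minidom; the XML snippet and attribute names are illustrative.
from xml.dom import minidom
element = minidom.parseString('<view id="abc"/>').documentElement
print(get_element_attribute_or_empty(element, "id"))    # "abc"
print(get_element_attribute_or_empty(element, "name"))  # ""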
| 5,339,262
|
def Matcher(y_true, y_pred_logits, y_pred_bbox):
"""
    y_true: GT list of length batch; each element is an array of
            shape (n_gt_objects, 5), where n_gt_objects is the number of
            objects in that image sample and 5 -> (cx, cy, w, h, class_label),
            with coordinates in the [0, 1] range
    y_pred_logits: model output of shape (batch, num_queries, classes)
    y_pred_bbox: model output of shape (batch, num_queries, 4) in [0, 1] range -> cx, cy, w, h
"""
y_pred_bbox = y_pred_bbox.numpy()
out_loss = 0
batch = len(y_true)
b,num_queries,_ = y_pred_logits.shape
assert b == batch, 'Batch mismatch!!'
batch_query_indices = []
y_pred_logits = tf.math.softmax(y_pred_logits).numpy()
for i in range(batch):
out_cls_loss = -y_pred_logits[i][:,(y_true[i][:,-1]).astype(int)]
out_cdist = distance.cdist(y_pred_bbox[i], y_true[i][:,:4], 'euclidean')
out_iou = []
for j in range(len(y_true[i])):
giou = tfa.losses.giou_loss(cxcywh_to_xyxy(y_pred_bbox[i]), cxcywh_to_xyxy(y_true[i][j,:4][np.newaxis,:]))
out_iou.append(giou)
out_iou = -np.array(out_iou).transpose(1,0)
comb_loss = out_cls_loss + out_cdist + out_iou
row_ind, col_ind = linear_sum_assignment(comb_loss)
batch_query_indices.append((row_ind,col_ind))
return batch_query_indices
| 5,339,263
|
def diff_runs(ctx, args):
"""Diff two runs.
If `RUN1` and `RUN2` are omitted, the latest two filtered runs are
diffed. See FILTERING topics below for details on filtering runs
to diff.
If `RUN1` or `RUN2` is specified, both must be specified.
{{ runs_support.op_and_label_filters }}
{{ runs_support.status_filters }}
### Diff command
By default the ``diff`` program is used to diff run details. An
alternative default command may be specified in
``~/.guild/config.yml`` using the ``command`` attribute of the
``diff`` section.
To use a specific diff program with the command, use `--cmd`.
"""
from . import diff_impl
diff_impl.main(args, ctx)
| 5,339,264
|
def set_test_variables():
"""
Sets up variables for the unit tests below.
:return: dictionary of test input variables for the unit tests.
"""
test_variables = {
"asl_valid_full": os.path.join(
SRC_ROOT, "resources/schemas/tests_jsons/asl_valid/test_asl_schema001.json"
),
"asl_valid_absent_conditional_field": os.path.join(
SRC_ROOT, "resources/schemas/tests_jsons/asl_valid/test_asl_schema002.json"
),
"asl_valid_labeling_duration_array": os.path.join(
SRC_ROOT, "resources/schemas/tests_jsons/asl_valid/test_asl_schema003.json"
),
"asl_schema": os.path.join(SRC_ROOT, "resources/schemas/asl_bids_schema.json"),
}
return test_variables
| 5,339,265
|
def _addBindInput(self, name, type = DEFAULT_TYPE_STRING):
"""(Deprecated) Add a BindInput to this shader reference."""
warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2)
return self.addInput(name, type)
| 5,339,266
|
def process_days_step(message):
"""Update period field of film object with given data."""
chat_id = message.chat.id
film = film_dict[chat_id]
try:
        if message.text == 'Сьогодні':  # 'Today'
            film.period = film.today
        elif message.text == 'Завтра':  # 'Tomorrow'
            film.period = film.today + timedelta(days=1)
else:
day, month, *other = re.split('[.,]', message.text)
film.period = datetime(film.today.year, int(month), int(day))
bot.send_chat_action(chat_id, 'typing')
session_data = get_films_data(film)
send_session_data(chat_id, session_data)
except Exception as e:
log_uncorrect_messages(message.from_user, 'EXC: days step')
hide_markup = types.ReplyKeyboardRemove()
        bot.reply_to(message, 'Упс..Щось пішло не так. Cпробуй ще раз (/start).', reply_markup=hide_markup)  # "Oops... Something went wrong. Try again (/start)."
log_request_messages(message.from_user, film)
hide_markup = types.ReplyKeyboardRemove()
bot.send_message(message.from_user.id,
                     '\U0001F3A5 Гарного перегляду!',  # "Enjoy the movie!"
reply_markup=hide_markup)
| 5,339,267
|
def test_get_html_page_invalid_content_type(
mock_raise_for_status: mock.Mock,
caplog: pytest.LogCaptureFixture,
content_type: str,
) -> None:
"""`_get_html_page()` should warn if an invalid content-type is given.
Only text/html is allowed.
"""
caplog.set_level(logging.DEBUG)
url = "https://pypi.org/simple/pip"
link = Link(url)
session = mock.Mock(PipSession)
session.get.return_value = mock.Mock(
**{
"request.method": "GET",
"headers": {"Content-Type": content_type},
}
)
assert _get_html_page(link, session=session) is None
mock_raise_for_status.assert_called_once_with(session.get.return_value)
assert (
"pip._internal.index.collector",
logging.WARNING,
"Skipping page {} because the GET request got Content-Type: {}."
"The only supported Content-Type is text/html".format(url, content_type),
) in caplog.record_tuples
| 5,339,268
|
def sf(x, c, d, scale):
"""
Survival function of the Burr type XII distribution.
"""
_validate_params(c, d, scale)
with mpmath.extradps(5):
x = mpmath.mpf(x)
c = mpmath.mpf(c)
d = mpmath.mpf(d)
scale = mpmath.mpf(scale)
if x < 0:
return mpmath.mp.one
return (1 + (x/scale)**c)**(-d)
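
# Quick sanity check, assuming mpmath and _validate_params are available as above:
# with c = d = scale = 1 the Burr XII survival function reduces to 1 / (1 + x),
# so sf(1, 1, 1, 1) should be 0.5.
print(sf(1, 1, 1, 1))  # mpf('0.5')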
| 5,339,269
|
def substitute_word(text):
"""
    Word substitution to make the text consistent (e.g. "mister" -> "mr").
"""
words = text.split(" ")
preprocessed = []
for w in words:
substitution = ""
if w == "mister":
substitution = "mr"
elif w == "missus":
substitution = "mrs"
else:
substitution = w
preprocessed.append(substitution)
return " ".join(preprocessed)
| 5,339,270
|
def google_wiki(keyword, langid='en', js={}):
"""Google query targets, output if English wikipedia entry is found"""
targets = []
headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:32.0) Gecko/20100101 Firefox/32.0',}
googlerx = re.compile('(http[s]?[^\&]*)') # /url?q=https://fr.wikipedia.org/wiki/La_Banque_postale&sa=U&ei=Zn...
infoboxrx = re.compile('infobox')
domainrx = re.compile('^[a-zA-Z\-]+\.([a-zA-Z\-]+\.)*[a-zA-Z\-]+$')
# query = 'http://www.google.com/search?q=wikipedia%20{}%20{}'.format(langid, keyword)
query = 'http://www.google.com/search?q=wikipedia%20{}'.format(keyword)
r = requests.get(query, headers=headers)
soup = BeautifulSoup(r.content)
keywords = extract_keywords(js)
# phish_tokens = set([word for li in keywords for word in li])
# print(phish_tokens)
for a in soup.find_all('a'):
search = googlerx.search(a.get('href', ''))
if not search:
continue
url = search.groups()[0]
mld, rd = registered_domain(url)
if rd == 'wikipedia.org' and '#' not in url:
# if '.wikipedia.org' in url and '#' not in url:
# if url.startswith('https://{}.wikipedia.org'.format(langid)) and '#' not in url:
wikiurl = url
r = requests.get(url)
html = str(r.content)
wikisoup = BeautifulSoup(r.content)
title = wikisoup.find(id="firstHeading")
title = title.text
if not title or keyword not in title.lower():
continue
print(wikiurl)
infobox = wikisoup.find(class_=infoboxrx)
if infobox:
for anchor in infobox.find_all('a'):
if 'href' in anchor.attrs:
targeturl = anchor['href']
# is the link internal
if targeturl.startswith('/'):
continue
reg_domain = registered_domain(targeturl)[1]
if reg_domain:
t = (title, reg_domain, wikiurl)
print(reg_domain)
targets.append(t)
external_links = wikisoup.find_all('a', class_="external text")
external_domains = set()
            for anchor in external_links:  # find_all already returned the anchor tags
if 'href' in anchor.attrs:
targeturl = anchor['href']
# is the link internal
if targeturl.startswith('/'):
continue
reg_domain = registered_domain(targeturl)[1]
if reg_domain:
                        external_domains.add((title, reg_domain, wikiurl))
return targets, sorted(external_domains)
| 5,339,271
|
def _get_test_words() -> WordDict:
"""
>>> _get_test_words()['?og']
['dog', 'log']
"""
from .compile_words import read_words
dir_path = os.path.dirname(os.path.realpath(__file__))
return read_words(os.path.join(dir_path, 'test_data.txt'))
| 5,339,272
|
def escape(line, chars):
"""Escapes characters 'chars' with '\\' in 'line'."""
def esc_one_char(ch):
if ch in chars:
return "\\" + ch
else:
return ch
return u"".join([esc_one_char(ch) for ch in line])
| 5,339,273
|
def polinomsuzIntegralHesapla(veriler):
"""
    Computes the integral of the given data using the trapezoidal rule.
    :param veriler: The data whose integral will be computed. Must be a list.
"""
a,b=5,len(veriler)
deltax = 1
integral = 0
n = int((b - a) / deltax)
for i in range(n-1):
integral += deltax * (veriler[a] + veriler[a+deltax]) / 2
a += deltax
return integral
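
# Worked example, assuming the function above: with veriler = [0, 1, ..., 9]
# the trapezoidal sums run from index 5 upward, giving 5.5 + 6.5 + 7.5 + 8.5 = 28.0.
print(polinomsuzIntegralHesapla(list(range(10))))  # 28.0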
| 5,339,274
|
def _keep_extensions(files, extension):
""" Filters by file extension, this can be more than the extension!
E.g. .png is the extension, gray.png is a possible extension"""
if isinstance(extension, str):
extension = [extension]
def one_equal_extension(some_string, extension_list):
return any([some_string.endswith(one_extension) for one_extension in extension_list])
return list(filter(lambda x: one_equal_extension(x, extension), files))
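
# Illustrative call; the file names are made up.
print(_keep_extensions(["a.png", "b.gray.png", "c.jpg"], ".png"))
# ['a.png', 'b.gray.png']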
| 5,339,275
|
def make_all_rules(
schema: "BaseOpenAPISchema", bundles: Dict[str, CaseInsensitiveDict], connections: EndpointConnections
) -> Dict[str, Rule]:
"""Create rules for all endpoints, based on the provided connections."""
return {
f"rule {endpoint.verbose_name}": make_rule(
endpoint, bundles[endpoint.path][endpoint.method.upper()], connections
)
for endpoint in schema.get_all_endpoints()
}
| 5,339,276
|
async def test_duplicate_error(hass):
"""Test that errors are shown when duplicate entries are added."""
conf = {CONF_IP_ADDRESS: "192.168.1.100", CONF_PORT: 7777}
MockConfigEntry(domain=DOMAIN, unique_id="192.168.1.100", data=conf).add_to_hass(
hass
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
| 5,339,277
|
def audio_to_magnitude_db_and_phase(n_fft, hop_length_fft, audio):
"""This function takes an audio and convert into spectrogram,
it returns the magnitude in dB and the phase"""
stftaudio = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length_fft)
stftaudio_magnitude, stftaudio_phase = librosa.magphase(stftaudio)
stftaudio_magnitude_db = librosa.amplitude_to_db(
stftaudio_magnitude, ref=np.max)
return stftaudio_magnitude_db, stftaudio_phase
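
# Hypothetical usage sketch (assumes librosa and numpy are installed and imported as above):
noise = np.random.randn(16000).astype(np.float32)  # one second of white noise at 16 kHz
mag_db, phase = audio_to_magnitude_db_and_phase(n_fft=512, hop_length_fft=128, audio=noise)
print(mag_db.shape, phase.shape)  # both are (257, n_frames) for n_fft = 512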
| 5,339,278
|
def check_racs_exists(base_dir: str) -> bool:
"""
Check if RACS directory exists
Args:
base_dir: Path to base directory
Returns:
True if exists, False otherwise.
"""
return os.path.isdir(os.path.join(base_dir, "EPOCH00"))
| 5,339,279
|
def has_rc_object(rc_file, name):
"""
    Check whether a given section exists in the qutiprc file.
Parameters
----------
rc_file : str
String specifying file location.
    name : str
        Tag of the section to look for in the file.
"""
config = ConfigParser()
try:
config.read(_full_path(rc_file))
except (MissingSectionHeaderError, ParsingError):
return False
    return name in config
| 5,339,280
|
def apply_transform_test(batch_size, image_data_dir, tensor_data_dir, limited_num = None, shuffle_seed = 123, dataset = None):
"""
"""
std = [1.0, 1.0, 1.0]
mean = [0.0, 0.0, 0.0]
# if dataset is None:
# std = [1.0, 1.0, 1.0]
# mean = [0.0, 0.0, 0.0]
# elif dataset == "cifar10":
# std = [0.247, 0.243, 0.261]
# mean = [0.4914, 0.4822, 0.4465]
# elif dataset == "cifar100":
# std = [0.2673342858792401, 0.2564384629170883, 0.27615047132568404]
# mean = [0.5070751592371323, 0.48654887331495095, 0.4409178433670343]
# elif dataset == "imagenet":
# std = [0.229, 0.224, 0.225]
# mean = [0.485, 0.456, 0.406]
# elif dataset == "facescrub":
# std = [0.5, 0.5, 0.5]
# mean = [0.5, 0.5, 0.5]
trainTransform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean, std)
])
dataset = ImageTensorFolder(img_path=image_data_dir, tensor_path=tensor_data_dir, label_path=tensor_data_dir,
img_fmt="jpg", tns_fmt="pt", lbl_fmt="label", transform=trainTransform, limited_num = limited_num)
# dataset_size = len(dataset)
# indices = list(range(dataset_size))
# np.random.seed(shuffle_seed)
# np.random.shuffle(indices)
# test_indices = indices[0:]
# test_sampler = SubsetRandomSampler(test_indices)
testloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
shuffle=False, num_workers=4)
return testloader
| 5,339,281
|
def split_datasets(data_dir, word_dict, num_folds, fold_idx):
"""Split known words (including silence) and unknown words into training
and validation datasets respectively.
"""
modes = ['training', 'validation']
knowns = {m: [] for m in modes}
unknowns = {m: [] for m in modes}
word_excluded = set()
reg = re.compile('.*/([^/]+)/(.*)_nohash_(.*).wav')
# to find the most common known word
known_counter = {m: collections.Counter() for m in modes}
key_words = word_dict.key_words
divider = set_divider(data_dir, key_words, num_folds)
for wav in gfile.Glob(os.path.join(data_dir, '*', '*nohash*.wav')):
groups = reg.search(wav).groups()
word = groups[0].lower()
speaker = groups[1].lower()
mode = which_set(speaker, divider, fold_idx)
indices = word_dict.word_to_indices(word)
if indices:
if word in key_words:
knowns[mode].append([wav, indices])
known_counter[mode][word] += 1
else:
unknowns[mode].append([wav, indices])
else:
word_excluded.add(word)
print('words not in word_map.txt:', word_excluded)
# make an all-zero silence wave
silence_dir = os.path.join(data_dir, SILENCE_CLASS)
if not os.path.exists(silence_dir):
os.makedirs(silence_dir)
silence_0 = os.path.join(silence_dir, '%s_0.wav' % SILENCE_WORD)
encode_audio(np.zeros([SILENCE_LENGTH]), silence_0)
for mode in modes:
silence_indices = word_dict.word_to_indices(SILENCE_CLASS)
silence_size = known_counter[mode].most_common(1)[0][1]
knowns[mode] += [[silence_0, silence_indices]] * silence_size
return knowns, unknowns
| 5,339,282
|
def take_t(n):
"""
Transformation for Sequence.take
:param n: number to take
:return: transformation
"""
return Transformation(
"take({0})".format(n), lambda sequence: islice(sequence, 0, n), None
)
| 5,339,283
|
def metric_fn(loss):
"""Evaluation metric Fn which runs on CPU."""
perplexity = tf.exp(tf.reduce_mean(loss))
return {
"eval/loss": tf.metrics.mean(loss),
"eval/perplexity": tf.metrics.mean(perplexity),
}
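
# Quick numeric check of the perplexity relationship used above (plain NumPy, no TF session):
import numpy as np
loss = np.array([0.6931, 0.6931])   # mean loss ~= ln(2)
print(np.exp(loss.mean()))          # perplexity ~= 2.0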
| 5,339,284
|
def gensig_choi(distsmat, minlength=1, maxlength=None, rank=0):
""" The two dimensional sigma function for the c99 splitting """
if rank:
distsmat = rankify(distsmat, rank)
def sigma(a, b):
length = (b - a)
beta = distsmat[a:b, a:b].sum()
alpha = (b - a)**2
if minlength:
if (b - a) < minlength:
beta += np.inf
if maxlength:
if (b - a) > maxlength:
beta += np.inf
return (-beta, alpha)
return sigma
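
# Illustrative call with a toy similarity matrix (assumes numpy as np; rankify is only needed when rank > 0):
import numpy as np
sigma = gensig_choi(np.ones((4, 4)), minlength=1, maxlength=None, rank=0)
print(sigma(0, 2))  # (-4.0, 4): beta is the 2x2 block sum, alpha is (b - a) ** 2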
| 5,339,285
|
def check_ie_v3(base, add: Optional[str] = None) -> str:
"""Check country specific VAT-Id"""
s = sum((w * int(c) for w, c in zip(range(8, 1, -1), base)),
9 * (ord(add) - ord('@'))) # 'A' - 'I' -> 1 - 9
i = s % 23
return _IE_CC_MAP[i]
| 5,339,286
|
def delete_missing_cgacs(models, new_data):
""" If the new file doesn't contain CGACs we had before, we should delete the non-existent ones.
Args:
            models: all existing CGAC models in the database
new_data: All the entries gathered from the agency file
"""
to_delete = set(models.keys()) - set(new_data['cgac_code'])
sess = GlobalDB.db().session
if to_delete:
sess.query(CGAC).filter(CGAC.cgac_code.in_(to_delete)).delete(synchronize_session=False)
for cgac_code in to_delete:
del models[cgac_code]
| 5,339,287
|
def filter_experiment_model(faultgroup, faultmodel, interestlist=None):
"""
Filter for a specific fault model. If interestlist is given only experiments
in this list will be analysed.
0 set 0
1 set 1
2 Toggle
"""
if not isinstance(faultmodel, int):
if "set0" in faultmodel:
faultmodel = 0
elif "set1" in faultmodel:
faultmodel = 1
elif "toggle" in faultmodel:
faultmodel = 2
else:
raise ValueError("Faultmodel not understood")
return generic_filter_faults(faultgroup, 'fault_model', faultmodel, None, interestlist)
| 5,339,288
|
def _str2bool(v):
"""Parser type utility function."""
if v.lower() in ('yes', 'true', 't', 'y', 'on', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', 'off', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean type expected. '
'Received {}'.format(v))
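
# Hypothetical argparse wiring (the flag name is illustrative; assumes argparse is
# imported as in the function above):
parser = argparse.ArgumentParser()
parser.add_argument("--shuffle", type=_str2bool, default=False)
print(parser.parse_args(["--shuffle", "yes"]).shuffle)  # True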
| 5,339,289
|
def _default_command(cmds, argv):
"""Evaluate the default command, handling ``**kwargs`` case.
`argparse` and `argh` do not understand ``**kwargs``, i.e. pass through command.
There's a case (`pykern.pkcli.pytest`) that requires pass through so we wrap
the command and clear `argv` in the case of ``default_command(*args, **kwargs)``.
Args:
cmds (list): List of commands
argv (list): arguments (may be edited)
Returns:
function: default command or None
"""
if len(cmds) != 1 or cmds[0].__name__ != DEFAULT_COMMAND:
return None
dc = cmds[0]
spec = inspect.getargspec(dc)
if not (spec.varargs and spec.keywords):
return dc
save_argv = argv[:]
def _wrap_default_command():
return dc(*save_argv)
del argv[:]
return _wrap_default_command
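
# Sketch of the pass-through case, assuming DEFAULT_COMMAND == "default_command"
# and a Python version where inspect.getargspec still exists (it was removed in 3.11):
def default_command(*args, **kwargs):
    return args
argv = ["a", "b"]
cmd = _default_command([default_command], argv)
print(cmd(), argv)  # ('a', 'b') [] -- argv was captured and replayed through the wrapper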
| 5,339,290
|
def keypair_to_file(keypair):
"""Looks for the SSH private key for keypair under ~/.ssh/
Prints an error if the file doesn't exist.
Args:
keypair (string) : AWS keypair to locate a private key for
Returns:
        (string|None) : SSH private key file path or None if the private key doesn't exist.
"""
file = os.path.expanduser("~/.ssh/{}.pem".format(keypair))
if not os.path.exists(file):
print("Error: SSH Key '{}' does not exist".format(file))
return None
return file
| 5,339,291
|
def animation(n, movie_file=None, writer=None, **kwargs):
"""Context for animations."""
fig, ax = plt.subplots(**kwargs)
fig.set_tight_layout(True)
ax.grid(linestyle='dotted')
ax.set_aspect(1.0, 'datalim')
ax.set_axisbelow(True)
context = {'fig': fig, 'ax': ax, 'update_function': None}
yield context
ani = mpl_animation.FuncAnimation(fig, context['update_function'], range(n))
if movie_file:
ani.save(movie_file, writer=writer)
fig.show()
plt.close(fig)
| 5,339,292
|
def most_common_words(vocab_dir, visual_fld, num_visualize):
""" create a list of num_visualize most frequent words to visualize on TensorBoard.
saved to visualization/vocab_[num_visualize].tsv
"""
words = open(os.path.join(vocab_dir, 'vocab.tsv'), 'r').readlines()[:num_visualize]
words = [word for word in words]
safe_mkdir(visual_fld)
file = open(os.path.join(visual_fld, 'vocab_' + str(num_visualize) + '.tsv'), 'w')
for word in words:
file.write(word)
file.close()
| 5,339,293
|
def test_pt_br_ulb_tn_en_ulb_wa_tn_wa_luk_language_book_order() -> None:
"""
Produce verse level interleaved document for Brazilian Portuguese
and English scripture and translation notes for the book of Luke.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "language_book_order",
"resource_requests": [
{
"lang_code": "pt-br",
"resource_type": "ulb",
"resource_code": "luk",
},
{
"lang_code": "pt-br",
"resource_type": "tn",
"resource_code": "luk",
},
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "luk",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "luk",
},
],
},
)
finished_document_path = "pt-br-ulb-luk_pt-br-tn-luk_en-ulb-wa-luk_en-tn-wa-luk_language_book_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
| 5,339,294
|
def nameable_op(node_factory_function): # type: (Callable) -> Callable
"""Set the name to the ngraph operator returned by the wrapped function."""
@wraps(node_factory_function)
def wrapper(*args, **kwargs): # type: (*Any, **Any) -> Node
node = node_factory_function(*args, **kwargs)
node = _set_node_name(node, **kwargs)
return node
return wrapper
| 5,339,295
|
def update_states():
"""
Called once per interact to walk the tree of displayables and find
the old and new live2d states.
"""
def visit(d):
if not isinstance(d, Live2D):
return
if d.name is None:
return
state = states[d.name]
if state.mark:
return
state.mark = True
if state.new is d:
return
# Shouldn't happen, but stop thrashing if it does.
if state.old is d:
return
if state.cycle_new:
state.old = state.new
state.old_base_time = state.new_base_time
else:
state.old = None
state.old_base_time = None
state.expressions = [ ]
state.old_expressions = [ ]
state.new = d
if d.sustain:
state.new_base_time = state.old_base_time
else:
state.new_base_time = None
state.cycle_new = True
sls = renpy.display.core.scene_lists()
for d in sls.get_all_displayables(current=True):
if d is not None:
d.visit_all(visit)
for s in states.values():
if not s.mark:
s.cycle_new = False
s.mark = False
| 5,339,296
|
def load_mlflow(output):
"""Load the mlflow run id.
Args:
output (str): Output directory
"""
with open(os.path.join(output, STAT_FILE_NAME), 'r') as stream:
stats = load(stream, Loader=yaml.FullLoader)
return stats['mlflow_run_id']
| 5,339,297
|
def by_pattern(finding: finding.Entry, ignore: ignore_list.Entry) -> bool:
"""Process a regex ignore list entry."""
# Short circuit if no pattern is set.
if not ignore.pattern:
return False
# If there's a match on the path, check whether the ignore is for the same module.
if re.search(ignore.pattern, finding.path):
if ignore.module != finding.source.module:
return False
# Then check whether the ignore is for the particular reference.
if ignore.references:
if finding.source.reference in ignore.references:
return True
return False
        # Or check whether the ignore is for the same offset.
if ignore.offset is not None:
if finding.location.offset == ignore.offset:
return True
return False
# In this case this is a fairly permissive ignore.
return True
return False
| 5,339,298
|
def object_radius():
"""
Check for Radius:
"""
error_msg = "INCORRECT Object(s) Selection:\n\nYou Must Select One Arc!"
Selection = Gui.Selection.getSelectionEx()
try:
result = "Radius:"
m_found = False
for m_sel in Selection:
m_name = m_sel.ObjectName
if hasattr(m_sel, 'SubObjects'):
for m_sub, m_sub_name in zip(m_sel.SubObjects, m_sel.SubElementNames):
if hasattr(m_sub, 'Curve'):
r = m_sub.Curve
if hasattr(r, 'Radius'):
m_radius = r.Radius
result += "\nObject: " + str(m_name) + "." + str(m_sub_name)
result += "\nRadius is " + str(m_radius)
m_found = True
else:
if hasattr(m_sel, 'Curve'):
r = m_sel.Curve
if hasattr(r, 'Radius'):
m_radius = r.Radius
result += "\nObject: " + str(m_name)
result += "\nRadius is " + str(m_radius)
m_found = True
if m_found:
print_gui_msg(result)
else:
printError_msg(error_msg)
except:
printError_msg(error_msg)
| 5,339,299
|