| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
from typing import Dict, List

from fastapi import Depends
from fastapi.security import HTTPBasicCredentials
def delete_nodes_list(
nodes: List[str],
credentials: HTTPBasicCredentials = Depends(
check_credentials
), # pylint: disable=unused-argument
) -> Dict[str, str]:
"""Deletes a list of nodes (that are discoverables with lldp) to the db.
Exple of simplest call :
curl -X DELETE --user u:p -H "Content-type: application/json" \
http://127.0.0.1/api/nodes \
-d '["node1", "node2", "node3"]'"""
for node in nodes:
delete_node(node)
return {"response": "Ok"}
|
1b7d4e25e67f1a0d2a5eec23b12b1ca87242a066
| 3,643,400
|
def index():
"""
Application Home page
"""
module_name = settings.modules[c].get("name_nice")
response.title = module_name
return {"module_name": module_name,
}
|
527aa4b19eff87bb5c6fde6c0578ced5e876f59b
| 3,643,401
|
import os

import bs4
import requests
def download_abstruse_goose(program_abs_path, abstruse_page_num):
"""Downloads latest Abstruse Goose comics."""
# Create/change appropriate comic folder.
comic_folder = os.path.join(program_abs_path, "abstruse")
if os.path.exists(comic_folder):
os.chdir(comic_folder)
else:
os.mkdir(comic_folder)
os.chdir(comic_folder)
while True:
# Create the comic URL.
url = 'https://abstrusegoose.com/' + str(abstruse_page_num)
try:
# Get the comic page.
res = requests.get(url)
res.raise_for_status()
# Extract the image src.
soup = bs4.BeautifulSoup(res.text, 'html.parser')
match1 = soup.select("img[src*='/strips/']")
            if not match1:  # If the img element is not found, no further new comics can be downloaded.
return abstruse_page_num
comic_url = match1[0].get("src")
# Get the comic image.
res = requests.get(comic_url)
res.raise_for_status()
# Download the comic image.
image_file = open('abstruse' + str(abstruse_page_num) + '.jpg', 'wb')
for chunk in res.iter_content(100000):
image_file.write(chunk)
image_file.close()
# Increment the latest comic num.
abstruse_page_num += 1
except requests.exceptions.HTTPError:
return abstruse_page_num
|
06381d566168a997e6ff6deaf8cb55c68a9ea09f
| 3,643,402
|
import networkx as nx
import numpy as np


def is_CW_in_extension(G):
"""
Returns True if G is 'CW in expansion', otherwise it returns False.
G: directed graph of type 'networkx.DiGraph'
EXAMPLE
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
G=nx.DiGraph()
e_list = [(0,1),(0,2),(0,3),(0,4),(1,2),(1,3),(1,4)]
G.add_edges_from(e_list)
plt.figure(1)
nx.draw_circular(G,with_labels=True)
plt.show()
print("is_CW_in_extension(G):",is_CW_in_extension(G))
G.remove_edge(0,1)
plt.figure(1)
nx.draw_circular(G,with_labels=True)
plt.show()
print("is_CW_in_extension(G):",is_CW_in_extension(G))
G.remove_edge(0,3)
plt.figure(1)
nx.draw_circular(G,with_labels=True)
plt.show()
print("is_CW_in_extension(G):",is_CW_in_extension(G))
"""
assert type(G) is nx.DiGraph, "'G' has to be of type 'networkx.DiGraph'."
nodes = list(G.nodes)
m = len(nodes)
nr_beaten_list = np.zeros(m) #nr_beaten_list[i] is the number of nodes v with an edge i->v in G if i is NOT beaten by any other node. Otherwise its -1
for i in range(0,m):
for j in range(0,m):
if i != j and G.has_edge(nodes[i],nodes[j]) and nr_beaten_list[i] != -1:
nr_beaten_list[i]+=1
if i != j and G.has_edge(nodes[j],nodes[i]):
nr_beaten_list[i]=-1
#print(nr_beaten_list)
if len(np.where(nr_beaten_list==m-1)[0]) >0: #G has a CW
return(True)
buf = np.where(nr_beaten_list==m-2)[0]
if len(buf)==2:
[i0,i1] = buf
if not G.has_edge(i0,i1) and not G.has_edge(i1,i0): # There exist i0, i1 which are connected to every other node and i0 is not connected to i1
return(True)
return(False)
|
3a1af65be274d23de16cdc15253185a0bbeda0ec
| 3,643,403
|
from typing import Callable
from typing import Iterable
from typing import List
def get_index_where(condition: Callable[..., bool], iterable: Iterable) -> List[int]:
"""Return index values where `condition` is `True`."""
return [idx for idx, item in enumerate(iterable) if condition(item)]
|
6f99086730dfc2ab1f87df90632bc637fc6f2b93
| 3,643,404
|
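A quick usage sketch for the `get_index_where` sample above; the call below is illustrative and assumes the function is in scope.

```python
# Indices where the predicate holds: here, positions of the even numbers.
print(get_index_where(lambda x: x % 2 == 0, [3, 4, 7, 8, 10]))  # -> [1, 3, 4]
```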
def geom_crossbar(mapping=None, *, data=None, stat=None, position=None, show_legend=None, sampling=None, tooltips=None,
fatten=None,
**other_args):
"""
Display bars with horizontal median line.
Parameters
----------
mapping : `FeatureSpec`
Set of aesthetic mappings created by `aes()` function.
Aesthetic mappings describe the way that variables in the data are
mapped to plot "aesthetics".
data : dict or `DataFrame`
The data to be displayed in this layer. If None, the default, the data
is inherited from the plot data as specified in the call to ggplot.
stat : str, default='identity'
The statistical transformation to use on the data for this layer, as a string.
Supported transformations: 'identity' (leaves the data unchanged),
'count' (counts number of points with same x-axis coordinate),
'bin' (counts number of points with x-axis coordinate in the same bin),
'smooth' (performs smoothing - linear default),
'density' (computes and draws kernel density estimate).
position : str or `FeatureSpec`
Position adjustment, either as a string ('identity', 'stack', 'dodge', ...),
or the result of a call to a position adjustment function.
show_legend : bool, default=True
False - do not show legend for this layer.
sampling : `FeatureSpec`
Result of the call to the `sampling_xxx()` function.
Value None (or 'none') will disable sampling for this layer.
tooltips : `layer_tooltips`
Result of the call to the `layer_tooltips()` function.
Specifies appearance, style and content.
fatten : float, default=2.5
A multiplicative factor applied to size of the middle bar.
other_args
Other arguments passed on to the layer.
These are often aesthetics settings used to set an aesthetic to a fixed value,
like color='red', fill='blue', size=3 or shape=21.
They may also be parameters to the paired geom/stat.
Returns
-------
`LayerSpec`
Geom object specification.
Notes
-----
`geom_crossbar()` represents a vertical interval, defined by `x`, `ymin`, `ymax`.
    The middle value (e.g. the median) is represented by a horizontal line.
`geom_crossbar()` understands the following aesthetics mappings:
- x : x-axis coordinates.
- ymin : lower bound for error bar.
- middle : position of median bar.
- ymax : upper bound for error bar.
- alpha : transparency level of a layer. Understands numbers between 0 and 1.
- color (colour) : color of a geometry lines. Can be continuous or discrete. For continuous value this will be a color gradient between two colors.
- fill : color of geometry filling.
- size : lines width.
- width : width of a bar.
- linetype : type of the line. Codes and names: 0 = 'blank', 1 = 'solid', 2 = 'dashed', 3 = 'dotted', 4 = 'dotdash', 5 = 'longdash', 6 = 'twodash'.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 10
from lets_plot import *
LetsPlot.setup_html()
data = {
'x': ['a', 'b', 'c', 'd'],
'ymin': [5, 7, 3, 5],
'middle': [6.5, 9, 4.5, 7],
'ymax': [8, 11, 6, 9],
}
ggplot(data, aes(x='x')) + \\
geom_crossbar(aes(ymin='ymin', middle='middle', ymax='ymax'))
|
.. jupyter-execute::
:linenos:
:emphasize-lines: 14-15
import numpy as np
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
n = 800
cat_list = {c: np.random.uniform(3) for c in 'abcdefgh'}
np.random.seed(42)
x = np.random.choice(list(cat_list.keys()), n)
y = np.array([cat_list[c] for c in x]) + np.random.normal(size=n)
df = pd.DataFrame({'x': x, 'y': y})
err_df = df.groupby('x').agg({'y': ['min', 'median', 'max']}).reset_index()
err_df.columns = ['x', 'ymin', 'ymedian', 'ymax']
ggplot() + \\
geom_crossbar(aes(x='x', ymin='ymin', middle='ymedian', ymax='ymax', fill='x'), \\
data=err_df, width=.6, fatten=5) + \\
geom_jitter(aes(x='x', y='y'), data=df, width=.3, shape=1, color='black', alpha=.5)
"""
return _geom('crossbar',
mapping=mapping,
data=data,
stat=stat,
position=position,
show_legend=show_legend,
sampling=sampling,
tooltips=tooltips,
fatten=fatten,
**other_args)
|
27f1faf1dea99b033e9ac5ab4dbc52ea2865934c
| 3,643,405
|
from typing import Union
def chess_to_coordinate(pos: str) -> Union[Coordinate, Move]:
"""
    Arguments:
        pos: a square or move in coordinate notation, e.g. 'e2' (square),
             'e2e4' (move) or 'e7e8q' (move with promotion).
    """
if len(pos) == 2:
return Coordinate(int(pos[1]) - 1, file_dict[pos[0]])
else:
if len(pos) == 5:
if pos[4] == 'n':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.N_PROMO)
elif pos[4] == 'b':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.B_PROMO)
elif pos[4] == 'r':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.R_PROMO)
elif pos[4] == 'q':
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]), False, MoveType.Q_PROMO)
else:
return Move(Coordinate(int(pos[1]) - 1, file_dict[pos[0]]), Coordinate(int(pos[3]) - 1, file_dict[pos[2]]))
|
f55e8c4d349419a5477d5fc3c5390d133b89cdf7
| 3,643,406
|
def get_db_session():
"""
Get the db session from g.
If not exist, create a session and return.
:return:
"""
session = get_g_cache('_flaskz_db_session')
if session is None:
session = DBSession()
set_g_cache('_flaskz_db_session', session)
return session
|
1254a99c3c1dd3fe71f1a9099b9937df46754c33
| 3,643,407
|
def make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
    scale_tril: Floating-point `Tensor` representing the lower triangular matrix.
      `scale_tril` has shape [N1, N2, ...  k, k], which represents a k x k
      lower triangular matrix.
When `None` no `scale_tril` term is added to the LinOp.
The upper triangular elements above the diagonal are ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(x),
message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
array_ops.matrix_diag_part(x),
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero"),
], x)
with ops.name_scope(name, "make_tril_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_tril is not None:
scale_tril = array_ops.matrix_band_part(scale_tril, -1, 0) # Zero out TriU.
tril_diag = array_ops.matrix_diag_part(scale_tril)
if scale_diag is not None:
tril_diag += scale_diag
if scale_identity_multiplier is not None:
tril_diag += scale_identity_multiplier[..., array_ops.newaxis]
scale_tril = array_ops.matrix_set_diag(scale_tril, tril_diag)
return linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=assert_positive)
return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name)
|
137a0ac84e7b2fab71f1630ae1bd1b0b24fe8879
| 3,643,408
|
def remove_punctuation(word):
"""Remove all punctuation from the word (unicode). Note that the `translate`
method is used, and we assume unicode inputs. The str method has a different
`translate` method, so if you end up working with strings, you may want to
revisit this method.
"""
return word.translate(TRANSLATION_TABLE)
|
46476b6e4480a2f067c2370fd378778b452a1a3e
| 3,643,409
|
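The module-level TRANSLATION_TABLE used by `remove_punctuation` above is not included in the sample; a minimal sketch of how such a table could be built and applied, assuming only ASCII punctuation needs stripping.

```python
import string

# Hypothetical translation table (not shown in the original sample):
# maps each ASCII punctuation code point to None, so str.translate drops it.
TRANSLATION_TABLE = {ord(ch): None for ch in string.punctuation}

print("Hello, world!".translate(TRANSLATION_TABLE))  # -> 'Hello world'
```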
def prepare_filter_weights_slice_conv_2d(weights):
"""Change dimension order of 2d filter weights to the one used in fdeep"""
assert len(weights.shape) == 4
return np.moveaxis(weights, [0, 1, 2, 3], [1, 2, 0, 3]).flatten()
|
2b6ca65d68d4407ac0a7744efe01a90dc5423870
| 3,643,410
|
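A small shape check for the axis reordering in `prepare_filter_weights_slice_conv_2d` above; the weights tensor is made up and the function is assumed to be in scope.

```python
import numpy as np

# Keras-style conv2d weights: (height, width, in_channels, out_channels).
w = np.zeros((3, 5, 2, 4))
# moveaxis([0, 1, 2, 3] -> [1, 2, 0, 3]) yields (in_channels, height, width, out_channels).
print(np.moveaxis(w, [0, 1, 2, 3], [1, 2, 0, 3]).shape)  # -> (2, 3, 5, 4)
print(prepare_filter_weights_slice_conv_2d(w).shape)     # -> (120,)
```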
async def hello(request):
"""Hello page containing sarafan node metadata.
`version` contains sarafan node version.
`content_service_id` — contains service_id of content node
:param request:
:return:
"""
return web.json_response(await request.app['sarafan'].hello())
|
4a8b82a525082a03009d087a18042574e19c1796
| 3,643,411
|
import shutil
import os
def missing_toolchain(triplet: str) -> bool:
"""
Checks whether gcc, g++ and binutils are installed and in the path for the
current triplet
:param triplet: a triplet in the form riscv64-linux-gnu
:return: True if some part of the toolchain is missing, False otherwise
"""
toolchain_expected = {"ar", "as", "gcc", "g++", "ld", "ranlib", "strip"}
retval = False
for tool in toolchain_expected:
retval |= shutil.which(cmd=triplet + "-" + tool, mode=os.X_OK) is None
return retval
|
9fafae1a4bd5ce781183f28d098580624b6ba1ef
| 3,643,412
|
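A usage sketch for `missing_toolchain` above; the triplet is only an example.

```python
if missing_toolchain("riscv64-linux-gnu"):
    print("Cross toolchain incomplete: install gcc, g++ and binutils for riscv64-linux-gnu.")
else:
    print("Cross toolchain found.")
```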
import yaml
def load_capabilities(
base: str = "docassemble.ALWeaver", minimum_version="1.5", include_playground=False
):
"""
Load and return a dictionary containing all advertised capabilities matching
the specified minimum version, and optionally include capabilities that were
advertised from a namespace matching docassemble.playground*. The local
capabilities will always be the default configuration.
"""
current_package_name = _package_name()
this_yaml = path_and_mimetype(
f"{current_package_name}:data/sources/configuration_capabilities.yml"
)[0]
weaverdata = DAStore(base=base)
published_configuration_capabilities = (
weaverdata.get("published_configuration_capabilities") or {}
)
try:
with open(this_yaml) as f:
this_yaml_contents = f.read()
first_file = list(yaml.safe_load_all(this_yaml_contents))[0]
capabilities = {"Default configuration": first_file}
except:
capabilities = {}
for key in list(published_configuration_capabilities.keys()):
# Filter configurations based on minimum published version
if isinstance(published_configuration_capabilities[key], tuple) and Version(
published_configuration_capabilities[key][1]
) < Version(minimum_version):
            log(
                f"Skipping published weaver configuration {key}:"
                f"{published_configuration_capabilities[key]} because it is below the "
                f"minimum version {minimum_version}. Consider updating the {key} package."
            )
del published_configuration_capabilities[key]
# Filter out capability files unless the package is installed system-wide
if not include_playground and key.startswith("docassemble.playground"):
del published_configuration_capabilities[key]
for package_name in published_configuration_capabilities:
# Don't add the current package twice
if not current_package_name == package_name:
path = path_and_mimetype(
f"{package_name}:data/sources/{published_configuration_capabilities[package_name][0]}"
)[0]
try:
with open(path) as f:
yaml_contents = f.read()
capabilities[package_name] = list(yaml.safe_load_all(yaml_contents))[0]
except:
log(f"Unable to load published Weaver configuration file {path}")
return capabilities
|
3bb12fdbf4fc4340a042f0685a4917a7b1c1ed85
| 3,643,413
|
def build_graph(
config,
train_input_fn, test_input_fn, model_preprocess_fn, model):
"""Builds the training graph.
Args:
config: Training configuration.
train_input_fn: Callable returning the training data as a nest of tensors.
test_input_fn: Callable returning the test data as a nest of tensors.
model_preprocess_fn: Image pre-processing that should be combined with
the model for adversarial evaluation.
model: Callable taking (preprocessed_images, is_training, test_local_stats)
and returning logits.
Returns:
loss: 0D tensor containing the loss to be minimised.
train_measures: Dict (with string keys) of 0D tensors containing
training measurements.
test_measures: Dict (with string keys) of 0D tensors containing
test set evaluation measurements.
init_step_fn: Function taking (session, initial_step_val)
to be invoked to initialise the global training step.
"""
global_step = tf.train.get_or_create_global_step()
optimizer = _optimizer(config.optimizer, global_step)
model_with_preprocess = _model_with_preprocess_fn(
model, model_preprocess_fn)
# Training step.
loss, train_logits, train_adv_logits, train_labels = _train_step(
config.train, model_with_preprocess, global_step, optimizer,
train_input_fn())
train_measures = {
'acc': _top_k_accuracy(train_labels, train_logits),
}
if config.train.adversarial_loss_weight > 0.:
train_measures.update({
'adv_acc': _top_k_accuracy(train_labels, train_adv_logits),
})
# Test evaluation.
with tf.name_scope('test_accuracy'):
test_logits, test_adv_logits, test_labels = _test_step(
config.train, model_with_preprocess, test_input_fn())
test_measures = {
'acc': _top_k_accuracy(test_labels, test_logits),
'adv_acc': _top_k_accuracy(test_labels, test_adv_logits),
}
initial_step = tf.placeholder(shape=(), dtype=tf.int64)
init_global_step_op = tf.assign(global_step, initial_step)
def init_step_fn(session, initial_step_val):
session.run(init_global_step_op, feed_dict={initial_step: initial_step_val})
return loss, train_measures, test_measures, init_step_fn
|
4eff7555b2383db0870d5e63467e2d68a3336ece
| 3,643,414
|
import functools
import traceback
import time
def execli_deco():
""" This is a decorating function to excecute a client side Earth Engine
function and retry as many times as needed.
Parameters can be set by modifing module's variables `_execli_trace`,
`_execli_times` and `_execli_wait`
:Example:
.. code:: python
from geetools.tools import execli_deco
import ee
# TRY TO GET THE INFO OF AN IMAGE WITH DEFAULT PARAMETERS
@execli_deco()
def info():
            # THIS IMAGE DOESN'T EXIST, SO IT WILL THROW AN ERROR
img = ee.Image("wrongparam")
return img.getInfo()
# TRY WITH CUSTOM PARAM (2 times 5 seconds and traceback)
@execli_deco(2, 5, True)
def info():
            # THIS IMAGE DOESN'T EXIST, SO IT WILL THROW AN ERROR
img = ee.Image("wrongparam")
return img.getInfo()
    :param times: number of times it will try to execute the function
    :type times: int
    :param wait: waiting time before executing the function again
    :type wait: int
    :param trace: print the traceback
    :type trace: bool
"""
def wrap(f):
'''
if trace is None:
global trace
trace = _execli_trace
if times is None:
global times
times = _execli_times
if wait is None:
global wait
wait = _execli_wait
try:
times = int(times)
wait = int(wait)
except:
print(type(times))
print(type(wait))
raise ValueError("'times' and 'wait' parameters must be numbers")
'''
@functools.wraps(f)
def wrapper(*args, **kwargs):
trace = _execli_trace
times = _execli_times
wait = _execli_wait
r = range(times)
for i in r:
try:
result = f(*args, **kwargs)
except Exception as e:
print("try n°", i, "ERROR:", e)
if trace:
traceback.print_exc()
if i < r[-1] and wait > 0:
print("waiting {} seconds...".format(str(wait)))
time.sleep(wait)
elif i == r[-1]:
                        raise RuntimeError("An error occurred trying to execute"
                                           " the function '{0}'".format(f.__name__))
else:
return result
return wrapper
return wrap
|
c245cd30f372e6d00895f42ba26936f2fb92c257
| 3,643,415
|
import uuid
def lstm_with_backend_selection(inputs, init_h, init_c, kernel,
recurrent_kernel, bias, mask, time_major,
go_backwards, sequence_lengths,
zero_output_for_mask):
"""Call the LSTM with optimized backend kernel selection.
  Under the hood, this function will create two TF functions: one with the most
  generic kernel that can run under all device conditions, and a second one with
  the cuDNN-specific kernel, which can only run on a GPU.
The first function will be called with normal_lstm_params, while the second
function is not called, but only registered in the graph. The Grappler will
do the proper graph rewrite and swap the optimized TF function based on the
device placement.
Args:
inputs: Input tensor of LSTM layer.
init_h: Initial state tensor for the cell output.
init_c: Initial state tensor for the cell hidden state.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
mask: Boolean tensor for mask out the steps within sequence.
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the corresponding
timestep should be ignored.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable length
input, such as ragged tensors. If the input has a fixed timestep size,
this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
Returns:
List of output tensors, same as standard_lstm.
"""
params = {
'inputs': inputs,
'init_h': init_h,
'init_c': init_c,
'kernel': kernel,
'recurrent_kernel': recurrent_kernel,
'bias': bias,
'mask': mask,
'time_major': time_major,
'go_backwards': go_backwards,
'sequence_lengths': sequence_lengths,
'zero_output_for_mask': zero_output_for_mask,
}
def gpu_lstm_with_fallback(inputs, init_h, init_c, kernel, recurrent_kernel,
bias, mask, time_major, go_backwards,
sequence_lengths, zero_output_for_mask):
"""Use cuDNN kernel when mask is none or strictly right padded."""
if mask is None:
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
def cudnn_lstm_fn():
return gpu_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths)
    def standard_lstm_fn():
return standard_lstm(
inputs=inputs,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
zero_output_for_mask=zero_output_for_mask)
return tf.cond(
gru_lstm_utils.is_cudnn_supported_inputs(mask, time_major),
true_fn=cudnn_lstm_fn,
        false_fn=standard_lstm_fn)
if gru_lstm_utils.use_new_gru_lstm_impl():
# Chooses the implementation dynamically based on the running device.
(last_output, outputs, new_h, new_c,
runtime) = tf.__internal__.execute_fn_for_device(
{
gru_lstm_utils.CPU_DEVICE_NAME:
lambda: standard_lstm(**params),
gru_lstm_utils.GPU_DEVICE_NAME:
lambda: gpu_lstm_with_fallback(**params)
}, lambda: standard_lstm(**params))
else:
# Each time a `tf.function` is called, we will give it a unique
# identifiable API name, so that Grappler won't get confused when it
# sees multiple LSTM layers added into same graph, and it will be able
# to pair up the different implementations across them.
api_name = 'lstm_' + str(uuid.uuid4())
supportive_attribute = {
'time_major': time_major,
'go_backwards': go_backwards,
}
defun_standard_lstm = gru_lstm_utils.generate_defun_backend(
api_name, gru_lstm_utils.CPU_DEVICE_NAME, standard_lstm,
supportive_attribute)
defun_gpu_lstm = gru_lstm_utils.generate_defun_backend(
api_name, gru_lstm_utils.GPU_DEVICE_NAME, gpu_lstm_with_fallback,
supportive_attribute)
# Call the normal LSTM impl and register the cuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(**params)
gru_lstm_utils.function_register(defun_gpu_lstm, **params)
return last_output, outputs, new_h, new_c, runtime
|
4c45709265de5385399a7b9bff0aeb4e9a4d7b17
| 3,643,416
|
def _build_stack_from_3d(recipe, input_folder, fov=0, nb_r=1, nb_c=1):
"""Load and stack 3-d tensors.
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Only contain the keys
'fov', 'r', 'c', 'z', 'ext' or 'opt'.
input_folder : str
Path of the folder containing the images.
fov : int
Index of the fov to build.
nb_r : int
Number of round file to stack in order to get a 5-d tensor.
nb_c : int
Number of channel file to stack in order to get a 4-d tensor.
Returns
-------
tensor_5d : np.ndarray, np.uint
Tensor with shape (r, c, z, y, x).
"""
# load and stack successively channel elements then round elements
tensors_4d = []
for r in range(nb_r):
# load and stack channel elements (3-d tensors)
tensors_3d = []
for c in range(nb_c):
path = get_path_from_recipe(recipe, input_folder, fov=fov, r=r,
c=c)
tensor_3d = read_image(path)
tensors_3d.append(tensor_3d)
# stack 3-d tensors in 4-d
tensor_4d = np.stack(tensors_3d, axis=0)
tensors_4d.append(tensor_4d)
# stack 4-d tensors in 5-d
tensor_5d = np.stack(tensors_4d, axis=0)
return tensor_5d
|
6cb4e567324cb3404d6e373b3f9a00d3ccdd51ef
| 3,643,417
|
def view_menu(request):
"""Admin user view all the reservations."""
menus = Menu.objects.all()
return render(request,
"super/view_menu.html",
{'menus': menus})
|
7b8244a315f2da0794a80f71cf73517e81f614e0
| 3,643,418
|
def _get_hdfs_dirs_by_date(physical_table_name, date):
"""
根据日期获取指定日期的hdfs上数据目录列表
:param physical_table_name: 物理表名称
:param date: 日期
:return: hdfs上的数据目录列表
"""
return [f"{physical_table_name}/{date[0:4]}/{date[4:6]}/{date[6:8]}/{hour}" for hour in DAY_HOURS]
|
6581f81ebcf9051ccf97ade02fc80eeba46e0e78
| 3,643,419
|
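A sketch of what `_get_hdfs_dirs_by_date` above returns, assuming DAY_HOURS (not shown in the sample) is a list of two-digit hour strings; the table name and date are made up.

```python
# Hypothetical DAY_HOURS definition; the real module may differ.
DAY_HOURS = [f"{hour:02d}" for hour in range(24)]

print(_get_hdfs_dirs_by_date("tb_login", "20230115")[:2])
# -> ['tb_login/2023/01/15/00', 'tb_login/2023/01/15/01']
```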
import json
def indeed_jobs(request, category_id):
"""
Load Indeed jobs via ajax.
"""
if request.is_ajax() and request.method == 'POST':
per_page = 10
page = 1
html = []
if category_id == '0':
all_jobs = IndeedJob.objects.all()
else:
all_jobs = IndeedJob.objects.filter(category=category_id)
paginator = Paginator(all_jobs, per_page)
page = request.GET.get('page')
try:
jobs = paginator.page(page)
except PageNotAnInteger:
jobs = paginator.page(1)
except EmptyPage:
jobs = paginator.page(paginator.num_pages)
for job in jobs:
html.append(render_to_string('indeed-job.html', {'job': job}))
context = {
'html': u''.join(html),
'page': jobs.number,
}
if jobs.has_next(): context.update({'next_page': jobs.next_page_number()})
return HttpResponse(json.dumps(context), content_type='application/json')
raise Http404
|
12cf21f9ecad672e78715ef9687ad2e69d5ea963
| 3,643,420
|
def iinsertion_sort(arr, order=ASCENDING):
"""Iterative implementation of insertion sort.
:param arr: input list
:param order: sorting order i.e "asc" or "desc"
:return: list sorted in the order defined
"""
operator = SORTING_OPERATORS.get(order.lower(), GREATER_THAN)
for i in range(1, len(arr)):
position = i - 1
value = arr[i]
while position >= 0 and operator(arr[position], value):
arr[position + 1] = arr[position]
position -= 1
arr[position + 1] = value
return arr
|
8698fbb500bfad3cb2e6964112d46ef8151c1e89
| 3,643,421
|
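`iinsertion_sort` above depends on module-level names (ASCENDING, GREATER_THAN, SORTING_OPERATORS) that the sample does not show; below is a plausible definition plus a usage example, purely as a sketch under those assumptions.

```python
import operator

# Hypothetical constants, consistent with how the function uses them.
ASCENDING, DESCENDING = "asc", "desc"
GREATER_THAN, LESS_THAN = operator.gt, operator.lt
SORTING_OPERATORS = {ASCENDING: GREATER_THAN, DESCENDING: LESS_THAN}

print(iinsertion_sort([5, 2, 9, 1]))                # -> [1, 2, 5, 9]
print(iinsertion_sort([5, 2, 9, 1], order="desc"))  # -> [9, 5, 2, 1]
```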
def actor_files_paths():
"""
Returns the file paths that are bundled with the actor. (Path to the content of the actor's file directory).
"""
return current_actor().actor_files_paths
|
2ec9505eceb2da78aee668ff044e565374aa3a1c
| 3,643,422
|
import struct
def parse_table(data: bytes, fields: list) -> dict:
"""Return a Python dictionary created from the bytes *data* of
an ISIS cube table (presumably extracted via read_table_data()),
    and described by the *fields* list.
Please be aware that this does not perform masking of the ISIS
special pixels that may be present in the table, and simply
returns them as the appropriate int or float values.
The *fields* list must be a list of dicts, each of which must
contain the following keys: 'Name', 'Type', and 'Size'. The
'Name' key can be any string (and these will end up being the
keys in the returned dict). 'Size' is the size in bytes of the
field, and 'Type' is a string that must be one of 'Integer',
'Double', 'Real', or 'Text'.
If you are using the pvl library, the get_table() function will
be easier to use.
"""
row_len = 0
for f in fields:
row_len += data_sizes[f["Type"]] * int(f["Size"])
if len(data) % row_len != 0:
raise ValueError(
f"The total sizes of each field ({row_len}) do not evenly divide "
f"into the size of the data ({len(data)}), so something is off."
)
# Parse the binary data
results = {f["Name"]: [] for f in fields}
offset = 0
while offset < len(data):
for f in fields:
if f["Type"] == "Text":
field_data = data[offset : offset + int(f["Size"])].decode(
encoding="latin_1"
)
else:
data_fmt = data_formats[f["Type"]] * int(f["Size"])
f_data = struct.unpack_from(data_fmt, data, offset)
if len(f_data) == 1:
field_data = f_data[0]
else:
field_data = list(f_data)
results[f["Name"]].append(field_data)
offset += data_sizes[f["Type"]] * int(f["Size"])
return results
|
3727a37d619c77c6789e1d11479ecfd67b814766
| 3,643,423
|
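`parse_table` above relies on module-level data_sizes and data_formats maps that are not shown; here is a minimal round-trip sketch under one plausible definition of those maps (the real module may use different type codes or byte orders).

```python
import struct

# Hypothetical size/format maps for the field types named in the docstring.
data_sizes = {"Integer": 4, "Double": 8, "Real": 4, "Text": 1}
data_formats = {"Integer": "i", "Double": "d", "Real": "f"}

fields = [
    {"Name": "Id", "Type": "Integer", "Size": 1},
    {"Name": "Value", "Type": "Double", "Size": 1},
]
data = struct.pack("i", 7) + struct.pack("d", 3.5)
print(parse_table(data, fields))  # -> {'Id': [7], 'Value': [3.5]}
```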
import numpy as np
import scipy.interpolate
import scipy.ndimage
def gridtilts(shape, thismask, slit_cen, coeff2, func2d, spec_order, spat_order, pad_spec=30, pad_spat = 5, method='interp'):
"""
Parameters
----------
tilt_fit_dict: dict
        Tilt fit dictionary produced by fit_tilts
Returns
-------
piximg: ndarray, float
Image indicating how spectral pixel locations move across the image. This output is used in the pipeline.
"""
# Compute the tilts image
nspec, nspat = shape
xnspecmin1 = float(nspec-1)
xnspatmin1 = float(nspat-1)
spec_vec = np.arange(nspec)
spat_vec = np.arange(nspat)
# JFH This histogram method is not preferred, since it basically does NGP. It is however super fast, so for big images
# it is useful to have it
if 'hist2d' in method:
oversamp_spec=5
oversamp_spat=3
spec_ind, spat_ind = np.where(thismask)
min_spec = spec_ind.min() - pad_spec
max_spec = spec_ind.max() + pad_spec
num_spec = max_spec - min_spec + 1
min_spat = spat_ind.min() - pad_spat
max_spat = spat_ind.max() + pad_spat
num_spat = max_spat - min_spat + 1
spec_lin = np.linspace(min_spec,max_spec,num = int(np.round(num_spec*oversamp_spec)))
spat_lin = np.linspace(min_spat,max_spat,num = int(np.round(num_spat*oversamp_spat)))
spat_img, spec_img = np.meshgrid(spat_lin, spec_lin)
# Normalized spatial offset image (from central trace)
slit_cen_lin = (scipy.interpolate.interp1d(np.arange(nspec),slit_cen,bounds_error=False,fill_value='extrapolate'))(spec_lin)
slit_cen_img = np.outer(slit_cen_lin, np.ones(spat_img.shape[1])) # center of the slit replicated spatially
dspat_img_nrm = (spat_img - slit_cen_img)/xnspatmin1
spec_img_nrm = spec_img/xnspecmin1
# normalized spec image
tracepix = spec_img + xnspecmin1*utils.func_val(coeff2, spec_img_nrm, func2d, x2=dspat_img_nrm,
minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)
norm_img, spec_edges, spat_edges = np.histogram2d(tracepix.flatten(), spat_img.flatten(),
bins=[np.arange(nspec+1), np.arange(nspat+1)], density=False)
weigh_img, spec_edges, spat_edges = np.histogram2d(tracepix.flatten(), spat_img.flatten(),
bins=[np.arange(nspec+1), np.arange(nspat+1)],
weights = spec_img.flatten(),density=False)
piximg =(norm_img > 0.0)*weigh_img/(norm_img + (norm_img == 0.0))
inmask = thismask & (norm_img > 0) & (piximg/xnspecmin1 > -0.2) & (piximg/xnspecmin1 < 1.2)
    # This is the default method, although scipy.interpolate.griddata is a bit slow
elif 'interp' in method:
spec_vec_pad = np.arange(-pad_spec,nspec+pad_spec)
spat_vec_pad = np.arange(-pad_spat,nspat+pad_spat)
spat_img, spec_img = np.meshgrid(spat_vec, spec_vec)
spat_img_pad, spec_img_pad = np.meshgrid(np.arange(-pad_spat,nspat+pad_spat),np.arange(-pad_spec,nspec+pad_spec))
slit_cen_pad = (scipy.interpolate.interp1d(spec_vec,slit_cen,bounds_error=False,fill_value='extrapolate'))(spec_vec_pad)
thismask_pad = np.zeros_like(spec_img_pad,dtype=bool)
ind_spec, ind_spat = np.where(thismask)
slit_cen_img_pad= np.outer(slit_cen_pad, np.ones(nspat + 2*pad_spat)) # center of the slit replicated spatially
# Normalized spatial offset image (from central trace)
dspat_img_nrm = (spat_img_pad - slit_cen_img_pad)/xnspatmin1
# normalized spec image
spec_img_nrm = spec_img_pad/xnspecmin1
# Embed the old thismask in the new larger padded thismask
thismask_pad[ind_spec + pad_spec,ind_spat + pad_spat] = thismask[ind_spec,ind_spat]
# Now grow the thismask_pad
kernel = np.ones((2*pad_spec, 2*pad_spat))/float(4*pad_spec*pad_spat)
thismask_grow = scipy.ndimage.convolve(thismask_pad.astype(float), kernel, mode='nearest') > 0.0
# Evaluate the tilts on the padded image grid
tracepix = spec_img_pad[thismask_grow] + xnspecmin1*utils.func_val(coeff2, spec_img_nrm[thismask_grow], func2d, x2=dspat_img_nrm[thismask_grow],
minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)
## TESTING STARTS
"""
ikeep = np.isfinite(tracepix)
sigma = np.full_like(spec_img_pad[thismask_grow], 10.0)/xnspecmin1
fitxy = [spec_order, spat_order]
fitmask, coeff2_tilts = utils.robust_polyfit_djs(tracepix/xnspecmin1, spec_img_pad[thismask_grow]/xnspecmin1,
fitxy, x2=spat_img_pad[thismask_grow]/xnspatmin1,
sigma=sigma,
upper=5.0, lower=5.0, maxdev=10.0/xnspecmin1,
inmask=ikeep, function=func2d, maxiter=20,
minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0, use_mad=False)
## TESTING ENDS
# values(points) \equiv spec_pos(tilt,spat_pos) which is the piximg that we want to create via griddata interpolation
"""
ikeep = np.isfinite(tracepix)
points = np.stack((tracepix[ikeep], spat_img_pad[thismask_grow][ikeep]), axis=1)
values =spec_img_pad[thismask_grow][ikeep]
piximg = scipy.interpolate.griddata(points, values, (spec_img, spat_img), method='cubic')
inmask = thismask & np.isfinite(piximg) & (piximg/xnspecmin1 > -0.2) & (piximg/xnspecmin1 < 1.2)
# Now simply do a 2d polynomial fit with just rejection of crazy behavior, i.e. 10 pixels
fitxy = [spec_order, spat_order]
sigma = np.full_like(spec_img,10.0)/xnspecmin1
fitmask, coeff2_tilts = utils.robust_polyfit_djs(spec_img.flatten()/xnspecmin1, piximg.flatten()/xnspecmin1,
fitxy, x2=spat_img.flatten()/xnspatmin1, sigma = sigma.flatten(),
upper=5.0, lower=5.0, maxdev = 10.0/xnspecmin1,
inmask=inmask.flatten(), function=func2d, maxiter=20,
minx=0.0, maxx=1.0, minx2=0.0,maxx2=1.0,use_mad=False)
irej = np.invert(fitmask) & inmask.flatten()
msgs.info('Rejected {:d}/{:d} pixels in final tilts image after gridding'.format(np.sum(irej),np.sum(inmask)))
# normalized tilts image
tilts = utils.func_val(coeff2_tilts, spec_img/xnspecmin1, func2d, x2=spat_img/xnspatmin1,minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0)
tilts = np.fmax(np.fmin(tilts, 1.2),-0.2)
# Added this to ensure that tilts are never crazy values due to extrapolation of fits which can break
# wavelength solution fitting
return coeff2_tilts, tilts
|
55dd6ddd065e4f4bfefdc30bef27dc6e6541190b
| 3,643,424
|
import functools
def exp_t(u, t):
"""Compute exp_t for `u`."""
def _internal_exp_t(u, t):
return tf.nn.relu(1.0 + (1.0 - t) * u) ** (1.0 / (1.0 - t))
return tf.cond(
tf.math.equal(t, 1.0), lambda: tf.math.exp(u),
functools.partial(_internal_exp_t, u, t))
|
27fe729ea55bc8933d6ccd41c5ae96657b4426ad
| 3,643,425
|
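`exp_t` above computes the tempered exponential exp_t(u) = [1 + (1 - t) u]_+ ** (1 / (1 - t)), which reduces to exp(u) at t = 1. A quick eager-mode check with illustrative values, assuming the function is in scope:

```python
import tensorflow as tf

u = tf.constant(1.0)
print(float(exp_t(u, tf.constant(1.0))))  # ~2.718, plain exp(1)
print(float(exp_t(u, tf.constant(0.5))))  # (1 + 0.5 * 1) ** 2 = 2.25
```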
def max_matching(G, method="ilp"):
"""Return a largest matching in *G*.
Parameters
----------
G : NetworkX graph
An undirected graph.
method: string
The method to use for finding the maximum matching. Use
'ilp' for integer linear program or 'bf' for brute force.
Defaults to 'ilp'.
Returns
-------
set
A set of edges comprising a maximum matching in *G*.
See Also
--------
    max_matching_bf, max_matching_ilp
"""
max_matching_func = {"bf": max_matching_bf, "ilp": max_matching_ilp}.get(method, None)
if max_matching_func:
return max_matching_func(G)
raise ValueError('Invalid `method` argument "{}"'.format(method))
|
34407865678e46d7d042fa94852b66ebc22787d6
| 3,643,426
|
def hasEdgeFlux(source, edgeDistance=1):
"""hasEdgeFlux
Determine whether or not a source has flux within `edgeDistance`
of the edge.
Parameters
----------
source : `scarlet.Component`
The source to check for edge flux
edgeDistance : int
The distance from the edge of the image to consider
a source an edge source. For example if `edgeDistance=3`
then any source within 3 pixels of the edge will be
considered to have edge flux.
If `edgeDistance` is `None` then the edge check is ignored.
Returns
-------
isEdge: `bool`
Whether or not the source has flux on the edge.
"""
if edgeDistance is None:
return False
assert edgeDistance > 0
# Use the first band that has a non-zero SED
flux = scarlet.measure.flux(source)
    band = np.min(np.where(flux > 0)[0])
model = source.get_model()[band]
for edge in range(edgeDistance):
if (
np.any(model[edge-1] > 0)
or np.any(model[-edge] > 0)
or np.any(model[:, edge-1] > 0)
or np.any(model[:, -edge] > 0)
):
return True
return False
|
2fd924c20cb89b3728ef3a24f92b89eb0b136fe5
| 3,643,427
|
def biswas_robustness(data_scikit, data_mm):
"""
summary stats on consensus peaks
"""
CV = find_CV(th=0.0001, ca=0.5, sd=1)
CV_th001 = find_CV(th=0.001, ca=0.5, sd=1)
CV_th01 = find_CV(th=0.01, ca=0.5, sd=1)
CV_th00001 = find_CV(th=0.00001, ca=0.5, sd=1)
CV_sd15 = find_CV(th=0.0001, ca=0.5, sd=1.5)
CV_sd05 = find_CV(th=0.0001, ca=0.5, sd=0.5)
CV_ca09 = find_CV(th=0.0001, ca=0.9, sd=0.5)
CV_ca01 = find_CV(th=0.0001, ca=0.1, sd=0.5)
biswas_df = pd.DataFrame(columns=['ORF', 'corr_th001', 'corr_th01', 'corr_th00001', 'corr_sd15', 'corr_sd05', 'corr_ca09', 'corr_ca01'])
list_orfs = list( data_scikit.keys() )
for ix, orf in enumerate(list_orfs):
output = np.zeros(( 7 ))
coef = 0
p = 1
current_data = data_scikit[orf]
current_mm = data_mm[orf]
if np.shape(current_data)[1] == len(current_mm):
current_data[:,~current_mm] = 0 # after, for false consensus (i.e. multimapping), set to 0
current_cons, current_peaks = run_mc(current_data, CV)
current_cons_th001, current_peaks_th001 = run_mc(current_data, CV_th001)
current_cons_th01, current_peaks_th01 = run_mc(current_data, CV_th01)
current_cons_th00001, current_peaks_th00001 = run_mc(current_data, CV_th00001)
current_cons_sd15, current_peaks_sd15 = run_mc(current_data, CV_sd15)
current_cons_sd05, current_peaks_sd05 = run_mc(current_data, CV_sd05)
current_cons_ca09, current_peaks_ca09 = run_mc(current_data, CV_ca09)
current_cons_ca01, current_peaks_ca01 = run_mc(current_data, CV_ca01)
output[0], p = stats.spearmanr(current_cons, current_cons_th001)
output[1], p = stats.spearmanr(current_cons, current_cons_th01)
output[2], p = stats.spearmanr(current_cons, current_cons_th00001)
output[3], p = stats.spearmanr(current_cons, current_cons_sd15)
output[4], p = stats.spearmanr(current_cons, current_cons_sd05)
output[5], p = stats.spearmanr(current_cons, current_cons_ca09)
output[6], p = stats.spearmanr(current_cons, current_cons_ca01)
output = np.around(output,3)
biswas_df.loc[len(biswas_df)] = ( orf, output[0], output[1], output[2], output[3], output[4], output[5], output[6] )
print(ix, orf, output[0], output[1], output[2], output[3], output[4], output[5], output[6] )
return biswas_df
|
70ecee0baa60a5b06c785dd172bc9d0719840903
| 3,643,428
|
def get_click_offset(df):
"""
    df[session_key] returns the session column
    df[session_key].nunique() returns the number of unique session keys (int)
    df.groupby(session_key).size() returns the size of each session
    df.groupby(session_key).size().cumsum() returns the cumulative sum
"""
offsets = np.zeros(df[session_key].nunique() + 1, dtype=np.int32)
offsets[1:] = df.groupby(session_key).size().cumsum()
return offsets
|
c8caed25899f71549a9333e64452f8eed9cf1029
| 3,643,429
|
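A sketch of what `get_click_offset` above produces, assuming the module-level session_key names the session column and numpy/pandas are available where the function lives.

```python
import numpy as np
import pandas as pd

session_key = "session_id"  # module-level name assumed by get_click_offset
df = pd.DataFrame({"session_id": [1, 1, 1, 2, 2, 3]})
print(get_click_offset(df))  # -> [0 3 5 6]: offsets[i] is the first row of session i
```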
def delete_enrichment():
"""
Controller to delete all existing GO enrichments
:return: Redirect to admin main screen
"""
CoexpressionCluster.delete_enrichment()
flash('Successfully removed GO enrichment for co-expression clusters', 'success')
return redirect(url_for('admin.controls.index'))
|
9cface0783929581f3e6076f43a461ef815c0d2b
| 3,643,430
|
from typing import Counter
import logging
def evaluate_agent(agent, env, alpha, num_users=100, deterministic=False,
softmax_temperature=1.0,
scatter_plot_trajectories=False, figure_file_obj=None,
risk_score_extractor=violence_risk, plot_histogram=False,
plot_trajectories=True,
stepwise_plot=False, only_evaluate_pool=None,
reward_health_distribution_plot=False, debug_log=False):
"""Runs an agent-env simulation to evaluate average reward and safety costs.
Args:
agent: rnn_cvar_agent.SafeRNNAgent object.
env: Recsim environment that returns responses with reward and health score.
alpha: The alpha used as the level for VaR/CVaR.
num_users: Number of users to sample for the evaluation.
deterministic: Whether the agent chooses the argmax action instead of
sampling.
    scatter_plot_trajectories: Whether to scatter plot the evaluated
      trajectories (ratings vs. health).
    figure_file_obj: File object to store the plot.
risk_score_extractor: A function which takes an observation and returns a
risk score.
Returns:
Dictionary with average reward, health score, cvar, var for num_users
sampled.
"""
results = {}
if hasattr(env._environment, 'set_active_pool'): # pylint: disable=protected-access
pools = ['train', 'eval', 'test']
if only_evaluate_pool:
pools = [only_evaluate_pool]
else:
pools = ['all']
for pool in pools:
tf.keras.backend.set_learning_phase(0)
if hasattr(env._environment._user_model._user_sampler, 'set_active_pool'): # pylint: disable=protected-access
env._environment.set_active_pool(
pool) # pylint: disable=protected-access
else:
assert pool == 'all' or only_evaluate_pool
if plot_histogram or plot_trajectories:
recs_histogram = Counter({})
recs_histogram_keys_list = {}
if debug_log:
user_rec_log = []
ratings = []
ratings_health_user_map = {}
health = []
rewards = []
max_episode_length = agent.max_episode_length
if stepwise_plot:
stepwise_ratings = [[] for _ in range(max_episode_length)]
stepwise_healths = [[] for _ in range(max_episode_length)]
agent.epsilon = 0.0 # Turn off any exploration.
env._environment._user_model._user_sampler.reset_sampler()
# Set the learning phase to 0 i.e. evaluation to not use dropout.
# Generate num_users trajectories.
for _ in range(num_users):
# TODO(): Clean the logged variables by making a data class.
curr_user_reward = 0.0
curr_user_health = 0.0
curr_user_rating = 0.0
if plot_histogram or plot_trajectories:
current_trajectory = []
reward = 0
observation = env.reset()
curr_user_vector = env.environment.user_model._user_state.topic_affinity
user_id = observation['user']['user_id']
if debug_log:
user_rec_log.append((user_id, []))
for step_number in range(max_episode_length):
slate = agent.step(reward, observation, eval_mode=True,
deterministic=deterministic, temperature=softmax_temperature)
observation, reward, _, _ = env.step(slate)
rating = observation['response'][0]['rating']
if plot_histogram or plot_trajectories:
current_trajectory.append(slate[0])
if slate[0] in recs_histogram:
recs_histogram[slate[0]] = recs_histogram[slate[0]] + 1
else:
recs_histogram[slate[0]] = 1
recs_histogram_keys_list[slate[0]] = len(
recs_histogram.keys())
if stepwise_plot:
# print(reward, risk_score_extractor(observation))
stepwise_ratings[step_number].append(rating)
stepwise_healths[step_number].append(
1-risk_score_extractor(observation))
curr_user_rating += rating
curr_user_reward += reward
curr_user_health += 1-risk_score_extractor(observation)
if debug_log:
user_rec_log[-1][1].append((slate[0], rating, 1-risk_score_extractor(observation), reward))
agent.end_episode(reward, observation, eval_mode=True)
ratings.append(curr_user_rating/float(max_episode_length))
health.append(curr_user_health/float(max_episode_length))
ratings_health_user_map[str(curr_user_vector)] = (ratings[-1], health[-1])
rewards.append(curr_user_reward/float(max_episode_length))
if plot_trajectories:
plot_current_trajectory(
current_trajectory, observation, recs_histogram_keys_list)
plt.show()
agent.empty_buffer()
health_risks = 1-np.array(health)
var = np.percentile(health_risks, 100*alpha)
cvar = compute_cvar(health_risks, var)
logging.info('Average Reward = %f, Average Health = %f, '
'Average Ratings = %f,VaR = %f, CVaR = %f',
np.mean(rewards), np.mean(health), np.mean(ratings), var, cvar)
if plot_histogram:
plot_recs_hists(recs_histogram, pool)
plt.show()
if stepwise_plot:
plot_stepwise_ratings(stepwise_ratings, stepwise_healths)
# Set the learning phase back to 1.
tf.keras.backend.set_learning_phase(1)
if scatter_plot_trajectories:
plot_trajectories(ratings, health, figure_file_obj)
results[pool] = {
'rewards': np.mean(rewards),
'health': np.mean(health),
'ratings': np.mean(ratings),
'var': var,
'cvar': cvar
}
if plot_histogram:
results[pool]['unique_recs'] = len(recs_histogram.keys())
if reward_health_distribution_plot:
results[pool]['ratings_health_user_map'] = ratings_health_user_map
plot_reward_vs_health_distribution(ratings, health)
if debug_log:
save_user_rec_log()
results[pool]['user_rec_log'] = user_rec_log
if len(results) == 1: # No train/eval/test split, just return one value.
return results[only_evaluate_pool] if only_evaluate_pool else results['all']
# Promote the eval results to the top-level dictionary.
results.update(results['eval'])
return results
|
9bf9413eb49a7fb635ded6b665ce7e3f45a0413b
| 3,643,431
|
from typing import Sequence
def argmax(sequence: Sequence) -> int:
"""Find the argmax of a sequence."""
return max(range(len(sequence)), key=lambda i: sequence[i])
|
58cc1d0e952a7f15ff3fca721f43c4c658c41de1
| 3,643,432
|
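A quick example for `argmax` above (ties resolve to the first maximal index), assuming the function is in scope.

```python
print(argmax([3, 9, 4, 9]))  # -> 1
```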
def read_data_from_device(device, location):
""" Reads text data from device and returns it as output
    Args:
        device ('obj'): Device to read the file from
        location ('str'): Path to the text file
Raises:
FileNotFoundError: File Does not Exist
Returns:
Data ('str'): Text data read from the device
"""
# IMPORTANT
# =========
# This API does not require the device to have network connection
# copy_from_device is the other API that behaves similar to this one,
# but it requires network connection since it uses SCP
try:
return device.execute("cat {}".format(location))
    except Exception:  # Raise FileNotFoundError when any error is encountered
raise FileNotFoundError("File {} does not exist.".format(location))
|
f6895d25f9f9e68ec33bb2d8f693999a7e3a2812
| 3,643,433
|
import os

from dotenv import load_dotenv

def get_credentials() -> tuple:
"""Gets bot auth credentials from environment variables defined in the local .env file"""
load_dotenv()
irc_token = os.environ.get('TWITCH_OAUTH_PASS')
client_id = os.environ.get('TWITCH_CLIENT_ID')
channel = os.environ.get('TWITCH_CHANNEL')
return irc_token, client_id, channel
|
df1c8e998d27f4fec7574fb10dd7022b284680f0
| 3,643,434
|
from typing import Dict
def postman_parser(postman_info: dict,
environment_vars: Dict = None) -> APITest:
"""
    Parse a Postman collection, given in JSON input format.
:param postman_info: JSON parsed info from Postman
:type postman_info: dict
:param environment_vars: variables to replace
:type environment_vars: dict
    :return: an APITest object
:rtype: APITest
:raise ApitestValueError: when an invalid Postman format was received
"""
assert isinstance(postman_info, dict)
assert len(postman_info) > 0
# Try to find Postman variables in the JSON info from Postman Project
variables_from_postman_file = extract_postman_variables(postman_info)
# If variables was found, replace with the values
if variables_from_postman_file:
if not environment_vars:
raise ApitestMissingDataError(
"The Postman collections need some environment variables. "
"Please specify these variables and try again: "
",".join(x for x in variables_from_postman_file))
else:
postman_info = replace_postman_variables(postman_info,
variables_from_postman_file,
environment_vars)
collections = []
try:
# Get all collections
for collection in postman_info.get("item"):
end_points = []
# Get each end-point
for endpoint in collection.get("item"):
# --------------------------------------------------------------------------
# APITestRequest info
# --------------------------------------------------------------------------
query_info = endpoint.get("request")
# APITestRequest headers
request_headers = []
for header in query_info.get("header"):
request_headers.append(APITestHeader(key=header.get("key"),
value=header.get("value")))
# APITestRequest body
request_body_content_type = from_http_content_type_get_type(request_headers, query_info.get("body").get("mode"))
request_body = APITestBody(content_type=request_body_content_type,
value=from_raw_body_get_python_object(data_type=request_body_content_type,
data=query_info.get("body").get("formdata")))
# Build request
_request_url = query_info.get("url") \
if query_info.get("url").startswith("http") \
else "http://{}".format(query_info.get("url"))
request = APITestRequest(url=_request_url,
method=query_info.get("method"),
headers=request_headers,
body=request_body)
# --------------------------------------------------------------------------
# APITestResponse info
# --------------------------------------------------------------------------
response_list = endpoint.get("response")
responses = []
if response_list:
for response_info in response_list:
# APITestResponse headers
response_headers = []
for header in response_info.get("header"):
response_headers.append(APITestHeader(key=header.get("key"),
value=header.get("value")))
# APITestResponse APITestBody
response_body_content_type = from_http_content_type_get_type(response_headers, None)
response_body = APITestBody(content_type=response_body_content_type,
value=from_raw_body_get_python_object(data_type=response_body_content_type,
data=response_info.get("body")))
# APITestResponse cookie
response_cookies = []
for cookie in response_info.get("cookie"):
response_cookies.append(APITestCookie(expires=cookie.get("expires"),
host_only=cookie.get("hostOnly"),
http_only=cookie.get("httpOnly"),
domain=cookie.get("domain"),
path=cookie.get("path"),
secure=cookie.get("secure"),
session=cookie.get("session"),
value=cookie.get("value")))
# Build response
responses.append(APITestResponse(code=response_info.get("code"),
status=response_info.get("status"),
headers=response_headers,
body=response_body,
cookies=response_cookies))
end_points.append(APITestEndPoint(name=endpoint.get("name"),
description=endpoint.get("description"),
request=request,
response=responses))
collections.append(APITestCollection(name=endpoint.get("name"),
description=endpoint.get("description"),
end_points=end_points))
except Exception as exc:
raise ApitestInvalidFormatError from exc
data = APITest(title=postman_info.get("info").get("name"),
description=postman_info.get("info").get("description"),
collections=collections)
return data
|
1e3c351c3b7ee37d438edeb9e64e70d67b45e1b9
| 3,643,435
|
def allOPT2 (routes, dists, maxtime=float("inf")):
"""
    A convenience wrapper that applies the 2-opt optimization to all
    of the provided routes.
:param routes: The routes to optimize.
:param dists: The matrix of distances.
:param maxtime: The maximum time the optimization can go on.
:return: The optimised routes and the overall respective cost.
"""
optimized_routes = [None] * len(routes)
total_cost = 0
for i, route in enumerate(routes):
oproute, cost = OPT2(route, dists, maxtime)
optimized_routes[i] = oproute
total_cost += cost
return optimized_routes, total_cost
|
ec7a2e337371cf806b7fa32661185b7400e774a0
| 3,643,436
|
def getScoreByName(name):
"""
This function will search for the name and
will, if found, return the scores
"""
for idx, val in enumerate(names):
if val == name:
return scores[idx]
|
77074b360c2e35ae30053e1b00b3270166f27ada
| 3,643,437
|
def count_dict(dict_):
"""
Count how many levels the dict has
"""
if not isinstance(dict_, dict):
raise Dict_Exception("dict_ must be a dict")
return max(count_dict(v) if isinstance(v, dict) else 0 for v in dict_.values()) + 1
|
b608469d67f050b366cb5b97a7d686bdf8347616
| 3,643,438
|
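`count_dict` above returns the nesting depth of a dict; two illustrative calls, assuming the function (and its Dict_Exception) are in scope.

```python
print(count_dict({"a": 1}))                        # -> 1 (flat dict)
print(count_dict({"a": {"b": {"c": 3}}, "d": 0}))  # -> 3 (three nested levels)
```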
def __draw_tick_labels(scales, chart_height, chart_width):
"""Draws the numbers in both axes."""
axis_values = [0, 0.25, 0.5, 0.75, 1]
axis_df = pd.DataFrame({"main_axis_values": axis_values, "aux_axis_position": 0})
x_tick_labels = (
alt.Chart(axis_df)
.mark_text(
yOffset=Scatter_Axis.label_font_size * 1.5,
tooltip="",
align="center",
fontSize=Scatter_Axis.label_font_size,
color=Scatter_Axis.label_color,
fontWeight=Scatter_Axis.label_font_weight,
font=FONT,
)
.encode(
text=alt.Text("main_axis_values:Q"),
x=alt.X("main_axis_values:Q", scale=scales["x"], axis=no_axis()),
y=alt.Y("aux_axis_position:Q", scale=scales["y"], axis=no_axis()),
)
)
axis_df.drop(0, inplace=True)
y_tick_labels = (
alt.Chart(axis_df)
.mark_text(
baseline="middle",
xOffset=-Scatter_Axis.label_font_size * 1.5,
tooltip="",
align="center",
fontSize=Scatter_Axis.label_font_size,
fontWeight=Scatter_Axis.label_font_weight,
color=Scatter_Axis.label_color,
font=FONT,
)
.encode(
text=alt.Text("main_axis_values:Q"),
x=alt.X("aux_axis_position:Q", scale=scales["x"], axis=no_axis()),
y=alt.Y("main_axis_values:Q", scale=scales["y"], axis=no_axis()),
)
)
return x_tick_labels + y_tick_labels
|
85107e3255953af667e43374927299a5a55b6809
| 3,643,439
|
import os
def HIP_to_HD(name):
"""Convert an HIP name in *Hipparcos Catalogue* to HD name in *Henry Draper
Catalogue*.
Args:
name (str or int): Name of star in *Hipparcos Catalogue*.
"""
hip = _get_HIP_number(name)
filename = os.path.join(xindex_path, 'HIP-HD.csv')
f1 = lambda row: int(row.split(',')[0])
f2 = lambda row: 'HD '+row.split(',')[1].strip()
if hip<100:
HDname = find_sortedfile(hip, filename, f1, f2)
else:
HDname = quickfind_sortedfile(hip, filename, f1, f2)
    if HDname is None:
return None
else:
return [HDname]
|
a1a7d965ae93043a1b1905a4396484dc1005de99
| 3,643,440
|
import torch
import sys
from io import StringIO
def convert_torchscript_module_to_torch_backend_contract_mlir(program: torch.nn.Module):
"""Perform common lowering from TorchScript to Torch MLIR
Returns an MLIR module that satisfies the Torch backend contract.
"""
mb = ModuleBuilder()
scripted = torch.jit.script(program)
class_annotator = ClassAnnotator()
extract_annotations(program, scripted, class_annotator)
# TODO: Find a way to make each of these calls own its own
# "debuggable error report" situation.
try:
original_stderr = sys.stderr
sys.stderr = StringIO()
# Import the TorchScript module to MLIR
mb.import_module(scripted._c, class_annotator)
except Exception as e:
raise Exception(f"""
PyTorch TorchScript module -> torch-mlir Object Graph IR import failed with:
Exception:
{e}
Diagnostics:
{sys.stderr.getvalue()}
""") from None
finally:
sys.stderr = original_stderr
run_pipeline_with_repro_report(
mb.module,
"torchscript-module-to-torch-backend-pipeline",
"Lowering TorchScript Object Graph IR -> Torch Backend IR")
return mb.module
|
a3e162dd9cea71d7492d626257a81929565dc128
| 3,643,441
|
import numpy as np


def thread_profile(D, P, inset, internal=True, base_pad=0.1):
"""ISO thread profile"""
H = P*np.sqrt(3)/2
Dm = D - 2*5*H/8
Dp = D - 2*3*H/8
if internal:
return np.array([
(-P/2,D/2+H/8+base_pad+inset),
(-P/2,D/2+H/8+inset),
(-P/8,Dm/2+inset),
(P/8,Dm/2+inset),
(P/2,D/2+H/8+inset),
(P/2,D/2+H/8+base_pad+inset),
])
else:
return np.array([
(-P/2,Dm/2-H/4-base_pad-inset),
(-P/2,Dm/2-H/4-inset),
(-P/16,D/2-inset),
(P/16,D/2-inset),
(P/2,Dm/2-H/4-inset),
(P/2,Dm/2-H/4-base_pad-inset),
])
|
abea6e4f234f4176a385b3abc2ca6f1de0c93a1b
| 3,643,442
|
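A small check of `thread_profile` above for a nominal M6x1 thread; the values are illustrative and assume the function (and numpy) are in scope.

```python
pts = thread_profile(D=6.0, P=1.0, inset=0.0, internal=True)
print(pts.shape)  # -> (6, 2): six (axial, radial) vertices of one ISO thread tooth
```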
import os
import json
from datetime import datetime
def mock_session(monkeypatch, data):
""" Mocked out sqlalchemy session """
if data:
dirname = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(dirname, data)
with open(filename) as data_file:
json_data = json.load(data_file)
predictions = json_data['predictions']
geometry = json_data['geometry']
prediction_model = PredictionModel(
id=1, name='name', abbreviation='abbrev', projection='projection')
def mock_get_session(*args):
mock_session = UnifiedAlchemyMagicMock()
return mock_session
def mock_get_most_recent_model_run(*args) -> PredictionModelRunTimestamp:
timestamp = '2020-01-22T18:00:00+00:00'
return PredictionModelRunTimestamp(id=1,
prediction_model=prediction_model,
prediction_run_timestamp=datetime.fromisoformat(timestamp))
def mock_get_model_run_predictions(*args):
shape = shapely.wkt.loads(geometry)
grid = PredictionModelGridSubset(
id=1,
prediction_model_id=prediction_model.id,
prediction_model=prediction_model,
geom=from_shape(shape)
)
result = []
for prediction in predictions:
prediction['prediction_timestamp'] = datetime.fromisoformat(
prediction['prediction_timestamp'])
result.append(
(grid, ModelRunGridSubsetPrediction(**prediction)))
return result
monkeypatch.setattr(app.db.database, 'get_session', mock_get_session)
monkeypatch.setattr(app.db.crud, 'get_most_recent_model_run',
mock_get_most_recent_model_run)
monkeypatch.setattr(app.db.crud, 'get_model_run_predictions',
mock_get_model_run_predictions)
|
d855097df6e4f472027958fa5cfb2600b7d12589
| 3,643,443
|
def get_service(hass, config, discovery_info=None):
"""Get the HipChat notification service."""
return HipchatNotificationService(
config[CONF_TOKEN],
config[CONF_ROOM],
config[CONF_COLOR],
config[CONF_NOTIFY],
config[CONF_FORMAT],
config[CONF_HOST])
|
1d6b7e5d53084bd91de307a162c4710aac84be24
| 3,643,444
|
from typing import List
from shapely.geometry import Polygon
from geopandas.array import GeometryArray
def connect_with_interior_or_edge_bulk(
polygon: Polygon, polygon_array: GeometryArray
) -> List[bool]:
"""
Return boolean array with True iff polys overlap in interior/edge, but not corner.
Args:
polygon (Polygon): A shapely Polygon
polygon_array (GeometryArray): The other shapely Polygons in a geopandas
geometry array
Returns:
List[bool]: Boolean array with value True, iff `polygon` and the polygon in
`polygon_array` at the given location overlap in their interior/edge.
"""
patterns = polygon_array.relate(polygon)
return [
de9im_match(pattern, EDGE_ONLY_PATTERN) or de9im_match(pattern, OVERLAP_PATTERN)
for pattern in patterns
]
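# Added illustration (hedged): de9im_match and the *_PATTERN constants are project-specific and
# not shown in this entry; the underlying relate() call is plain geopandas/shapely. Two unit
# squares sharing only an edge yield a DE-9IM string like 'FF2F11212', while overlapping squares
# yield a pattern starting with '2' (2-dimensional interior intersection).
import geopandas as gpd
from shapely.geometry import box
demo_poly = box(0, 0, 1, 1)
demo_others = gpd.GeoSeries([box(1, 0, 2, 1), box(0.5, 0.5, 1.5, 1.5)])
print(list(demo_others.relate(demo_poly)))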
|
852a43d1782ae85dbb2d2adb70feb59ace7a6a44
| 3,643,445
|
import pickle
from django.core.exceptions import ObjectDoesNotExist, ValidationError
def get_history(kmodel=None):
"""
    returns a python dict with key = metric_id and val = [metric value for each epoch]
"""
# get kmodel object from input str if the input is a string
if isinstance(kmodel,str):
try:
kmodel = KModel.objects.get(id=kmodel)
except ObjectDoesNotExist:
# object with name doesn't exist
return None
except ValidationError:
# input string isn't a valid uuid
return None
elif isinstance(kmodel, KModel):
# awesome! proceed
pass
else:
        raise ValueError("call get_history with either a str uuid for the model or a db model instance")
# get the history object and load history
if kmodel.artifacts.filter(descriptor="history").exists():
artifact_path = kmodel.artifacts.get(descriptor="history").path
return pickle.load(open(artifact_path,"rb"))
else:
return None
|
da2886f565ca2f96e49a38b458368de2e4216c01
| 3,643,446
|
def get_neighbor_v4_by_search(search=None):
"""Return a list of NeighborV4's by dict."""
try:
objects = NeighborV4.objects.filter()
search_dict = search if search else dict()
object_map = build_query_to_datatable_v3(objects, search_dict)
except FieldError as e:
raise api_rest_exceptions.ValidationAPIException(str(e))
except Exception as e:
raise api_rest_exceptions.NetworkAPIException(str(e))
else:
return object_map
|
6893c32014d6b2a8871825744a1953024fe3a289
| 3,643,447
|
import pandas as pd
def load_clean_data():
    """function that loads the tuberculosis file and preprocesses/cleans the dataframe"""
df = pd.read_csv('tb.csv')
# drop columns 'fu' and 'mu' since they only contain missing values and would mess up the following processing steps
df = df.drop(columns = ['fu', 'mu'])
# define row and column length
initial_rows = len(df.index)
initial_col = len(df.columns)
# melt the gender-age columns of the df
df = pd.melt(df, id_vars=['country', 'year'], var_name='variable', value_name='value')
melted_row = len(df.index)
# assert that (initial col-number - id_var_no) * rows = length of rows afterwards
assert (initial_col - 2)*initial_rows == melted_row
# the column 'variable' needs to be split into two columns 'gender' and 'age', delete column 'variable'
df['gender'] = df.variable.str[0]
df['age'] = df.variable.str[1:3]
df = df.drop(columns = 'variable')
# transform age into an integer
df['age'] = pd.to_numeric(df['age'], errors='coerce')
# transform gender into category in order to store memory
df['gender'] = df['gender'].astype('category')
return df
#print(df.info())
#print(df.head())
#print(df.loc[df['country'] == 'AD'])
# the transformation seems to be correct. The columns age and gender have no missing values (which would have been
# suspicious)
|
2430bb61705f95c77f68eabbcda535e7d0f443ea
| 3,643,448
|
from itertools import combinations, permutations
def is_anagram_passphrase(phrase):
"""
Checks whether a phrase contains no words that are anagrams of other words.
>>> is_anagram_passphrase(["abcde", "fghij"])
True
>>> is_anagram_passphrase(["abcde", "xyz", "ecdab"])
False
>>> is_anagram_passphrase(["a", "ab", "abc", "abd", "abf", "abj"])
True
>>> is_anagram_passphrase(["iiii", "oiii", "ooii", "oooi", "oooo"])
True
>>> is_anagram_passphrase(["oiii", "ioii", "iioi", "iiio"])
False
"""
return not any(
any(
first_word == "".join(permutated_word)
for permutated_word in permutations(second_word)
)
for first_word, second_word in combinations(phrase, 2)
)
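# Added note (not from the original source): the permutation-based check above is factorial in
# word length; an equivalent and far cheaper test compares sorted letters instead.
def is_anagram_passphrase_fast(phrase):
    sorted_words = ["".join(sorted(word)) for word in phrase]
    return len(sorted_words) == len(set(sorted_words))
print(is_anagram_passphrase_fast(["abcde", "xyz", "ecdab"]))  # False, matching the doctest above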
|
aa7a95cda82317a41d8c4f2765a4706896135f45
| 3,643,449
|
def _client_ip(client):
"""Compatibility layer for Flask<0.12."""
return getattr(client, 'environ_base', {}).get('REMOTE_ADDR')
|
1bd110563c5e7165ec795d16e0f0d7be6d053db1
| 3,643,450
|
import numpy as np
import scipy as sp
import scipy.optimize
def findtrapezoidfunc(
thexvals,
theyvals,
thetoplength,
initguess=None,
debug=False,
minrise=0.0,
maxrise=200.0,
minfall=0.0,
maxfall=200.0,
minstart=-100.0,
maxstart=100.0,
refine=False,
displayplots=False,
):
"""
Parameters
----------
thexvals
theyvals
thetoplength
initguess
debug
minrise
maxrise
minfall
maxfall
minstart
maxstart
refine
displayplots
Returns
-------
"""
# guess at parameters: risestart, riseamplitude, risetime
if initguess is None:
initstart = 0.0
initamp = np.mean(theyvals[-10:-1])
initrisetime = 5.0
initfalltime = 5.0
else:
initstart = initguess[0]
initamp = initguess[1]
initrisetime = initguess[2]
initfalltime = initguess[3]
p0 = np.array([initstart, initamp, initrisetime, initfalltime])
if debug:
for i in range(0, len(theyvals)):
print(thexvals[i], theyvals[i])
plsq, dummy = sp.optimize.leastsq(
trapezoidresiduals, p0, args=(theyvals, thexvals, thetoplength), maxfev=5000
)
# except ValueError:
# return 0.0, 0.0, 0.0, 0
if (
(minrise <= plsq[2] <= maxrise)
and (minfall <= plsq[3] <= maxfall)
and (minstart <= plsq[0] <= maxstart)
):
return plsq[0], plsq[1], plsq[2], plsq[3], 1
else:
return 0.0, 0.0, 0.0, 0.0, 0
|
239f6d7edf2b99e631c7787ed6370685ff897e1d
| 3,643,451
|
def extractRecords(getRecordsResponse):
"""Returns a list of etrees of the individual
records of a getRecords response"""
recs = getRecordsResponse.xpath(
'/csw:GetRecordsResponse/csw:SearchResults//csw:Record',
namespaces={'csw': ns_csw})
return recs
|
3de69fc99f77c4d06346aa82121cc936e16a06b4
| 3,643,452
|
from typing import Set
def tagify(tail=u'', head=u'', sep=u'.'):
"""
Returns namespaced event tag string.
Tag generated by joining with sep the head and tail in that order
head and tail may be a string or a list, tuple, or Set of strings
If head is a list, tuple or Set Then
join with sep all elements of head individually
Else
join in whole as string prefix
If tail is a list, tuple or Set Then
join with sep all elements of tail individually
Else
join in whole as string suffix
    If either head or tail is empty then it is not included in the tag
"""
if isinstance(head, (list, tuple, Set)): # list like so expand
parts = list(head)
else: # string like so put in list
parts = [head]
if isinstance(tail, (list, tuple, Set)): # listlike so extend parts
parts.extend(tail)
else: # string like so append
parts.append(tail)
return sep.join([part for part in parts if part])
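# Added usage sketch (not from the original source):
print(tagify(tail="close", head="door"))            # 'door.close'
print(tagify(tail=["close", "hard"], head="door"))  # 'door.close.hard'
print(tagify())                                     # '' (empty head and tail are dropped)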
|
ddebdc0c4224db428a4338fd1e4c61137ac2d5c5
| 3,643,453
|
import pyodbc
def get_fn_data(src_db, fn_table, year=None):
"""Get the data and fields from the query in the src database for the
fish net table specified by fn_table. Returns list of
dictionaries - each element represents a single row returned by the query.
Arguments:
- `src_db`: full path the source database.
- `fn_table`: the name of the stored query that returns the data for
the specified fish net table
"""
if year:
sql = "execute get_{} @yr='{}'".format(fn_table, year)
else:
sql = "execute get_{}".format(fn_table)
constring = "DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={}"
with pyodbc.connect(constring.format(src_db)) as src_conn:
src_cur = src_conn.cursor()
rs = src_cur.execute(sql)
data = rs.fetchall()
flds = [x[0].lower() for x in src_cur.description]
records = []
for record in data:
records.append({k: v for k, v in zip(flds, record)})
return records
|
60d48e0b7727ccd25e4b91bf59f1f505ddbc3127
| 3,643,454
|
import collections
def convert_example_to_feature(example, tokenizer, max_seq_length=512,
doc_stride=384, max_query_length=125, is_training=True,
cls_token_at_end=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True,
sequence_a_is_doc=False):
"""Convert a single QuacExample to features (model input)"""
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[-max_query_length:]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
assert max_tokens_for_doc >= 384, max_tokens_for_doc
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
# we set the doc_stride to 384, which is the max length of evidence text,
# meaning that each evidence has exactly one _DocSpan
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
assert len(doc_spans) == 1, (max_tokens_for_doc, example)
# if len(doc_spans) > 1:
# print(len(doc_spans), example)
# doc_spans = [doc_spans[0]]
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
# p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
# Original TF implem also keep the classification token (set to 0) (not sure why...)
p_mask = []
# CLS token at the beginning
if not cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# XLNet: P SEP Q SEP CLS
# Others: CLS Q SEP P SEP
if not sequence_a_is_doc:
# Query
tokens += query_tokens
segment_ids += [sequence_a_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
if not sequence_a_is_doc:
segment_ids.append(sequence_b_segment_id)
else:
segment_ids.append(sequence_a_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
if sequence_a_is_doc:
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
tokens += query_tokens
segment_ids += [sequence_b_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
# CLS token at the end
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = len(tokens) - 1 # Index of classification token
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
if sequence_a_is_doc:
doc_offset = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = cls_index
end_position = cls_index
if False:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (example.example_id))
logger.info("example_id: %s" % (example.example_id))
logger.info("qid of the example: %s" % (example.qas_id))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and span_is_impossible:
logger.info("impossible example")
if is_training and not span_is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info("retrieval_label: %d" % (example.retrieval_label))
logger.info(
"answer: %s" % (answer_text))
feature = InputFeatures(
unique_id=example.example_id,
example_id=example.example_id,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible,
retrieval_label=example.retrieval_label)
return feature
|
1db90a014da443e143411276cbbf0e29a9872b7f
| 3,643,455
|
def make_pickle(golfed=False):
"""Returns the pickle-quine.
If "golfed" is true, we return the minimized version; if false we return
the one that's easier to understand.
"""
part_1 = b''.join(PART_1)
part_2 = b''.join(GOLFED_PART_2 if golfed else PART_2)
# We tack the length onto part 1:
length = len(part_1) + 1 + len(part_2)
part_1 = part_1 + b'%c' % length
# Now glue everything together.
the_string = part_1 + part_2
return part_1 + the_string + part_2
|
79fdc182ef3487090a8acd61c44b46d4b7cc5493
| 3,643,456
|
def classify_design_space(action: str) -> int:
"""
The returning index corresponds to the list stored in "count":
[sketching, 3D features, mating, visualizing, browsing, other organizing]
Formulas for each design space action:
sketching = "Add or modify a sketch" + "Copy paste sketch"
3D features = "Commit add or edit of part studio feature" + "Delete part studio feature"
- "Add or modify a sketch"
mating = "Add assembly feature" + "Delete assembly feature" + "Add assembly instance"
+ "Delete assembly instance"
visualizing = "Start assembly drag" + "Animate action called"
browsing = Opening a tab + Creating a tab + Deleting a tab + Renaming a tab
other organizing = "Create version" + "Cancel Operation" + "Undo Redo Operation"
+ "Merge branch" + "Branch workspace" + "Update version"
:param action: the action to be classified
:return: the index of the action type that this action is accounted for; if the action does not
belong to any category, return -1
Note: "Add or modify a sketch" is special (+1 for sketching and -1 for 3D features),
return -10
"""
# Creating a sketch is special as it affects both the sketching and the 3D features counts
if action == "Add or modify a sketch":
return -10
# Sketching
elif action == "Copy paste sketch":
return 0
# 3D features
elif action in ["Commit add or edit of part studio feature",
"Delete part studio feature"]:
return 1
# Mating
    elif action in ["Add assembly feature", "Delete assembly feature", "Add assembly instance",
                    "Delete assembly instance"]:
return 2
# Visualizing
elif action in ["Start assembly drag", "Animate action called"]:
return 3
# Browsing
elif "Tab" in action and ("opened" in action or "created" in action or "deleted" in action or
"renamed" in action):
return 4
# Other organizing
elif action in ["Create version", "Cancel Operation", "Undo Redo Operation", "Merge branch",
"Branch workspace", "Update version"]:
return 5
# Not classified (Optional: print out the unclassified actions)
else:
return -1
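# Added usage sketch (not from the original source): accumulate counts for a short action log.
# Index meaning: [sketching, 3D features, mating, visualizing, browsing, other organizing].
demo_count = [0, 0, 0, 0, 0, 0]
for demo_action in ["Add or modify a sketch", "Commit add or edit of part studio feature",
                    "Start assembly drag", "Create version"]:
    idx = classify_design_space(demo_action)
    if idx == -10:      # sketch edits count toward sketching and against 3D features
        demo_count[0] += 1
        demo_count[1] -= 1
    elif idx >= 0:
        demo_count[idx] += 1
print(demo_count)  # [1, 0, 0, 1, 0, 1]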
|
22dc68aa23258691b0d4b9f1b27a9e8451b275d9
| 3,643,457
|
import hashlib
def get_sha256_hash(plaintext):
"""
Hashes an object using SHA256. Usually used to generate hash of chat ID for lookup
Parameters
----------
plaintext: int or str
Item to hash
Returns
-------
str
Hash of the item
"""
hasher = hashlib.sha256()
string_to_hash = str(plaintext)
hasher.update(string_to_hash.encode('utf-8'))
hash = hasher.hexdigest()
return hash
|
79735973b8ad73823662cc428513ef393952b681
| 3,643,458
|
def get_bit_coords(dtype_size):
"""Get coordinates for bits assuming float dtypes."""
if dtype_size == 16:
coords = (
["±"]
+ [f"e{int(i)}" for i in range(1, 6)]
+ [f"m{int(i-5)}" for i in range(6, 16)]
)
elif dtype_size == 32:
coords = (
["±"]
+ [f"e{int(i)}" for i in range(1, 9)]
+ [f"m{int(i-8)}" for i in range(9, 32)]
)
elif dtype_size == 64:
coords = (
["±"]
+ [f"e{int(i)}" for i in range(1, 12)]
+ [f"m{int(i-11)}" for i in range(12, 64)]
)
else:
raise ValueError(f"dtype of size {dtype_size} neither known nor implemented.")
return coords
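# Added usage sketch (not from the original source):
labels = get_bit_coords(32)
print(len(labels))             # 32: sign bit, e1..e8, m1..m23
print(labels[:3], labels[-1])  # ['±', 'e1', 'e2'] m23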
|
6400017e47506613cf15162425843ce2b19eed3e
| 3,643,459
|
from klpyastro.utils import obstable
def create_record(user_inputs):
"""
Create a ObsRecord from the informations gathered from the users.
:param user_inputs: Dictionary with all the values (as strings) required
to fully populate a ObsRecord object.
:type user_inputs: dict
:rtype: ObsRecord object
"""
record = obstable.ObsRecord()
record.targetname = user_inputs['targetname']
record.rootname = user_inputs['rootname']
record.band = user_inputs['band']
record.grism = user_inputs['grism']
record.datatype = user_inputs['datatype']
record.applyto = user_inputs['applyto']
record.filerange = user_inputs['filerange']
record.exptime = float(user_inputs['exptime'])
record.lnrs = int(user_inputs['lnrs'])
record.rdmode = user_inputs['rdmode']
return record
|
8fc1a31a24ac7663b405074410d1c025fbcd7d62
| 3,643,460
|
import itertools
def best_wild_hand(hand):
"""best_hand но с джокерами"""
non_jokers = list(filter(lambda x: x[0] != '?', hand))
jokers = filter(lambda x: x[0] == '?', hand)
jokers_variations = itertools.product(
*[joker_variations(joker) for joker in jokers]
)
best_hands = []
for variations in jokers_variations:
full_hand = itertools.chain(variations, non_jokers)
best_hands.append(best_hand(full_hand))
return max((hand_rank(h), h) for h in best_hands)[1]
|
86cb58dba0338c481ce516657118cdc20260ebf3
| 3,643,461
|
def GetTestMetadata(test_metadata_file=FAAS_ROOT+"/synthetic_workload_invoker/test_metadata.out"):
"""
Returns the test start time from the output log of SWI.
"""
test_start_time = None
with open(test_metadata_file) as f:
lines = f.readlines()
test_start_time = lines[0]
config_file = lines[1]
invoked_actions = int(lines[2][:-1])
print('Invocations by Workload Invoker: ' + str(invoked_actions))
try:
return int(test_start_time[:-1]), config_file[:-1]
except:
logger.error("Error reading the test metadata!")
return None, None
|
668e214452bb100885a8631b5d900eb7ca90e43b
| 3,643,462
|
import random as rand
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import torch
def gen_geo(num_nodes, theta, lambd, source, target, cutoff, seed=None):
"""Generates a random graph with threshold theta consisting of 'num_nodes'
    and paths with maximum length 'cutoff' between 'source' and 'target'.
Parameters
----------
num_nodes : int
Number of nodes.
theta : float
Threshold of graph.
lambd : float
Weights of graph are generated randomly from exp(lambd) distribution.
source : int
Origin of path. Must be in range(0, num_nodes).
target : int
Destination of path. Must be in range(0, num_nodes).
cutoff : int
Maximum path length.
seed : int
Set random seed if not None.
Returns
-------
object of type graph
Generated graph.
"""
file_name = './saved_items/graph_N' + str(num_nodes) + '_cutoff' + str(cutoff)
    if seed is not None:
np.random.seed(seed)
rand.seed(seed)
torch.manual_seed(seed)
weights = { node: rand.expovariate(lambd) for node in range(num_nodes)}
graph = geo_thresh(num_nodes, theta, weight=weights)
for (ni, nj) in graph.edges():
graph.edges[ni,nj]['weight'] = weights[ni] + weights[nj]
plt.figure(figsize=(10,5))
nx.draw(graph, with_labels=True, font_weight='bold')
plt.savefig('./figures/graph_N' + str(num_nodes) + str(".png"), dpi=500)
plt.show()
save_obj(graph, file_name)
paths = nx.all_simple_paths(graph, source=source, target=target, cutoff=cutoff)
paths = list(paths)
save_obj(paths, file_name + '_paths')
print('Paths length: ', len(paths))
return graph
|
5d3363aab4e13dd8690277453f603fe707c00d41
| 3,643,463
|
import re
def FilterExceptions(image_name, errors):
"""Filter out the Application Verifier errors that have exceptions."""
exceptions = _EXCEPTIONS.get(image_name, [])
def _HasNoException(error):
# Iterate over all the exceptions.
for (severity, layer, stopcode, regexp) in exceptions:
# And see if they match, first by type.
if (error.severity == severity and
error.layer == layer and
error.stopcode == stopcode):
# And then by regexpr match to the trace symbols.
for trace in error.trace:
if trace.symbol and re.match(regexp, trace.symbol):
return False
return True
  filtered_errors = list(filter(_HasNoException, errors))  # list() so len() and append() below work on Python 3
error_count = len(filtered_errors)
filtered_count = len(errors) - error_count
if error_count:
suffix = '' if error_count == 1 else 's'
filtered_errors.append(
'Error: Encountered %d AppVerifier exception%s for %s.' %
(error_count, suffix, image_name))
if filtered_count:
suffix1 = '' if filtered_count == 1 else 's'
suffix2 = '' if len(exceptions) == 1 else 's'
filtered_errors.append(
'Warning: Filtered %d AppVerifier exception%s for %s using %d rule%s.' %
(filtered_count, suffix1, image_name, len(exceptions), suffix2))
return (error_count, filtered_errors)
|
37b5febe4da731a426c2cd3ef9d6aeb1f28a802c
| 3,643,464
|
from typing import Optional
def dim(text: str, reset_style: Optional[bool] = True) -> str:
"""Return text in dim"""
return set_mode("dim", False) + text + (reset() if reset_style else "")
|
cb180649913760b71b2857b61e264b6a17207433
| 3,643,465
|
import sys
import os
def want_color_output():
"""Return ``True`` if colored output is possible/requested and not running in GUI.
Colored output can be explicitly requested by setting :envvar:`COCOTB_ANSI_OUTPUT` to ``1``.
"""
want_color = sys.stdout.isatty() # default to color for TTYs
if os.getenv("NO_COLOR") is not None:
want_color = False
if os.getenv("COCOTB_ANSI_OUTPUT", default="0") == "1":
want_color = True
if os.getenv("GUI", default="0") == "1":
want_color = False
return want_color
|
bda881ef70cfdb9bbb1eb1b81958f837f8bd92ed
| 3,643,466
|
def jaccard(list1, list2):
"""calculates Jaccard distance from two networks\n
| Arguments:
| :-
| list1 (list or networkx graph): list containing objects to compare
| list2 (list or networkx graph): list containing objects to compare\n
| Returns:
| :-
| Returns Jaccard distance between list1 and list2
"""
intersection = len(list(set(list1).intersection(list2)))
union = (len(list1) + len(list2)) - intersection
return 1- float(intersection) / union
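# Added usage sketch (not from the original source):
print(jaccard([1, 2, 3], [2, 3, 4]))  # 0.5 -> intersection 2, union 4
print(jaccard([1, 2], [1, 2]))        # 0.0 -> identical lists are distance zero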
|
1056c3d5a592bea9a575c24e947a91968b931000
| 3,643,467
|
def default_argument_preprocessor(args):
"""Return unmodified args and an empty dict for extras"""
extras = {}
return args, extras
|
2031dde70dbe54beb933e744e711a0bf8ecaed99
| 3,643,468
|
from typing import List
from typing import Tuple
import os
def expand_site_packages(site_packages: List[str]) -> Tuple[List[str], List[str]]:
"""Expands .pth imports in site-packages directories"""
egg_dirs: List[str] = []
for dir in site_packages:
if not os.path.isdir(dir):
continue
pth_filenames = sorted(name for name in os.listdir(dir) if name.endswith(".pth"))
for pth_filename in pth_filenames:
egg_dirs.extend(_parse_pth_file(dir, pth_filename))
return egg_dirs, site_packages
|
065c08f772440e2c2626fb785e4b89197fbf429c
| 3,643,469
|
import random
import pandas as pd
def early_anomaly(case: pd.DataFrame) -> pd.DataFrame:
"""
A sequence of 2 or fewer events executed too early, which is then skipped later in the case
Parameters
-----------------------
case: pd.DataFrame,
Case to apply anomaly
Returns
-----------------------
Case with the applied early anomaly
"""
case = case.reset_index(drop=True)
timestamps = case['timestamp']
sequence_size = random.choice([1, 2])
if sequence_size == 1:
original_position = random.choice(range(1, len(case)))
activities = case.iloc[[original_position]]
case = case.drop(original_position)
if original_position == 1:
anomaly_position = 0
else:
anomaly_position = random.choice(range(0, original_position-1))
description = activities['activity'].values[0] + ' was originally executed at position ' + str(original_position+1) + ' and changed to position ' + str(anomaly_position+1)
else:
original_position = random.choice(range(1, len(case)-1))
activities = case.iloc[original_position:original_position+2]
case = case.drop([original_position, original_position+1])
if original_position == 1:
anomaly_position = 0
else:
anomaly_position = random.choice(range(0, original_position-1))
description = activities['activity'].values[0] + ' and ' + activities['activity'].values[1] + ' were originally executed at positions ' + str(original_position+1) + ' and ' + str(original_position+2) + ' and changed to positions ' + str(anomaly_position+1) + ' and ' + str(anomaly_position+2)
case = pd.concat([case.iloc[:anomaly_position], activities, case.iloc[anomaly_position:]], sort=False).reset_index(drop=True)
case['timestamp'] = timestamps
case['label'] = 'early'
case['description'] = description
return case
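# Added usage sketch (hedged, not from the original source): a minimal synthetic case with the
# 'activity' and 'timestamp' columns the function expects; the reordering itself is random.
demo_case = pd.DataFrame({
    "activity": ["register", "check", "approve", "pay", "archive"],
    "timestamp": pd.date_range("2021-01-01", periods=5, freq="H"),
})
demo_anomalous = early_anomaly(demo_case)
print(demo_anomalous[["activity", "label"]])  # activities reordered, label column set to 'early'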
|
0c5f0b0fb3336331737bd9f80712176476110ac9
| 3,643,470
|
import os
import re
# NOTE: assumed helper -- the original module relied on a pre-compiled regex named `version`
# that captures the tuple in a line such as `version = (0, 9, 1)` inside gotalk/__init__.py.
version = re.compile(r"(?:__)?version(?:__)?\s*=\s*\((.*)\)")
def get_package_version():
"""
:returns: package version without importing it.
"""
base = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(base, "gotalk/__init__.py")) as initf:
for line in initf:
m = version.match(line.strip())
if not m:
continue
return ".".join(m.groups()[0].split(", "))
|
8dafe13f12d9c4c50139250aacef92ab406bfe76
| 3,643,471
|
def parse_cmd(script, *args):
"""Returns a one line version of a bat script
"""
if args:
raise Exception('Args for cmd not implemented')
# http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/cmd.mspx?mfr=true
oneline_cmd = '&&'.join(script.split('\n'))
oneline_cmd = 'cmd.exe /c "%s"' % oneline_cmd
return oneline_cmd
|
b3355b20af2ca1ab2e996643ae0918a2d387760f
| 3,643,472
|
def expected_inheritance(variant_obj):
"""Gather information from common gene information."""
manual_models = set()
for gene in variant_obj.get('genes', []):
manual_models.update(gene.get('manual_inheritance', []))
return list(manual_models)
|
29bf223249e29942803cef8468dbd8bd04979e81
| 3,643,473
|
from collections import defaultdict
def player_stats_game(data) -> defaultdict:
"""Individual Game stat parser. Directs parsing to the proper
player parser (goalie or skater).
Receives the player_id branch.
Url.GAME
Args:
data (dict): dict representing JSON object.
Returns:
defaultdict: Parsed Data.
"""
# if the stats dict is empty it means they're scratched
if not data['stats']:
return None
if data['position']['abbreviation'] == 'G':
return goalie_stats_game(data['stats']['goalieStats'])
else:
return skater_stats_game(data['stats']['skaterStats'])
|
e39e4e9fb4a3d06421639e9466d29724318484ef
| 3,643,474
|
def about(request):
"""
View function for about page
"""
return render(
request,
'about.html',
)
|
5bf7a52de1218718041ec7a05a749c623e19074e
| 3,643,475
|
import datetime as dt
import pandas as pd
def getTimeDeltaFromDbStr(timeStr: str) -> dt.timedelta:
"""Convert db time string in reporting software to time delta object
Args:
timeStr (str): The string that represents time, like 14:25 or 15:23:45
Returns:
dt.timedelta: time delta that has hours and minutes components
"""
if pd.isnull(timeStr):
return dt.timedelta(seconds=0)
elif not(':' in timeStr):
        print('could not parse time string {0}'.format(timeStr))
return dt.timedelta(seconds=0)
else:
try:
timeSegs = timeStr.split(':')
timeSegs = timeSegs[0:2]
return dt.timedelta(hours=int(timeSegs[0]), minutes=int(timeSegs[1]))
        except (ValueError, IndexError):
            print('could not parse time string {0}'.format(timeStr))
            return dt.timedelta(seconds=0)
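# Added usage sketch (not from the original source):
print(getTimeDeltaFromDbStr("14:25"))     # 14:25:00
print(getTimeDeltaFromDbStr("15:23:45"))  # 15:23:00 -- only hours and minutes are kept
print(getTimeDeltaFromDbStr(None))        # 0:00:00 for missing values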
|
66a78192e6cbe5240a9131c2b18e4a42187a6024
| 3,643,476
|
from termcolor import colored  # assumed source of `colored`
def colorBool(v) -> str:
"""Convert True to 'True' in green and False to 'False' in red
"""
if v:
return colored(str(v),"green")
else:
return colored(str(v),"red")
|
8c196bccc5bb1970cc752a495117bcc74ed4f8f6
| 3,643,477
|
from typing import List
import numpy as np
def bootstrap(
tokens: List[str],
measure: str = "type_token_ratio",
window_size: int = 3,
ci: bool = False,
raw=False,
):
"""calculate bootstrap for lex diversity measures
as explained in Evert et al. 2017. if measure='type_token_ratio'
it calculates standardized type-token ratio
:param ci: additionally calculate and return the confidence interval
returns a tuple
:param raw: return the raw results
"""
results = []
measures = dict(
type_token_ratio=type_token_ratio,
guiraud_r=guiraud_r,
herdan_c=herdan_c,
dugast_k=dugast_k,
maas_a2=maas_a2,
dugast_u=dugast_u,
tuldava_ln=tuldava_ln,
brunet_w=brunet_w,
cttr=cttr,
summer_s=summer_s,
sichel_s=sichel_s,
michea_m=michea_m,
honore_h=honore_h,
entropy=entropy,
yule_k=yule_k,
simpson_d=simpson_d,
herdan_vm=herdan_vm,
hdd=hdd,
orlov_z=orlov_z,
mtld=mtld,
)
# tl_vs: txt_len, vocab_size
# vs_fs: vocab_size, freq_spectrum
# tl_vs_fs: txt_len, vocab_size, freq_spectrum
# tl_fs: txt_len, freq_spectrum
# t: tokens
classes = dict(
tl_vs=(
"type_token_ratio",
"guiraud_r",
"herdan_c",
"dugast_k",
"maas_a2",
"dugast_u",
"tuldava_ln",
"brunet_w",
"cttr",
"summer_s",
),
vs_fs=("sichel_s", "michea_m"),
tl_vs_fs=("honore_h", "herdan_vm", "orlov_z"),
tl_fs=("entropy", "yule_k", "simpson_d", "hdd"),
t=("mtld",),
)
measure_to_class = {m: c for c, v in classes.items() for m in v}
func = measures[measure]
cls = measure_to_class[measure]
for i in range(int(len(tokens) / window_size)):
chunk = tokens[i * window_size : (i * window_size) + window_size]
txt_len, vocab_size, freq_spectrum = preprocess(chunk, fs=True)
if cls == "tl_vs":
result = func(txt_len, vocab_size)
elif cls == "vs_fs":
result = func(vocab_size, freq_spectrum)
elif cls == "tl_vs_fs":
result = func(txt_len, vocab_size, freq_spectrum)
elif cls == "tl_fs":
result = func(txt_len, freq_spectrum)
elif cls == "t":
result = func(chunk)
results.append(result)
if raw:
return results
if ci:
return (np.mean(results), _sttr_ci(results))
return np.mean(results)
|
d86cff5edd61698b1adee14a5c2fb800b4b76608
| 3,643,478
|
def by_label(move_data, value, label_name, filter_out=False, inplace=False):
"""
Filters trajectories points according to specified value and collum label.
Parameters
----------
move_data : dataframe
The input trajectory data
    value : any
        Specifies the value used to filter the trajectory points.
    label_name : String
        Specifies the label of the column used in the filtering.
    filter_out : boolean, optional(false by default)
        If set to True, it will return the trajectory points whose feature value differs from the
        value specified in the parameters; otherwise it returns the trajectory points whose feature
        value matches the one specified in the parameters.
inplace : boolean, optional(false by default)
if set to true the original dataframe will be altered to contain the result of the filtering,
otherwise a copy will be returned.
Returns
-------
move_data : dataframe or None
Returns dataframe with trajectories points filtered by label.
"""
try:
filter_ = move_data[label_name] == value
if filter_out:
filter_ = ~filter_
return move_data.drop(index=move_data[~filter_].index, inplace=inplace)
except Exception as e:
raise e
|
3d772f741539009b756744539f4a524e6ad402ea
| 3,643,479
|
import numpy
def make_pyrimidine(residue, height = 0.4, scale = 1.2):
"""Creates vertices and normals for pyrimidines:Thymine Uracil Cytosine"""
atoms = residue.atoms
names = [name.split("@")[0] for name in atoms.name]
idx=names.index('N1'); N1 = numpy.array(atoms[idx].coords)
idx=names.index('C2'); C2 = numpy.array(atoms[idx].coords)
idx=names.index('N3'); N3 = numpy.array(atoms[idx].coords)
idx=names.index('C4'); C4 = numpy.array(atoms[idx].coords)
idx=names.index('C5'); C5 = numpy.array(atoms[idx].coords)
idx=names.index('C6'); C6 = numpy.array(atoms[idx].coords)
N1_C2 = C2-N1
N1_C6 = C6-N1
C2_C6 = height*norm(C6-C2)
normal = height*numpy.array(crossProduct(N1_C2, N1_C6, normal=True))
center = (N1+C2+N3+C4+C5+C6)/6.0
vertices = numpy.zeros((14,3), float)
vertices[0] = scale*(C2 - normal - center) + center
vertices[1] = scale*(N3 - normal - center) + center
vertices[2] = scale*(C4 - normal - center) + center
vertices[3] = scale*(C5 - normal - center) + center
vertices[4] = scale*(C6 - normal - center) + center
vertices[5] = scale*(C2 + normal - center) + center
vertices[6] = scale*(N3 + normal - center) + center
vertices[7] = scale*(C4 + normal - center) + center
vertices[8] = scale*(C5 + normal - center) + center
vertices[9] = scale*(C6 + normal - center) + center
vertices[10] = scale*(N1 - C2_C6 - normal - center) + center
vertices[11] = scale*(N1 - C2_C6 + normal - center) + center
vertices[12] = scale*(N1 + C2_C6 + normal - center) + center
vertices[13] = scale*(N1 + C2_C6 - normal - center) + center
faces = numpy.array([[13,4,3,2,1,0,10],
[11,5,6,7,8,9,12],
[0,5,11,10,10,10,10], [1,6,5,0,0,0,0,], [2,7,6,1,1,1,1],
[3,8,7,2,2,2,2], [4,9,8,3,3,3,3], [13,12,9,4,4,4,4]])
return vertices, faces
|
eac8e9bd0cc6abeefa5b8a6bad299ff6a0c6b9d8
| 3,643,480
|
import numpy as np
def get_props(filepath, m_co2=22, m_poly=2700/123, N_A=6.022E23,
sigma_co2=2.79E-8, sort=False):
"""
Computes important physical properties from the dft.input file, such as
density of CO2 in the CO2-rich phase, solubility of CO2 in the polyol-rich
phase, and specific volume of the polyol-rich phase.
The dft.input file is structured as:
p \t gsrho1b \t gsrho1a \t 10^-gsrho2b \t gsrho2a.
PARAMETERS
----------
filepath : string
Filepath to file containing densities and pressures (usually dft.input)
m_co2 : float
mass of one bead of CO2 in PC-SAFT model [amu/bead] (= Mw / N)
m_poly : float
mass of one bead of polyol in PC-SAFT model [amu/bead] (= Mw / N)
N_A : float
Avogadro's number (molecules per mol)
sigma_co2 : float
sigma parameter for co2 [cm]
sort : bool
If True, sorts solubility data in terms of increasing pressure
RETURNS
-------
p : list of floats
pressures corresponding to the solubilities [MPa]
props : tuple of lists of floats
Tuple of physical properties calculated (lists of floats):
rho_co2 : density of CO2 in CO2-rich phase [g/mL]
solub : solubility of CO2 in polyol-rich phase [w/w]
spec_vol : specific volume of polyol-rich phase [mL/g]
"""
# loads data
data = np.genfromtxt(filepath, delimiter='\t')
# extracts pressure [MPa] from first column
p = data[:,0]
# extracts the density of CO2 in the co2-rich phase [beads/sigma^3]
rho_co2_v = data[:,1]
# extracts the density of CO2 in the polyol-rich phase [beads/sigma^3]
rho_co2_l = data[:,2]
# extracts the density of polyol in the polyol-rich phase [beads/sigma^3]
rho_poly_l = data[:,4]
# conversions from beads/sigma^3 to g/mL
conv_co2 = m_co2/N_A/sigma_co2**3
conv_poly = m_poly/N_A/sigma_co2**3
# computes density of CO2 in the CO2-rich phase [g/mL]
rho_co2 = rho_co2_v*conv_co2
# computes solubility of CO2 in the polyol-rich phase [w/w]
solub = rho_co2_l*conv_co2 / (rho_co2_l*conv_co2 + rho_poly_l*conv_poly)
# computes specific volume of the polyol-rich phase [mL/g]
spec_vol = 1 / (rho_co2_l*conv_co2 + rho_poly_l*conv_poly)
# sorts data if requested
if sort:
inds_sort = np.argsort(p)
p = p[inds_sort]
rho_co2 = rho_co2[inds_sort]
solub = solub[inds_sort]
spec_vol = spec_vol[inds_sort]
props = (rho_co2, solub, spec_vol)
return p, props
|
2aec573795a40c6c95e19ea9ae531abca47128e8
| 3,643,481
|
import pandas as pd
def get_genotype(chrom, rsid):
    """Scan the per-chromosome dosage file in chunks and return the chunk
    whose index contains the given rsid (or the last chunk read if it is not found).
    """
geno_path = ('/home/hsuj/lustre/geno/'
'CCF_1000G_Aug2013_Chr{0}.dose.double.ATB.RNASeq_MEQTL.txt')
geno_gen = pd.read_csv(geno_path.format(str(chrom)),
sep=" ", chunksize = 10000)
for i in geno_gen:
if rsid in i.index:
break
else: pass
return(i)
|
6269aace777e5870e827152158ab70b73a44f401
| 3,643,482
|
import time
def task_dosomething(storage):
"""
Task that gets launched to handle something in the background until it is completed and then terminates. Note that
this task doesn't return until it is finished, so it won't be listening for Threadify pause or kill requests.
"""
# An important task that we want to run in the background.
for i in range(10):
print(i, end="")
time.sleep(1)
return False
|
9eabf3977c53932de8d775c21e4a1209003e0892
| 3,643,483
|
import tensorflow as tf
def highway(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu, scope='Highway'):
"""Highway Network (cf. http://arxiv.org/abs/1505.00387).
t = sigmoid(Wy + b)
z = t * g(Wy + b) + (1 - t) * y
where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
"""
with tf.variable_scope(scope):
        for idx in range(num_layers):
g = f(linear(input_, size, scope='highway_lin_%d' % idx))
t = tf.sigmoid(linear(input_, size, scope='highway_gate_%d' % idx) + bias)
output = t * g + (1. - t) * input_
input_ = output
return output
|
dd90cd6107d5d69596c18d46bbef990cec8b1112
| 3,643,484
|
import functools
def convert_to_entry(func):
"""Wrapper function for converting dicts of entries to HarEnrty Objects"""
@functools.wraps(func)
def inner(*args, **kwargs):
# Changed to list because tuple does not support item assignment
changed_args = list(args)
# Convert the dict (first argument) to HarEntry
if isinstance(changed_args[0], dict):
changed_args[0] = HarEntry(changed_args[0])
return func(*tuple(changed_args), **kwargs)
return inner
|
a5be9b430a47cb9c0c448e8ba963538fd6a435dc
| 3,643,485
|
import pandas as pd
def transform(record: dict, key_ref: dict, country_ref: pd.DataFrame, who_coding: pd.DataFrame, no_update_phrase: pd.DataFrame):
"""
Apply transformations to OXCGRT records.
Parameters
----------
record : dict
Input record.
key_ref : dict
Reference for key mapping.
country_ref : pd.DataFrame
Reference for WHO accepted country names.
who_coding : pd.DataFrame
Reference for WHO coding.
no_update_phrase : pd.DataFrame
Reference for "no update" phrases.
Returns
-------
dict
Record with transformations applied.
"""
# 1. generator function of new record with correct keys (shared)
new_record = utils.generate_blank_record()
# 2. replace data in new record with data from old record using column
# reference (shared)
record = utils.apply_key_map(new_record, record, key_ref)
# 3. Assign unique ID (shared)
# record = utils.assign_id(record)
if record["prov_measure"] == "H8_Protection of elderly people":
return None
# 4. Handle date formatting
record = utils.parse_date(record)
# 8. replace sensitive country names
record = utils.replace_sensitive_regions(record)
# shift areas that should be countries.
record = utils.replace_country(record, 'United States', 'Virgin Islands')
# 7. Make manual country name changes
record = utils.replace_conditional(record, 'country_territory_area', 'Virgin Islands', 'US Virgin Islands')
record = utils.replace_conditional(record, 'country_territory_area', 'United States Virgin Islands', 'US Virgin Islands')
record = utils.replace_conditional(record, 'country_territory_area', 'Eswatini', 'Swaziland')
record = utils.replace_conditional(record, 'country_territory_area', 'South Korea', 'Korea')
# 9. assign ISO code
record['iso'] = countrycode(codes=record['country_territory_area'], origin='country_name', target='iso3c')
# 10. check missing ISO
check.check_missing_iso(record)
# Remove records where there is no data in prov_subcategory
if record['prov_subcategory'] == 0:
return(None)
# Removes information in flag variables for now
record['prov_subcategory'] = int(record['prov_subcategory'])
# 11. Join WHO accepted country names (shared)
record = utils.assign_who_country_name(record, country_ref)
record = financial_measures(record)
# 12. Join who coding from lookup (shared)
record = utils.assign_who_coding(record, who_coding)
# 13. check for missing WHO codes (shared)
check.check_missing_who_code(record)
# 16. Add WHO PHSM admin_level values
record = utils.add_admin_level(record)
record = utils.remove_tags(record)
# 17. Remove update records
record = assign_comment_links(record)
# Filter out records with "no update" phrases
record = label_update_phrase(record, list(no_update_phrase['phrase']))
return(record)
|
2d115f8d64731c5ca88807845d09085b4f07acfd
| 3,643,486
|
def hc_genes(
input_gene_expression: "gene expression data filename (.gct file) where rows are genes and columns are samples",
clustering_type: "single or consensus -- Only single is suported at the moment",
distance_metric: "the function to be used when comparing the distance/similarity of the rows in the "
"input_gene_expression dataset",
file_basename: "the name to use when naming output files" = 'HC_out',
clusters_to_highlight: "how many clusters to highlight in the dendrogram" = None):
"""
Perform hierarchical clustering to group genes with similar expression profile.
:param input_gene_expression: str; gene expression data filename (.gct file)
where rows are genes and columns are samples
:param clustering_type: str; single or consensus
:param distance_metric: str; the function to be used when comparing the distance/similarity of the rows
in the input_gene_expression dataset
:param file_basename: str; the name to use when naming output files
:param clusters_to_highlight: int; how many clusters to highlight in the dendrogram
:return: object; Sklearn's AgglomerativeClustering fitted model
"""
    print("Currently clustering_type is being ignored; only 'single' is supported.")
pwd = '.'
gct_name = input_gene_expression
col_distance_metric = 'No_column_clustering'
output_distances = False
row_distance_metric = distance_metric
clustering_method = 'average'
output_base_name = file_basename
row_normalization = False
col_normalization = False
row_centering = 'Mean'
col_centering = 'Mean'
custom_plot = 'Genes'
show = True
# print("This are the parameters to be used (for debugging purposes)")
# print("""
# pwd = '.'
# gct_name = {gct_name}
# col_distance_metric = {col_distance_metric}
# output_distances = {output_distances}
# row_distance_metric = {row_distance_metric}
# clustering_method = {clustering_method}
# output_base_name = {output_base_name}
# row_normalization = {row_normalization}
# col_normalization = {col_normalization}
# row_centering = {row_centering}
# col_centering = {col_centering}
# """.format(
# gct_name=gct_name, col_distance_metric=col_distance_metric,
# output_distances=str(output_distances),
# row_distance_metric=row_distance_metric, clustering_method=clustering_method,
# output_base_name=output_base_name,
# row_normalization=str(row_normalization), col_normalization=str(col_normalization),
# row_centering=row_centering, col_centering=col_centering
# )
# )
print("Now we will start performing hierarchical clustering, this may take a little while.")
col_model, row_model = HierarchicalClustering(pwd,
gct_name,
col_distance_metric,
row_distance_metric,
clustering_method,
output_base_name,
row_normalization,
col_normalization,
row_centering,
col_centering,
output_distances,
custom_plot,
clusters_to_highlight,
show)
print("Done with Hierarchical Clustering!")
return row_model
|
3c0a6345f21a6387e215e15b72a2e933d2586fd3
| 3,643,487
|
from google.cloud import vision
import io
def detect_text(path):
"""Detects text in the file."""
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
    # The first annotation holds the full detected text block.
    if texts:
        return texts[0].description
    return None
|
6dea35d84f538322eed74c9c7c1f9d7a4882dd33
| 3,643,488
|
def file_util_is_ext(path, ext):
    """Check whether the file at `path` has the given extension; `ext` does not include the dot."""
    return file_util_get_ext(path) == ext
|
27389af32333036b998a421ed35952705092ade6
| 3,643,489
|
import pandas as pd
def load_tract(repo, tract, patches=None, **kwargs):
"""Merge catalogs from forced-photometry coadds across available filters.
Parameters
--
tract: int
Tract of sky region to load
repo: str
File location of Butler repository+rerun to load.
patches: list of str
List of patches. If not specified, will default to '0,0'--'7,7'.
Returns
--
Pandas DataFrame of merged catalog
"""
butler = Butler(repo)
if patches is None:
# Extract the patches for this tract from the skymap
skymap = butler.get(datasetType='deepCoadd_skyMap')
patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]
merged_tract_cat = pd.DataFrame()
for patch in patches:
this_patch_merged_cat = load_patch(butler, tract, patch, **kwargs)
        merged_tract_cat = merged_tract_cat.append(this_patch_merged_cat)  # append returns a new frame; reassign it
return merged_tract_cat
|
f989c947dec96426b15219ab224364c96f65d1fb
| 3,643,490
|
import datetime
def calculate_delta(arg1, arg2):
"""
Calculates and returns a `datetime.timedelta` object representing
the difference between arg1 and arg2. Arguments must be either both
`datetime.date`, both `datetime.time`, or both `datetime.datetime`.
The difference is absolute, so the order of the arguments doesn't matter.
"""
if arg1 > arg2:
arg1, arg2 = arg2, arg1
    # Check datetime first because datetime.datetime is a subclass of datetime.date.
    if isinstance(arg1, datetime.datetime) and isinstance(arg2, datetime.datetime):
        return arg2 - arg1
    if isinstance(arg1, datetime.date) and isinstance(arg2, datetime.date):
        return (
            datetime.datetime(arg2.year, arg2.month, arg2.day)
            - datetime.datetime(arg1.year, arg1.month, arg1.day)
        )
    if isinstance(arg1, datetime.time) and isinstance(arg2, datetime.time):
        return (
            datetime.datetime(1, 1, 1, arg2.hour, arg2.minute, arg2.second)
            - datetime.datetime(1, 1, 1, arg1.hour, arg1.minute, arg1.second)
        )
raise TypeError(
f'Cannot calculate delta between values of types '
f'{type(arg1)} and {type(arg2)} because they are not equivalent'
)
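# Added usage sketch (not from the original source):
print(calculate_delta(datetime.date(2021, 3, 1), datetime.date(2021, 3, 10)))  # 9 days, 0:00:00
print(calculate_delta(datetime.time(10, 30), datetime.time(9, 0)))             # 1:30:00 (order-independent)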
|
f6b3f0b86bd73be7d1702ba8893cd70d99b0b321
| 3,643,491
|
import yaml
def create_model_config(model_dir: str, config_path: str = None):
"""Creates a new configuration file in the model directory and returns the config."""
# read the config file
config_content = file_io.read_file_to_string(root_dir(config_path))
# save the config file to the model directory
write_model_config(model_dir, config_content)
# load config
config = yaml.safe_load(config_content)
return config
|
c695ee36b6dec24ef17179adbf40e81aff708082
| 3,643,492
|
def get_deployment_physnet_mtu():
"""Retrieves global physical network MTU setting.
Plugins should use this function to retrieve the MTU set by the operator
that is equal to or less than the MTU of their nodes' physical interfaces.
Note that it is the responsibility of the plugin to deduct the value of
any encapsulation overhead required before advertising it to VMs.
Note that this function depends on the global_physnet_mtu config option
being registered in the global CONF.
:returns: The global_physnet_mtu from the global CONF.
"""
return cfg.CONF.global_physnet_mtu
|
161e7f87e2a68643f81e2b62061d65251a1249de
| 3,643,493
|
def _path(path):
"""Helper to build an OWFS path from a list"""
path = "/" + "/".join(str(x) for x in path)
return path.encode("utf-8") + b"\0"
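# Added usage sketch (not from the original source; the device id is hypothetical): OWFS paths
# are slash-joined, prefixed with '/', and NUL-terminated bytes.
print(_path([10, "F8A4", "temperature"]))  # b'/10/F8A4/temperature\x00'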
|
d38937deb459bb9bf393402efc31a90a285d4a6d
| 3,643,494
|
import time
def current_milli_time():
"""Return the current time in milliseconds"""
return int(time.time() * 1000)
|
66605d2e23df2c428c70af75247e2b22a2795363
| 3,643,495
|
def function_exists(function_name, *args, **kwargs):
"""
Checks if a function exists in the catalog
"""
# TODO (dmeister): This creates an SQL injection, but it should not
# be a problem for this purpose.
function_exists_text_count = PSQL.run_sql_command(
"SELECT 'function exists' FROM pg_proc WHERE proname='%s'" % (function_name),
*args, **kwargs).count("function exists")
return function_exists_text_count == 2
|
bf351461b4110349eea1734ef9b482435e946a4e
| 3,643,496
|
from typing import Callable
from typing import Sequence
from typing import Dict
from typing import Optional
import numpy as np
import pandas as pd
from numpy.typing import ArrayLike, NDArray  # assumed source for the array type aliases
def _loo_jackknife(
func: Callable[..., NDArray],
nobs: int,
args: Sequence[ArrayLike],
kwargs: Dict[str, ArrayLike],
extra_kwargs: Optional[Dict[str, ArrayLike]] = None,
) -> NDArray:
"""
Leave one out jackknife estimation
Parameters
----------
func : callable
Function that computes parameters. Called using func(*args, **kwargs)
nobs : int
Number of observation in the data
args : list
List of positional inputs (arrays, Series or DataFrames)
kwargs : dict
List of keyword inputs (arrays, Series or DataFrames)
Returns
-------
ndarray
Array containing the jackknife results where row i corresponds to
leaving observation i out of the sample
"""
results = []
for i in range(nobs):
items = np.r_[0:i, i + 1 : nobs]
args_copy = []
for arg in args:
if isinstance(arg, (pd.Series, pd.DataFrame)):
args_copy.append(arg.iloc[items])
else:
args_copy.append(arg[items])
kwargs_copy = {}
for k, v in kwargs.items():
if isinstance(v, (pd.Series, pd.DataFrame)):
kwargs_copy[k] = v.iloc[items]
else:
kwargs_copy[k] = v[items]
if extra_kwargs is not None:
kwargs_copy.update(extra_kwargs)
results.append(func(*args_copy, **kwargs_copy))
return np.array(results)
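# Added usage sketch (hedged, not from the original source): leave-one-out means of a toy array.
demo_data = np.array([1.0, 2.0, 3.0, 4.0])
print(_loo_jackknife(np.mean, nobs=4, args=[demo_data], kwargs={}))
# -> approximately [3.0, 2.6667, 2.3333, 2.0]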
|
83e39e97e08ef4d16f2c48a084c5ed40d0fbc0ad
| 3,643,497
|
from Bio.SeqIO.QualityIO import solexa_quality_from_phred
def _fastq_illumina_convert_fastq_solexa(in_handle, out_handle, alphabet=None):
"""Fast Illumina 1.3+ FASTQ to Solexa FASTQ conversion (PRIVATE).
Avoids creating SeqRecord and Seq objects in order to speed up this
conversion.
"""
# Map unexpected chars to null
mapping = "".join([chr(0) for ascii in range(0, 64)] +
[chr(64 + int(round(solexa_quality_from_phred(q))))
for q in range(0, 62 + 1)] +
[chr(0) for ascii in range(127, 256)])
assert len(mapping) == 256
return _fastq_generic(in_handle, out_handle, mapping)
|
06422e23bb005756742207e63ec1d8dc603ba5b2
| 3,643,498
|
def pull_branch(c: InvokeContext, repo: Repo, directory: str, branch_name: str) -> CommandResult:
"""
Change to the repo directory and pull master.
:argument c: InvokeContext
:argument repo: Repo the repo to pull
:argument directory: str the directory to change to
:argument branch_name: str the branch to pull
"""
project_path = _generate_path(directory, repo.folder_name)
cmd = f"cd {project_path} && git checkout {branch_name} && git pull"
return _run_command(c, cmd)
|
5c21bdbbe91f5f82b40645a3449d373f6c464717
| 3,643,499
|