| content | id |
|---|---|
def start(timeout=5, backlog_reassign_delay=None):
"""Create, start, and return the block pipeline."""
pipeline = create_pipeline(timeout=timeout,
backlog_reassign_delay=backlog_reassign_delay)
pipeline.start()
return pipeline
| 14,500
|
def preprocess_variable_features(features, interaction_augmentation, normalization):
"""
Features preprocessing following Khalil et al. (2016) Learning to Branch in Mixed Integer Programming.
Parameters
----------
features : 2D np.ndarray
The candidate variable features to preprocess.
interaction_augmentation : bool
Whether to augment features with 2-degree interactions (useful for linear models such as SVMs).
normalization : bool
        Whether to normalize features in [0, 1] (i.e., query-based normalization).
Returns
-------
variable_features : 2D np.ndarray
The preprocessed variable features.
"""
# 2-degree polynomial feature augmentation
if interaction_augmentation:
interactions = (
np.expand_dims(features, axis=-1) * \
np.expand_dims(features, axis=-2)
).reshape((features.shape[0], -1))
features = np.concatenate([features, interactions], axis=1)
# query-based normalization in [0, 1]
if normalization:
features -= features.min(axis=0, keepdims=True)
max_val = features.max(axis=0, keepdims=True)
max_val[max_val == 0] = 1
features /= max_val
return features
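
# Hedged usage sketch (not from the original source): run the preprocessing on a
# small random candidate-feature matrix with both augmentation and normalization.
import numpy as np

feats = np.random.rand(5, 3)                        # 5 candidates, 3 raw features
out = preprocess_variable_features(feats, interaction_augmentation=True,
                                    normalization=True)
print(out.shape)                                    # (5, 3 + 3*3) == (5, 12)
assert out.min() >= 0.0 and out.max() <= 1.0        # query-based [0, 1] scaling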
| 14,501
|
def load_dicomdir_records(datasets):
""" If a Data Set is a DICOMDIR Record, replace it by the file it
(or its children) references.
"""
result = []
file_ids = set()
for dataset in datasets :
if "directory_record_type" in dataset : # Directory Record Type
children = get_child_file_records(dataset)
file_ids.update([(child.path, tuple(child.referenced_file_id.value))
for child in children])
else :
result.append(dataset)
for index, (path, file_id) in enumerate(file_ids) :
filename = find_dicomdir_file(os.path.dirname(path), file_id)
result.append(dataset_io.read(filename))
load_dicomdir_records.progress(float(1+index)/float(len(file_ids)))
load_dicomdir_records.progress(1.0)
return result
| 14,502
|
def reverse_inverse_from_cholesky_band_proto(S, l):
"""
S -> L
:param S: sparse subset inverse of banded matrix L
:param l: number of subdiagonals in S
:return: Ls: reconstructed cholesky decomposition
"""
# forward pass
k = l + 1 # bandwidth
n = S.shape[1]
# construct vector e = [1, 0, ..., 0]
V = np.zeros_like(S)
e = np.zeros((k))
e[0] = 1
for i in range(n):
chol_S = np.linalg.cholesky(S[i : i + k, i : i + k])
V[i : i + k, i] = cho_solve((chol_S, True), e[: n - i])
Ls = V / np.sqrt(np.diag(V)[None, :])
return Ls
| 14,503
|
def set_run_state(
collection: Collection,
run_id: str,
task_id: Optional[str] = None,
state: str = 'UNKNOWN',
):
"""Set/update state of run associated with Celery task."""
if not task_id:
document = collection.find_one(
filter={'run_id': run_id},
projection={
'task_id': True,
'_id': False,
}
)
_task_id = document['task_id']
else:
        _task_id = task_id
    document = None  # ensure `document` is defined for the `finally` block
    try:
document = db_utils.update_run_state(
collection=collection,
task_id=_task_id,
state=state,
)
except Exception as e:
logger.exception(
(
"Database error. Could not update state of run '{run_id}' "
"(task id: '{task_id}') to state '{state}'. Original error "
"message: {type}: {msg}"
).format(
run_id=run_id,
task_id=_task_id,
state=state,
type=type(e).__name__,
msg=e,
)
)
finally:
if document:
logger.info(
(
"State of run '{run_id}' (task id: '{task_id}') "
"changed to '{state}'."
).format(
run_id=run_id,
task_id=_task_id,
state=state,
)
)
| 14,504
|
def fused_bn_grad_5D_run_2(shape, dtype, eps, kernel_name, attrs):
""" test bnGrad_2 """
def get_expect(dgamma_red_hw, dbeta_red_hw, var, gamma, eps, data_shape):
m = data_shape[0] * data_shape[2] * data_shape[3]
neg_m_rec = -1.0 / m
eps = np.array([eps], dtype=var.dtype).reshape([1] * 5)
neg_m_rec = np.array([neg_m_rec], dtype=var.dtype).reshape([1] * 5)
s = (1.0 / np.sqrt(var + eps)).astype(var.dtype)
dgamma = s * np.sum(dgamma_red_hw, axis=0, keepdims=True)
dbeta = np.sum(dbeta_red_hw, axis=0, keepdims=True)
rs = gamma * s
dgamma_dx = neg_m_rec * rs * s * dgamma
dbeta_dx = neg_m_rec * rs * dbeta
return [dgamma, dbeta, rs, dgamma_dx, dbeta_dx]
shape_nc1c0 = (shape[0], shape[1], 1, 1, shape[4])
shape_c1c0 = (1, shape[1], 1, 1, shape[4])
bng2_shapes = [shape_nc1c0, shape_nc1c0, shape_c1c0, shape_c1c0]
bng2_dtypes = ["float32"] * len(bng2_shapes)
bng2_opattrs = [eps, shape]
# np.random.seed(0)
inputs = [np.random.rand(*s).astype(t) for (s, t) in zip(bng2_shapes, bng2_dtypes)]
out_shapes = [shape_c1c0, shape_c1c0, shape_c1c0, shape_c1c0, shape_c1c0]
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(fused_batch_norm_grad_split.fused_bn_grad_2,
bng2_shapes, bng2_dtypes, bng2_opattrs,
kernel_name=kernel_name + "_step2", attrs=attrs, tuning=t)
if t:
outputs = [np.full(s, np.nan, "float32") for s in out_shapes]
expects = get_expect(*inputs, *bng2_opattrs)
return mod, expects, {"args": (*inputs, *outputs), 'outputs': tuple(range(-len(outputs), 0)),
'tuning': False}
else:
return mod
mod = utils.op_build_test(fused_batch_norm_grad_split.fused_bn_grad_2,
bng2_shapes, bng2_dtypes, bng2_opattrs,
kernel_name=kernel_name + "_step2", attrs=attrs)
outputs = [np.full(s, np.nan, "float32") for s in out_shapes]
outputs = list(utils.mod_launch(mod, (*inputs, *outputs), outputs=tuple(range(-len(outputs), 0)),
expect=get_expect(*inputs, *bng2_opattrs)))
expects = get_expect(*inputs, *bng2_opattrs)
rtol, atol = get_rtol_atol("fused_batch_norm_grad", dtype)
results = list(map(lambda x, y: np.allclose(x, y, rtol=rtol, atol=atol), outputs, expects))
print("results", results)
return inputs, outputs, expects, all(results)
| 14,505
|
async def test_set_states(aresponses, v2_server, v2_state_response):
"""Test the ability to set the state of a v2 system."""
v2_state_response["requestedState"] = "away"
v2_server.add(
"api.simplisafe.com",
f"/v1/subscriptions/{TEST_SUBSCRIPTION_ID}/state",
"post",
response=aiohttp.web_response.json_response(v2_state_response, status=200),
)
v2_state_response["requestedState"] = "home"
v2_server.add(
"api.simplisafe.com",
f"/v1/subscriptions/{TEST_SUBSCRIPTION_ID}/state",
"post",
response=aiohttp.web_response.json_response(v2_state_response, status=200),
)
v2_state_response["requestedState"] = "off"
v2_server.add(
"api.simplisafe.com",
f"/v1/subscriptions/{TEST_SUBSCRIPTION_ID}/state",
"post",
response=aiohttp.web_response.json_response(v2_state_response, status=200),
)
async with aiohttp.ClientSession() as session:
simplisafe = await API.async_from_credentials(
TEST_USERNAME, TEST_PASSWORD, session=session
)
await simplisafe.async_verify_2fa_email()
systems = await simplisafe.async_get_systems()
system = systems[TEST_SYSTEM_ID]
await system.async_set_away()
assert system.state == SystemStates.AWAY
await system.async_set_home()
assert system.state == SystemStates.HOME
await system.async_set_off()
assert system.state == SystemStates.OFF
aresponses.assert_plan_strictly_followed()
| 14,506
|
def get_best_distance(pdb_file, reference_point, resname="GRW"):
"""
Finds fragment atom closest to the user-defined reference point.
Parameters
----------
pdb_file : str
Path to PDB file.
reference_point : list[float]
Coordinates of the reference point to which the distance should be calculated.
resname : str
Residue name of the grown fragment, default = "GRW".
Returns
-------
    Distance from the closest atom to the reference point, and the closest atom itself.
"""
struct = parser.get_structure("epoch", pdb_file)
ref_vector = numpy.array(reference_point)
    best_dist, best_atom = None, None
for residue in struct.get_residues():
if residue.resname == resname:
for atom in residue.get_atoms():
atom_vector = numpy.array(atom.get_coord())
dist = numpy.linalg.norm(atom_vector - ref_vector)
                if best_dist is None or dist < best_dist:
best_atom = atom
best_dist = dist
return best_dist, best_atom
| 14,507
|
def process(
hw_num: int,
problems_to_do: Optional[Iterable[int]] = None,
prefix: Optional[Path] = None,
by_hand: Optional[Iterable[int]] = None,
legacy: bool = False,
) -> None:
"""Process the homework problems in ``prefix`` folder.
Arguments
---------
hw_num
The number of this homework
problems_to_do, optional
A list of the problems to be processed
prefix, optional
A `~pathlib.Path` to this homework assignment folder
by_hand, optional
A list of the problems that should be labeled to be completed
by hand and have an image with the solution included.
legacy, optional
A boolean flag determining whether the legacy method of finding
solutions will be used, based on parsing cell content.
"""
if prefix is None:
prefix = Path(".")
problems: Iterable[Path]
if problems_to_do is None:
        # The glob syntax here means the filename must start with
        # homework-, be followed by the homework number, followed by a
# dash, then a digit representing the problem number for this
# homework number, then any number of characters (in practice
# either nothing or, rarely, another digit), then the ipynb
# extension. Examples:
# homework-1-1.ipynb, homework-10-1.ipynb, homework-3-10.ipynb
problems = list(prefix.glob(f"homework-{hw_num}-[0-9]*.ipynb"))
else:
problems = [prefix / f"homework-{hw_num}-{i}.ipynb" for i in problems_to_do]
problems = sorted(problems, key=lambda k: k.stem[-1])
output_directory: Path = (prefix / "output").resolve()
fw = FilesWriter(build_directory=str(output_directory))
assignment_zip_name = output_directory / f"homework-{hw_num}.zip"
solution_zip_name = output_directory / f"homework-{hw_num}-soln.zip"
assignment_pdfs: List[BytesIO] = []
solution_pdfs: List[BytesIO] = []
assignment_pdf: bytes
solution_pdf: bytes
assignment_nb: str
solution_nb: str
res: Dict[str, Union[Dict[str, bool], str, bool]] = {
"delete_pymarkdown": True,
"global_content_filter": {"include_raw": False},
"legacy": legacy,
}
for problem in problems:
print("Working on:", problem)
res["unique_key"] = problem.stem
problem_number = int(problem.stem.split("-")[-1])
if by_hand is not None and problem_number in by_hand:
res["by_hand"] = True
else:
res["by_hand"] = False
problem_fname = str(problem.resolve())
problem_nb = nbformat.read(problem_fname, as_version=4)
if "celltoolbar" in problem_nb.metadata:
del problem_nb.metadata["celltoolbar"]
# Process assignments
res["remove_solution"] = True
assignment_pdf, _ = pdf_exp.from_notebook_node(problem_nb, resources=res)
assignment_pdfs.append(BytesIO(assignment_pdf))
assignment_nb, _ = nb_exp.from_notebook_node(problem_nb, resources=res)
with ZipFile(assignment_zip_name, mode="a") as zip_file:
zip_file.writestr(problem.name, assignment_nb)
# Process solutions
res["remove_solution"] = False
solution_pdf, _ = pdf_exp.from_notebook_node(problem_nb, resources=res)
solution_pdfs.append(BytesIO(solution_pdf))
solution_nb, _ = nb_exp.from_notebook_node(problem_nb, resources=res)
with ZipFile(solution_zip_name, mode="a") as zip_file:
zip_file.writestr(problem.stem + "-soln" + problem.suffix, solution_nb)
resources: Dict[str, Any] = {
"metadata": {
"name": f"homework-{hw_num}",
"path": str(prefix),
"modified_date": date.today().strftime("%B %d, %Y"),
},
"output_extension": ".pdf",
}
fw.write(combine_pdf_as_bytes(assignment_pdfs), resources, f"homework-{hw_num}")
resources["metadata"]["name"] = f"homework-{hw_num}-soln"
fw.write(combine_pdf_as_bytes(solution_pdfs), resources, f"homework-{hw_num}-soln")
| 14,508
|
def set_build_revision(revision):
"""Set the p4 revision for following jobs in this build"""
set_metadata(__REVISION_METADATA__, revision)
set_metadata(__REVISION_METADATA_DEPRECATED__, revision)
| 14,509
|
def delete_entity(entity: EntityID):
"""
Queues entity for removal from the world_objects. Happens at the next run of process.
"""
if entity:
if snecs.exists(entity, snecs.world.default_world):
snecs.schedule_for_deletion(entity)
name = get_name(entity)
logging.info(f"'{name}' ({entity}) added to stack to be deleted on next frame.")
else:
logging.warning(f"Tried to delete entity {entity} but they don't exist!")
else:
logging.error("Tried to delete an entity but entity was None.")
| 14,510
|
def default_IM_weights(IM_j: IM, IMs: np.ndarray) -> pd.Series:
"""
Returns the default IM weights based on the conditioning IM
If the conditioning IM (IM_j) is spectral acceleration (SA) the
weighting is 70% across the SAs and 30% across all other IMs
Otherwise a uniform weighting distribution is used
Parameters
----------
IM_j: IM
Conditioning IM
IMs: list of IM
IM types for which to get the default weights
Returns
-------
im_weights: pandas series
        Weights for the specified IM types
"""
# Use 70% (SA) / 30% (other) weighting if
# conditioning IM is SA
if IM_j.is_pSA():
pSA_mask = np.asarray([cur_im.im_type is IMType.pSA for cur_im in IMs])
n_pSA_IMs = np.count_nonzero(pSA_mask)
n_other_IMs = IMs.size - n_pSA_IMs
if n_other_IMs == 0:
im_weights = np.ones(n_pSA_IMs, dtype=float) / n_pSA_IMs
else:
im_weights = np.full(IMs.size, np.nan)
im_weights[pSA_mask] = (1.0 / n_pSA_IMs) * 0.7
im_weights[~pSA_mask] = (1.0 / n_other_IMs) * 0.3
# Otherwise, default to uniform weighting
else:
print(
f"WARNING: Defaulting to uniform IM weighting as the "
f"conditioning is not SA."
)
im_weights = np.ones(IMs.size, dtype=float) / IMs.size
return pd.Series(data=im_weights, index=IMs)
| 14,511
|
def guess_file_type(filename: FileSystemPath) -> Type[PackFile]:
"""Helper to figure out the most appropriate file type depending on a filename."""
filename = str(filename)
if filename.endswith(".json"):
return JsonFile
elif filename.endswith((".yml", ".yaml")):
return YamlFile
elif filename.endswith(".png"):
return PngFile
mime_type, _ = mimetypes.guess_type(filename, strict=False)
if mime_type and mime_type.startswith("text/"):
return TextFile
return BinaryFile
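
# Hedged usage sketch: the returned classes (JsonFile, YamlFile, PngFile, TextFile,
# BinaryFile) are assumed to be the PackFile subclasses imported by this module.
assert guess_file_type("data/config.json") is JsonFile
assert guess_file_type("settings.yaml") is YamlFile
assert guess_file_type("textures/icon.png") is PngFile
assert guess_file_type("notes.txt") is TextFile      # mime type text/plain
assert guess_file_type("blob.bin") is BinaryFile     # unknown mime -> binary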
| 14,512
|
def _fcn_mg_joint_pos(t, q_init, q_end, t_strike_end):
"""Helper function for `create_mg_joint_pos_policy()` to fit the `TimePolicy` scheme"""
return ((q_end - q_init) * min(t / t_strike_end, 1) + q_init) / 180 * math.pi
| 14,513
|
def test_circuit_run(default_compilation_configuration):
"""Test function for `run` method of `Circuit`"""
def f(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
inputset = range(2 ** 3)
circuit = hnp.compile_numpy_function(f, {"x": x}, inputset, default_compilation_configuration)
for x in inputset:
assert circuit.run(x) == circuit.engine.run(x)
| 14,514
|
def detection():
"""
Programmed by: David Williams, Aspen Henry, and Slate Hayes
    Description: detection is a state where the server tries to find all the faces in a frame; if a face is
    registered, it then looks for fingers held up next to the face.
"""
#STEP 1: Get and Process frame
# print("Detection!")
frame = ''
if not queues["rawFrame"].empty():
frame = queues["rawFrame"].get_nowait()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
ID, conf = recognizer.predict(cv2.resize(gray[y:y+h, x:x+w], (400, 400)))
#print("User ID:",ID, "\tconf:", conf)
# print(conf)
global registered_ids
if ID in registered_ids:
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
                cv2.rectangle(frame, (x-int(1.5*w), y-int(1.5*h/2)), (x-2, y+int(1.5*(h/2))), (255, 0, 0), 2)
cv2.putText(frame, usernames[ID], (x, y+h+40), font, 2,(255,255,255),1,cv2.LINE_AA)
fingers = -1
roi = frame[y-int(1.5*h/2):y+int(1.5*h/2), x-int(1.5*w):x-2]
fingers = gesture.get_fingers(roi, True)
cv2.putText(frame, str(fingers), (x-int(1.5*w), y+int(1.5*h/2)+5), font, 2, (255,255,255), 1, cv2.LINE_AA)
tagProcessing(usernames[ID], fingers)
#print("User ID:",ID," Fingers:", fingers)
else:
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                cv2.putText(frame, "unknown", (x, y+h+40), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
#STEP 2: Facial Recognition
#STEP 3: Gesture Recognition
#STEP 4: Build CoMPES Tag
#STEP 5: Send processed frame to webpage
return frame
| 14,515
|
def rex_coverage(patterns, example_freqs, dedup=False):
"""
Given a list of regular expressions and a dictionary of examples
and their frequencies, this counts the number of times each pattern
    matches an example.
If ``dedup`` is set to ``True``, the frequencies are ignored, so that only
the number of keys is returned.
"""
results = []
for p in patterns:
p = '%s%s%s' % ('' if p.startswith('^') else '^',
p,
'' if p.endswith('$') else '$')
r = re.compile(p, re.U)
if dedup:
results.append(sum(1 if re.match(r, k) else 0
for k in example_freqs))
else:
results.append(sum(n if re.match(r, k) else 0
for (k, n) in example_freqs.items()))
return results
| 14,516
|
def build_diamond(validated_letter):
"""
>:param str validated_letter: A capital letter, that will be used to generate the
list of strings needed to print out the diamond.
>**Returns:** A list a strings that contains the correct spacing for printing
the diamond.
build_diamond is used to generate the list of strings needed to print the diamond structure.
It takes a single argument of a letter (in string format), and returns a list of strings.
This list of strings can then be printed with newline characters (using join) to output the
diamond structure.
"""
a_ascii = ord('A')
rows = ord(validated_letter) - a_ascii + 1
diamond = []
for row in list(range(rows)) + list(reversed(range(rows-1))):
if row == 0:
diamond.append('{: <{w1}}{current_letter}'.format('', w1=rows-1, current_letter=chr(a_ascii+row)))
else:
diamond.append('{: <{w1}}{current_letter}{: <{w2}}{current_letter}'.format('', '', w1=rows-row-1, current_letter=chr(a_ascii+row), w2=row*2-1))
return diamond
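
# Worked example (assumes the letter has already been validated as a single
# capital): printing the rows produces the classic diamond shape.
for row in build_diamond('C'):
    print(row)
# Expected output:
#   A
#  B B
# C   C
#  B B
#   A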
| 14,517
|
def animate_resize(object, size_start, size_end, duration_ms=250):
"""Create resize animation on the UI.
Args:
object (App(QDialog)): UI element to be resized.
size_start (QSize): QT tuple of the window size at start.
size_end (QSize): QT tuple of the window size after resizing.
duration_ms (int, optional): Length of the animation in ms.
"""
object.animation = QtCore.QPropertyAnimation(object, b"size")
object.animation.setDuration(duration_ms)
object.animation.setStartValue(size_start)
object.animation.setEndValue(size_end)
object.animation.start()
| 14,518
|
def human_timestamp(timestamp, now=datetime.datetime.utcnow):
"""Turn a :py:class:`datetime.datetime` into a human-friendly string."""
fmt = "%d %B at %H:%M"
if timestamp.year < now().year:
fmt = "%d %B %Y at %H:%M"
return timestamp.strftime(fmt)
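
# Quick sketch of the two formatting branches; `fake_now` is a stand-in clock
# used only for this illustration.
import datetime

fake_now = lambda: datetime.datetime(2024, 5, 1)
print(human_timestamp(datetime.datetime(2024, 3, 2, 14, 30), now=fake_now))
# -> 02 March at 14:30        (same year: no year in the format)
print(human_timestamp(datetime.datetime(2021, 3, 2, 14, 30), now=fake_now))
# -> 02 March 2021 at 14:30   (earlier year: year included)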
| 14,519
|
def inception_block(x: tf.keras.layers.Layer, nb_filters: int=64, name: str="block1"):
"""
3D inception block, as per Itzik et al. (2018)
"""
conv3d = partial(Conv3D, activation="linear", use_bias=False, padding="same")
batchn = partial(BatchNormalization, momentum=0.99, fused=True)
activn = partial(Activation, activation="relu")
conv_1x1 = conv3d(nb_filters, (1, 1, 1), name=name + "_1x1_conv3d")(x)
conv_1x1 = batchn(name=name + "_1x1_bn")(conv_1x1)
conv_1x1 = activn(name=name + "_1x1_relu")(conv_1x1)
conv_3x3 = conv3d(nb_filters // 2, (3, 3, 3), name=name + "_3x3_conv3d")(conv_1x1)
conv_3x3 = batchn(name=name + "_3x3_bn")(conv_3x3)
conv_3x3 = activn(name=name + "_3x3_relu")(conv_3x3)
conv_5x5 = conv3d(nb_filters // 2, (5, 5, 5), name=name + "_5x5_conv3d")(conv_1x1)
conv_5x5 = batchn(name=name + "_5x5_bn")(conv_5x5)
conv_5x5 = activn(name=name + "_5x5_relu")(conv_5x5)
avgpool = AvgPool3D(strides=(1, 1, 1), pool_size=(3, 3, 3), padding="same", name=name+"_avgpool")(x)
avgpool = conv3d(nb_filters, (1, 1, 1), name=name + "_avgpool_conv3d")(avgpool)
avgpool = batchn(name=name + "_avgpool_bn")(avgpool)
avgpool = activn(name=name + "_avgpool_relu")(avgpool)
return Concatenate(axis=-1, name=name+"_concat")([conv_1x1, conv_3x3, conv_5x5, avgpool])
| 14,520
|
def _flatten_dict(d, parent_key='', sep='/'):
"""Flattens a dictionary, keeping empty leaves."""
items = []
for k, v in d.items():
path = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(_flatten_dict(v, path, sep=sep).items())
else:
items.append((path, v))
# Keeps the empty dict if it was set explicitly.
if parent_key and not d:
items.append((parent_key, {}))
return dict(items)
| 14,521
|
def secret():
"""
Authenticated only route
@authenticated will flash a message if not authed
"""
return render_template('secret.html')
| 14,522
|
def init_logging(log_level):
"""
Initialise the logging by adding an observer to the global log publisher.
:param str log_level: The minimum log level to log messages for.
"""
log_level_filter = LogLevelFilterPredicate(
LogLevel.levelWithName(log_level))
log_level_filter.setLogLevelForNamespace(
'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
log_observer = FilteringLogObserver(
textFileLogObserver(sys.stdout), [log_level_filter])
globalLogPublisher.addObserver(log_observer)
| 14,523
|
def mk_graph(img_dim, num_labels, poly_width = 3, depth = 3, hidd_repr_size = 512):
""" The function that creates and returns the graph required to
img_dim = image dimensions (Note, that the image needs to be flattened out before feeding here)
num_labels = no_of classes to classify into
"""
comp_graph = tf.Graph()
with comp_graph.as_default():
# step 1: Create the input placeholders for the input to the computation
with tf.name_scope("Input"):
            tf_input_images = tf.placeholder(tf.float32, shape=(None, img_dim), name="Input_Images")
tf_input_labels = tf.placeholder(tf.float32, shape=(None, num_labels), name="Input_Labels")
print("\nInput Placeholder Tensors:", tf_input_images, tf_input_labels)
# step 2: Construct the network architecture based on the width and the depth specified
# Note that this is static graph creation
# There doesn't seem to be any reason for dynamic graph building
def neural_layer(input, out_dim, step):
""" The method that defines a single neural layer
"""
# method to calculate the factorial of a number
factorial = lambda x: 1 if(x <= 1) else x * factorial(x - 1)
with tf.variable_scope("neural_layer"+str(step)):
# create the variable tensors ->
# additive bias
bias = tf.get_variable("bias", shape=(out_dim), initializer=tf.zeros_initializer())
# additive weight transformations
inp_dim = input.get_shape()[-1]
weights = [tf.get_variable("weight"+str(i), shape=(inp_dim, out_dim),
initializer=tf.contrib.layers.xavier_initializer(seed = FLAGS.seed_value))
for i in range(1, poly_width)]
# attach the summary ops to the biases and weights
bias_summary = tf.summary.histogram("Layer"+str(step)+"/bias", bias)
weights_summary = [tf.summary.histogram("Layer"+str(step)+"/"+weight.name, weight)
for weight in weights]
                # define the computation ops for this layer
out = bias # initialize the output tensor
for degree in range(1, poly_width):
out = out + tf.matmul(tf.pow(input, degree) / factorial(degree), weights[degree - 1])
return out # return the calculated tensor
if(depth > 1):
lay1_out = neural_layer(tf_input_images, hidd_repr_size, 1)
else:
lay1_out = neural_layer(tf_input_images, num_labels, 1)
# define the while loop for creating the hidden layer computations
lay_out = lay1_out # initialize to output of first layer
for lay_no in range(2, depth):
lay_out = neural_layer(lay_out, hidd_repr_size, lay_no)
# define the output layer
if(depth > 1):
output = neural_layer(lay_out, num_labels, depth)
else:
output = lay1_out
print("Final output:", output)
return comp_graph, {"output": output, "labels": tf_input_labels, "input": tf_input_images}
| 14,524
|
def bandpassHLS_1_4(img, band, satsen):
"""Bandpass function applied to Sentinel-2 data as followed in HLS 1.4 products.
Reference:
Claverie et. al, 2018 - The Harmonized Landsat and Sentinel-2 surface reflectance data set.
Args:
img (array): Array containing image pixel values.
band (str): Band that will be processed, which can be 'B02','B03','B04','B8A','B01','B11' or 'B12'.
satsen (str): Satellite sensor, which can be 'S2A' or 'S2B'.
Returns:
array: Array containing image pixel values bandpassed.
"""
logging.info('Applying bandpass band {} satsen {}'.format(band, satsen))
# Skakun et. al, 2018 - Harmonized Landsat Sentinel-2 (HLS) Product User’s Guide
if satsen == 'S2A':
if band == 'coastal': # UltraBlue/coastal #MODIS don't have this band # B01
slope = 0.9959
offset = -0.0002
elif band == 'blue': # Blue # B02
slope = 0.9778
offset = -0.004
elif band == 'green': # Green # B03
slope = 1.0053
offset = -0.0009
elif band == 'red': # Red # B04
slope = 0.9765
offset = 0.0009
elif band == 'nir': # Nir # B08 B8A
slope = 0.9983
offset = -0.0001
elif band == 'swir1': # Swir 1 # B11
slope = 0.9987
offset = -0.0011
elif band == 'swir2': # Swir 2 # B12
slope = 1.003
offset = -0.0012
img = numpy.add(numpy.multiply(img, slope), offset)
elif satsen == 'S2B':
logging.debug("S2B")
if band == 'coastal': # UltraBlue/coastal #MODIS don't have this band # B01
slope = 0.9959
offset = -0.0002
elif band == 'blue': # Blue # B02
slope = 0.9778
offset = -0.004
elif band == 'green': # Green # B03
slope = 1.0075
offset = -0.0008
elif band == 'red': # Red # B04
slope = 0.9761
offset = 0.001
elif band == 'nir': # Nir # B08 B8A
slope = 0.9966
offset = 0.000
elif band == 'swir1': # Swir 1 # B11
slope = 1.000
offset = -0.0003
elif band == 'swir2': # Swir 2 # B12
slope = 0.9867
offset = -0.0004
img = numpy.add(numpy.multiply(img, slope), offset)
return img
| 14,525
|
def save_dataset(df: pd.DataFrame, filename: str, sep) -> None:
"""Save dataset in format CSV
"""
file_path_to_save = f"{config.DATASET_DIR}/{filename}"
df.to_csv(file_path_to_save, sep=sep, index=None)
| 14,526
|
def test_coordinates(device, backend, random_state=42):
"""
Tests whether the coordinates correspond to the actual values (obtained
with Scattering1d.meta()), and with the vectorization
"""
torch.manual_seed(random_state)
J = 6
Q = 8
T = 2**12
scattering = Scattering1D(J, T, Q, max_order=2, backend=backend, frontend='torch')
x = torch.randn(2, T)
scattering.to(device)
x = x.to(device)
for max_order in [1, 2]:
scattering.max_order = max_order
scattering.vectorize = False
if backend.name.endswith('skcuda') and device == 'cpu':
with pytest.raises(TypeError) as ve:
s_dico = scattering(x)
assert "CPU" in ve.value.args[0]
else:
s_dico = scattering(x)
s_dico = {k: s_dico[k].data for k in s_dico.keys()}
scattering.vectorize = True
if backend.name.endswith('_skcuda') and device == 'cpu':
with pytest.raises(TypeError) as ve:
s_vec = scattering(x)
assert "CPU" in ve.value.args[0]
else:
s_vec = scattering(x)
s_dico = {k: s_dico[k].cpu() for k in s_dico.keys()}
s_vec = s_vec.cpu()
meta = scattering.meta()
if not backend.name.endswith('_skcuda') or device != 'cpu':
assert len(s_dico) == s_vec.shape[1]
for cc in range(s_vec.shape[1]):
k = meta['key'][cc]
assert torch.allclose(s_vec[:, cc], torch.squeeze(s_dico[k]))
| 14,527
|
def test_get_or_create_suggested_object_id(registry):
"""Test that suggested_object_id works."""
entry = registry.async_get_or_create(
'light', 'hue', '1234', suggested_object_id='beer')
assert entry.entity_id == 'light.beer'
| 14,528
|
def read_dictionary(vocab_path):
"""
    Read the word2id dictionary from the given vocabulary path.
:param vocab_path:
:return:
"""
vocab_path = os.path.join(vocab_path)
with open(vocab_path, 'rb') as fr:
word2id = pickle.load(fr)
return word2id
| 14,529
|
def createrawtransaction(inputs, outputs, outScriptGenerator=p2pkh):
"""
Create a transaction with the exact input and output syntax as the bitcoin-cli "createrawtransaction" command.
If you use the default outScriptGenerator, this function will return a hex string that exactly matches the
output of bitcoin-cli createrawtransaction.
But this function is extended beyond bitcoin-cli in the following ways:
inputs can have a "sig" field which is a binary hex string of the signature script
outputs can be a list of tuples rather than a dictionary. In that format, they can pass complex objects to
the outputScriptGenerator (use a tuple or an object), be a list (that is passed to CScript()), or a callable
"""
if not type(inputs) is list:
inputs = [inputs]
tx = CTransaction()
for i in inputs:
sigScript = i.get("sig", b"")
tx.vin.append(CTxIn(COutPoint(i["txid"], i["vout"]), sigScript, 0xffffffff))
pairs = []
if type(outputs) is dict:
for addr, amount in outputs.items():
pairs.append((addr,amount))
else:
pairs = outputs
for addr, amount in pairs:
if callable(addr):
tx.vout.append(CTxOut(amount * BTC, addr()))
elif type(addr) is list:
tx.vout.append(CTxOut(amount * BTC, CScript(addr)))
elif addr == "data":
tx.vout.append(CTxOut(0, CScript([OP_RETURN, unhexlify(amount)])))
else:
tx.vout.append(CTxOut(amount * BTC, outScriptGenerator(addr)))
tx.rehash()
return hexlify(tx.serialize()).decode("utf-8")
| 14,530
|
def get_versions() -> List[str]:
"""
Gets a list of recognized CRSD urns.
Returns
-------
List[str]
"""
return list(sorted(urn_mapping.keys()))
| 14,531
|
def test_cube_io_larger_case_ertrun(tmp_path):
"""Larger test cube io as ERTRUN, uses global config from Drogon to tmp_path.
Need some file acrobatics here to make the tmp_path area look like an ERTRUN first.
"""
current = tmp_path / "scratch" / "fields" / "user"
current.mkdir(parents=True, exist_ok=True)
shutil.copytree(CASEPATH, current / "mycase")
runfolder = current / "mycase" / "realization-0" / "iter-0" / "rms" / "model"
runfolder.mkdir(parents=True, exist_ok=True)
out = (
current
/ "mycase"
/ "realization-0"
/ "iter-0"
/ "share"
/ "observations"
/ "cubes"
)
# alternative 1, set inside_rms True (developer setting for testing)
exp1 = fmu.dataio.ExportData(
config=CFG2,
name="Volantis",
content="depth",
unit="m",
vertical_domain={"depth": "msl"},
timedata=[[20290101, "monitor"], [19990601, "base"]],
is_prediction=True,
is_observation=True,
tagname="what Descr",
verbosity="INFO",
runfolder=runfolder.resolve(),
inside_rms=True,
workflow="my current workflow",
)
cube = xtgeo.Cube(
ncol=23, nrow=12, nlay=5, xinc=25.0, yinc=25.0, zinc=2.0, values=0.0
)
exp1.export(cube, verbosity="INFO")
metadataout = out / ".volantis--what_descr--20290101_19990601.segy.yml"
assert metadataout.is_file() is True
# now read the metadata file and test some key entries:
with open(metadataout, "r", encoding="utf8") as mstream:
meta = yaml.safe_load(mstream)
assert (
meta["file"]["relative_path"]
== "realization-0/iter-0/share/observations/cubes/volantis--what_descr"
+ "--20290101_19990601.segy"
)
assert meta["fmu"]["model"]["name"] == "ff"
assert meta["fmu"]["iteration"]["name"] == "iter-0"
assert meta["fmu"]["realization"]["name"] == "realization-0"
assert meta["data"]["stratigraphic"] is False
assert meta["data"]["bbox"]["xmin"] == 0.0
assert meta["data"]["bbox"]["xmax"] == 550.0
logger.info("\n%s", json.dumps(meta, indent=2))
| 14,532
|
def k2_factor_sq(df=inf,p=95):
"""Return a squared coverage factor for an elliptical uncertainty region
:arg df: the degrees-of-freedom (>=2)
:arg p: the coverage probability (%)
:type df: float
:type p: int or float
Evaluates the square of the coverage factor for an elliptical uncertainty
region with coverage probability ``p`` and ``df`` degrees of freedom
based on the F-distribution.
**Example**::
>>> reporting.k2_factor_sq(3)
56.99999999999994
"""
p = p / 100.0
if df > inf_dof:
return -2.0 * math.log(1-p)
elif(df>1):
# norm = l * (n-1) / (n - l) in the general
# 'l'-dimensional case for 'n' observations
# here l = 2, df = n-1
norm = 2*df / (df-1)
# `fdtri` is the inverse of the cumulative F distribution
# returning `x` such that `fdtr(dfn, dfd, x) = p`
return norm*special.fdtri(2.0,df-1.0,p)
else:
raise RuntimeError("invalid df={!r}".format( df ) )
| 14,533
|
def get_cluster_version_path(cluster_id):
"""
Gives s3 full path of cluster_version file of a given cluster_id
"""
base_path = s3.get_cluster_info_base_path()
return "%s/%s/cluster_version.json"%(base_path, cluster_id)
| 14,534
|
def MsfParser(f):
"""Read sequences from a msf format file"""
alignmentdict = {}
# parse optional header
# parse optional text information
    # file header and sequence header are separated by a line ending in '..'
line = f.readline().strip()
for line in f:
line = line.strip()
if line.endswith(".."):
break
# parse sequence info
seqinfo = {}
for line in f:
line = line.strip()
if line.startswith("//"):
break
line = line.split()
if line and line[0] == "Name:":
seqinfo[line[1]] = int(line[3])
# parse sequences
sequences = {}
for line in f:
line = line.strip().split()
if line and line[0] in sequences:
sequences[line[0]] += "".join(line[1:])
elif line and line[0] in seqinfo:
sequences[line[0]] = "".join(line[1:])
# consistency check
if len(sequences) != len(seqinfo):
warnings.warn(
"Number of loaded seqs[%s] not same as "
"expected[%s]." % (len(sequences), len(seqinfo))
)
for name in sequences:
if len(sequences[name]) != seqinfo[name]:
warnings.warn(
"Length of loaded seqs [%s] is [%s] not "
"[%s] as expected." % (name, len(sequences[name]), seqinfo[name])
)
# yield sequences
for name in sequences:
yield (name, sequences[name])
| 14,535
|
def import_module_attribute(function_path):
"""Import and return a module attribute given a full path."""
module, attribute = function_path.rsplit(".", 1)
app_module = importlib.import_module(module)
return getattr(app_module, attribute)
| 14,536
|
def rgb_to_hex(red, green, blue):
"""Give three color arrays, return a list of hex RGB strings"""
pat = "#{0:02X}{1:02X}{2:02X}"
return [pat.format(r & 0xff, g & 0xff, b & 0xff)
for r, g, b in zip(red, green, blue)]
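
# Worked example: three parallel channel sequences map to one hex string each.
print(rgb_to_hex([255, 0], [128, 255], [0, 64]))
# -> ['#FF8000', '#00FF40']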
| 14,537
|
def run_cli(cmd: str, print_output: bool = True, check: bool = False,) -> Tuple[int, str, str]:
"""Runs the command with `dcos` as the prefix to the shell command
and returns a tuple containing exit code, stdout, and stderr.
eg. `cmd`= "package install pkg-name" results in:
$ dcos package install pkg-name
"""
dcos_cmd = "dcos {}".format(cmd)
log.info("(CLI) {}".format(dcos_cmd))
return _run_cmd(dcos_cmd, print_output, check)
| 14,538
|
def get_initialization(arg, indent_count):
"""Get the initialization string to use for this argument."""
t = get_base_c_type(arg)
if arg.is_array():
if t == "char*":
init = '[] = { "String 1", "String 2", "String 0" }'
else:
if arg.is_dictionary() or arg.is_structure():
indent = indent_count * " "
si0 = __make_structure_init_string(arg, 0)
si1 = __make_structure_init_string(arg, 1)
si2 = __make_structure_init_string(arg, 2)
f = "[3] =\n{0}{{ {1},\n{0} {2},\n{0} {3} }}"
init = f.format(indent, si0, si1, si2)
else:
init = "[10] = { 0 }"
else:
if arg.arg_type == "b":
init = " = FALSE"
elif arg.arg_type == "d":
init = " = 0.0"
elif arg.is_structure():
init = " = {0}".format(__make_structure_init_string(arg))
else:
init = " = 0"
return init
| 14,539
|
def get_time_source_from_output(output):
""" Parse out 'Time Source' value from output
Time source output example : 'Time source is NTP, 23:59:38.461 EST Thu Jun 27 2019'
'Time source is NTP, *12:33:45.355 EST Fri Feb 7 2020'
Args:
output ('str'): Text output from command
Returns:
Datetime object
Format : datetime(year, month, day, hour, minute, second, microseconds)
"""
r1 = re.compile(
r"Time\ssource\sis\sNTP\,\s\.*\*?(?P<hour>\d+)\:(?P<minute>\d+)\:"
r"(?P<seconds>\d+)\.(?P<milliseconds>\d+)\s(?P<time_zone>"
r"\S+)\s(?P<day_of_week>\S+)\s(?P<month>\S+)\s(?P<day>\d+)"
r"\s(?P<year>\d+)")
for line in output.splitlines():
line = line.strip()
result = r1.match(line)
if result:
group = result.groupdict()
hour = int(group["hour"])
minute = int(group["minute"])
second = int(group["seconds"])
milliseconds = int(group["milliseconds"])
month = strptime(group["month"], "%b").tm_mon
day = int(group["day"])
year = int(group["year"])
return datetime(year, month, day, hour, minute, second,
milliseconds * 1000)
log.warning('Time source could not be found in output')
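
# Sketch using the sample line quoted in the docstring (assumes the module-level
# `re`, `strptime`, `datetime` and `log` objects that the function relies on).
sample = "Time source is NTP, 23:59:38.461 EST Thu Jun 27 2019"
print(get_time_source_from_output(sample))
# -> 2019-06-27 23:59:38.461000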
| 14,540
|
def getStyleFX():
"""
Defines and returns the style effects
Returns: style effects (list of MNPR_FX)
"""
# general effects
distortionFX = MNPR_FX("distortion", "Substrate distortion", "controlSetB", [[1, 0, 0, 0]], ["distort", "revert"], ["noise"])
gapsOverlapsFX = MNPR_FX("gaps-overlaps", "Gaps and overlaps", "controlSetC", [[0, 0, 1, 0]], ["overlaps", "gaps"], ["noise"])
# watercolor effects
densityFX_WC = MNPR_FX("density", "Pigment turbulence", "controlSetA", [[0, 0, 1, 0]], ["accumulate", "dilute"], ["noise"])
applicationFX_WC = MNPR_FX("application", "Granulate | Dry-brush", "controlSetA", [[0, 1, 0, 0]], ["granulate", "dry-brush"], ["noise"])
blendingFX_WC = MNPR_FX("blending", "Color bleeding (wet-in-wet)", "controlSetC", [[0, 0, 0, 1]], ["bleed", "revert"], ["noise"])
edgeFX_WC = MNPR_FX("edge manip", "Edge darkening", "controlSetC", [[1, 0, 0, 0], [0, 1, 0, 0]], ["darken", "lighten", "wider", "narrower"], ["n. dark", "n. wide"])
watercolorFX = [densityFX_WC, applicationFX_WC, distortionFX, edgeFX_WC, gapsOverlapsFX, blendingFX_WC]
# oil effects
densityFX_OP = MNPR_FX("density", "Pigment turbulence", "controlSetA", [[0, 0, 1, 0]], ["accumulate", "dilute"], ["noise"])
blendingFX_OP = MNPR_FX("blending", "Paint stroke length", "controlSetC", [[0, 0, 0, 1]], ["increase", "decrease"], ["noise"])
detailFX_OP = MNPR_FX("detail", "Paint stroke width", "controlSetA", [[0, 0, 0, 1]], ["increase", "decrease"], ["noise"])
applicationFX_OP = MNPR_FX("application", "Impasto | Dry-brush", "controlSetA", [[0, 1, 0, 0]], ["impasto", "dry-brush"], ["noise"])
oilFX = [densityFX_OP, blendingFX_OP, detailFX_OP, applicationFX_OP, distortionFX, gapsOverlapsFX]
# charcoal effects
densityFX_CH = MNPR_FX("density", "Pigment density", "controlSetA", [[0, 0, 1, 0]], ["accumulate", "dilute"], ["noise"])
applicationFX_CH = MNPR_FX("application", "Pigment application", "controlSetA", [[0, 1, 0, 0]], ["even", "granulation"], ["noise"])
mixingFX_CH = MNPR_FX("mixing", "Mixing", "controlSetC", [[0, 0, 0, 1]], ["mix", "separate"], ["noise"])
smudgingFX_CH = MNPR_FX("smudging", "Smudging", "controlSetA", [[0, 0, 0, 1]], ["smudge", "revert"], ["noise"])
edgeFX_CH = MNPR_FX("edge manip", "Edge manipulation", "controlSetC", [[1, 0, 0, 0]], ["soften", "revert"], ["n. soften", "n. darken"])
charcoalFX = [distortionFX, densityFX_CH, applicationFX_CH, mixingFX_CH, smudgingFX_CH, edgeFX_CH]
# query mnpr style and return
style = cmds.mnpr(style=True, q=True).encode('latin1') # some users have had problems without encode('latin1')
if style == "Watercolor":
return watercolorFX
elif style == "Oil":
return oilFX
elif style == "Charcoal":
return charcoalFX
return []
| 14,541
|
def raise_from(exccls, message, exc):
"""
raise exc with new message
"""
raise exccls(message) from exc
| 14,542
|
def deaths_this_year() -> dict:
"""Get number of deaths this year."""
return get_metric_of(label='deaths_this_year')
| 14,543
|
def get_cls_dropdown_tree_view_item(object_name):
"""Get and return class of TreeViewItem Dropdown object according to
snapshotability
"""
base_cls = tree_view_item.CommonDropdownTreeViewItem
if object_name in objects.ALL_SNAPSHOTABLE_OBJS:
base_cls = tree_view_item.SnapshotsDropdownTreeViewItem
return _factory(cls_name=object_name, parent_cls=base_cls)
| 14,544
|
def make_webhdfs_url(host, user, hdfs_path, op, port=50070):
""" Forms the URL for httpfs requests.
INPUT
-----
host : str
The host to connect to for httpfs access to HDFS. (Can be 'localhost'.)
user : str
The user to use for httpfs connections.
hdfs_path : str
The full path of the file or directory being checked.
op : str
The httpfs operation string. E.g., 'GETFILESTATUS'.
port : int
The port to use for httpfs connections.
OUTPUT
------
str : The string to use for an HTTP request to httpfs.
"""
url = 'http://' + host + ':' + str(port) + '/webhdfs/v1'
url += hdfs_path + '?user.name=' + user + '&op=' + op
return url
| 14,545
|
def send_email(to, subject, template):
"""
    If Mailgun's API goes down, fail over to Flask-Mail.
    Takes a recipient address, a subject, and an email template.
"""
msg = Message(
subject,
recipients=[to],
html=template,
sender=''
)
mail.send(msg)
| 14,546
|
def get_slash_mapping(bot: commands.Bot):
"""Get all the prefix commands groupped by category."""
categories = {}
for command in bot.slash_commands:
if command:
category_name = get_cog_category(command.cog)
# categories are organized by cog folders
try:
categories[category_name].append(command)
except KeyError:
categories[category_name] = [command]
return categories
| 14,547
|
def test_bootstrap_from_submodule_no_locale(tmpdir, testpackage, capsys,
monkeypatch):
"""
Regression test for https://github.com/astropy/astropy/issues/2749
    Runs test_bootstrap_from_submodule but with missing locale/language
settings.
"""
for varname in ('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE'):
monkeypatch.delenv(varname, raising=False)
test_bootstrap_from_submodule(tmpdir, testpackage, capsys)
| 14,548
|
def rose_plot(ax, angles, bins=16, density=None, offset=0, lab_unit="degrees",
start_zero=False, **param_dict):
"""
Plot polar histogram of angles on ax. ax must have been created using
subplot_kw=dict(projection='polar'). Angles are expected in radians.
** This function is copied directly from user Ralph on stackoverflow
at https://stackoverflow.com/a/55067613 **
"""
# Wrap angles to [-pi, pi)
angles = (angles + np.pi) % (2*np.pi) - np.pi
    # Set bins symmetrically around zero
if start_zero:
# To have a bin edge at zero use an even number of bins
if bins % 2:
bins += 1
bins = np.linspace(-np.pi, np.pi, num=bins+1)
# Bin data and record counts
count, bin = np.histogram(angles, bins=bins)
# Compute width of each bin
widths = np.diff(bin)
# By default plot density (frequency potentially misleading)
if density is None or density is True:
# Area to assign each bin
area = count / angles.size
# Calculate corresponding bin radius
radius = (area / np.pi)**.5
else:
radius = count
# Plot data on ax
ax.bar(bin[:-1], radius, zorder=1, align='edge', width=widths,
edgecolor='C0', fill=False, linewidth=1)
# Set the direction of the zero angle
ax.set_theta_offset(offset)
# Remove ylabels, they are mostly obstructive and not informative
ax.set_yticks([])
if lab_unit == "radians":
label = ['$0$', r'$\pi/4$', r'$\pi/2$', r'$3\pi/4$',
r'$\pi$', r'$5\pi/4$', r'$3\pi/2$', r'$7\pi/4$']
ax.set_xticklabels(label)
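
# Minimal usage sketch: the axes must be polar and the angles in radians; the
# von Mises sample below is just illustrative data.
import numpy as np
import matplotlib.pyplot as plt

angles = np.random.vonmises(mu=0.0, kappa=2.0, size=500)
fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))
rose_plot(ax, angles, bins=16, start_zero=True)
plt.show()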
| 14,549
|
def create_xml_regression(lfiles, lsbj, foxml):
"""
take a list of files, create a xml file data_set.xml
only 'one' subject, each case seen as a visit...
"""
impl = xml.dom.minidom.getDOMImplementation()
doc = impl.createDocument(None, "some_tag", None)
top_element = doc.documentElement
e = doc.createElement('subject')
e.setAttribute('id', 'case')
for i, fn in enumerate(lfiles):
v = doc.createElement('visit')
v.setAttribute('id', "subj{}".format(i))
f = doc.createElement('filename')
f.setAttribute('object_id', "face")
t = doc.createTextNode(fn)
f.appendChild(t)
a = doc.createElement('age')
x = doc.createTextNode(str(lsbj[i]["age"]))
a.appendChild(x)
v.appendChild(f)
v.appendChild(a)
e.appendChild(v)
top_element.appendChild(e)
with open(foxml, "w") as fo:
fo.write(doc.toprettyxml())
| 14,550
|
def generate_site():
"""Generate site in local directory"""
shutil.rmtree(SITE_DIR, ignore_errors=True)
SITE_DIR.mkdir()
print("Copy template to site folder")
for filename in TEMPLATE_DIR.iterdir():
if filename.is_dir():
shutil.copytree(str(filename), SITE_DIR / filename.name)
elif filename.name != "template.html" and filename.name != ".DS_Store":
shutil.copy(str(filename), SITE_DIR)
template_loader = jinja2.FileSystemLoader(searchpath=TEMPLATE_DIR)
template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template("template.html")
print("Process data and build site")
csv_files = [
filename for filename in DATA_DIR.iterdir() if filename.suffix == ".csv"
]
csv_files.sort()
lists_all = []
for csv_file in csv_files:
original_name = re.search(r"[0-9]_(.*?)\.csv", csv_file.name).group(1)
processed_name = LISTS_MAPPING.get(original_name, original_name)
with open(str(csv_file), mode="r") as csv_file:
csv_reader = csv.DictReader(csv_file)
list_ind = [row for row in csv_reader]
lists_all.append((original_name, processed_name, list_ind))
curr_date = datetime.now().strftime("%B %-d, %Y")
output = template.render(
lists=lists_all, last_update=curr_date, meta_content=META_CONTENT,
)
with open(SITE_DIR / "index.html", "w") as f:
f.write(output)
| 14,551
|
def get_memcached_client(servers, debug=False):
"""
mc.set("name", "python")
ret = mc.get('name')
print(ret)
"""
if isinstance(servers, str):
servers = servers.split(',')
return memcache.Client(servers, debug=debug)
| 14,552
|
def part_to_text(part):
"""
Converts an e-mail message part into text.
Returns None if the message could not be decoded as ASCII.
:param part: E-mail message part.
:return: Message text.
"""
if part.get_content_type() != 'text/plain':
return None
charset = part.get_content_charset()
if not charset:
return None
text = str(part.get_payload(decode=True), encoding=charset, errors='ignore')
try:
text = str(text.encode('ascii'), 'ascii')
except UnicodeEncodeError:
return None
except UnicodeDecodeError:
return None
if part.get_param('format') == 'flowed':
text = unflow_text(text, part.get_param('delsp', False))
return text
| 14,553
|
def icwt(Wx, wavelet='gmw', scales='log-piecewise', nv=None, one_int=True,
x_len=None, x_mean=0, padtype='zero', rpadded=False, l1_norm=True):
"""The inverse Continuous Wavelet Transform of `Wx`, via double or
single integral.
# Arguments:
Wx: np.ndarray
CWT computed via `ssqueezepy.cwt`.
wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain.
- str: name of builtin wavelet. `ssqueezepy.wavs()`
- tuple[str, dict]: name of builtin wavelet and its configs.
E.g. `('morlet', {'mu': 5})`.
- `wavelets.Wavelet` instance. Can use for custom wavelet.
scales: str['log', 'linear', 'log:maximal', ...] / np.ndarray
See help(cwt).
nv: int / None
Number of voices. Suggested >= 32. Needed if `scales` isn't array
(will default to `cwt`'s).
one_int: bool (default True)
Whether to use one-integral iCWT or double.
Current one-integral implementation performs best.
- True: Eq 2.6, modified, of [3]. Explained in [4].
- False: Eq 4.67 of [1]. Explained in [5].
x_len: int / None. Length of `x` used in forward CWT, if different
from Wx.shape[1] (default if None).
x_mean: float. mean of original `x` (not picked up in CWT since it's an
infinite scale component). Default 0.
padtype: str
Pad scheme to apply on input, in case of `one_int=False`.
See `help(utils.padsignal)`.
rpadded: bool (default False)
True if Wx is padded (e.g. if used `cwt(, rpadded=True)`).
l1_norm: bool (default True)
True if Wx was obtained via `cwt(, l1_norm=True)`.
# Returns:
x: np.ndarray
The signal, as reconstructed from Wx.
# References:
1. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
2. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications. G. Thakur,
E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
4. One integral inverse CWT. OverLordGoldDragon.
https://dsp.stackexchange.com/a/71274/50076
5. Inverse CWT derivation. OverLordGoldDragon.
https://dsp.stackexchange.com/a/71148/50076
6. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_cwt_fw.m
"""
#### Prepare for inversion ###############################################
na, n = Wx.shape
x_len = x_len or n
if not isinstance(scales, np.ndarray) and nv is None:
nv = 32 # must match forward's; default to `cwt`'s
wavelet = _process_gmw_wavelet(wavelet, l1_norm)
wavelet = Wavelet._init_if_not_isinstance(wavelet)
# will override `nv` to match `scales`'s
scales, scaletype, _, nv = process_scales(scales, x_len, wavelet, nv=nv,
get_params=True)
assert (len(scales) == na), "%s != %s" % (len(scales), na)
#### Handle piecewise scales case ########################################
# `nv` must be left unspecified so it's inferred automatically from `scales`
# in `process_scales` for each piecewise case
if scaletype == 'log-piecewise':
kw = dict(wavelet=wavelet, one_int=one_int, x_len=x_len, x_mean=x_mean,
padtype=padtype, rpadded=rpadded, l1_norm=l1_norm)
idx = logscale_transition_idx(scales)
x = icwt(Wx[:idx], scales=scales[:idx], **kw)
x += icwt(Wx[idx:], scales=scales[idx:], **kw)
return x
##########################################################################
#### Invert ##############################################################
if one_int:
x = _icwt_1int(Wx, scales, scaletype, l1_norm)
else:
x = _icwt_2int(Wx, scales, scaletype, l1_norm,
wavelet, x_len, padtype, rpadded)
# admissibility coefficient
Cpsi = (adm_ssq(wavelet) if one_int else
adm_cwt(wavelet))
if scaletype == 'log':
# Eq 4.67 in [1]; Theorem 4.5 in [1]; below Eq 14 in [2]
# ln(2**(1/nv)) == ln(2)/nv == diff(ln(scales))[0]
x *= (2 / Cpsi) * np.log(2 ** (1 / nv))
else:
x *= (2 / Cpsi)
x += x_mean # CWT doesn't capture mean (infinite scale)
return x
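
# Hedged round-trip sketch, assuming ssqueezepy's `cwt` returns (Wx, scales) as
# in its documented API; the reconstruction error should be small.
import numpy as np
from ssqueezepy import cwt

t = np.linspace(0, 1, 2048, endpoint=False)
x = np.cos(2 * np.pi * 16 * t) + 0.5 * np.sin(2 * np.pi * 40 * t)
Wx, scales = cwt(x, 'gmw')
x_rec = icwt(Wx, 'gmw', scales=scales, x_mean=x.mean())
print(np.max(np.abs(x - x_rec)))   # expected to be a small residual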
| 14,554
|
def AdvApp2Var_MathBase_msc_(*args):
"""
:param ndimen:
:type ndimen: integer *
:param vecte1:
:type vecte1: doublereal *
:param vecte2:
:type vecte2: doublereal *
:rtype: doublereal
"""
return _AdvApp2Var.AdvApp2Var_MathBase_msc_(*args)
| 14,555
|
def chunkify(arr, n):
"""Breaks a list into n chunks.
Last chunk may not be equal in size to other chunks
"""
return [arr[i : i + n] for i in range(0, len(arr), n)]
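
# Worked example: chunks of size 3; the final chunk holds the remainder.
print(chunkify(list(range(7)), 3))
# -> [[0, 1, 2], [3, 4, 5], [6]]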
| 14,556
|
def featurize(data):
"""
put in a data table and get one back including features
"""
#try gradient
| 14,557
|
def _run_command(command, targets, options):
# type: (str, List[str], List[str]) -> bool
"""Runs `command` + `targets` + `options` in a
subprocess and returns a boolean determined by the
process return code.
    >>> result = _run_command('pylint', ['foo.py', 'some_module'], ['-E'])
>>> result
True
:param command: str
:param targets: List[str]
:param options: List[str]
:return: bool
"""
print('{0}: targets={1} options={2}'.format(command, targets, options))
cmd = [command] + targets + options
process = Popen(cmd)
process.wait()
return bool(process.returncode)
| 14,558
|
def _usage():
"""Print command line usage."""
txt = "[INFO] Usage: %s ldt_file topdatadir startYYYYMM model_forcing" \
%(sys.argv[0])
print(txt)
print("[INFO] where:")
print("[INFO] ldt_file is path to LDT parameter file")
print("[INFO] topdatadir is top-level directory with LIS data")
print("[INFO] startYYYYMM is year-month of start of LIS forecast")
print("[INFO] model_forcing is ID for atmospheric forcing for LIS")
| 14,559
|
def find_untranscribed_words(
gt: Counter, machine: Counter
) -> List[Dict[str, any]]:
"""
Finds untranscribed words.
That is, we find if there exist words in the GT which never occur in the machine transcription.
:param gt: Counter of GT words.
:param machine: Counter of machine words.
:return: List of word/counts which occur in GT but not (or infrequently) in the machine transcription.
"""
result: List[Dict[str, any]] = []
for word, gt_count in gt.most_common():
if word not in machine:
machine_count = 0
else:
machine_count = machine[word]
if gt_count > 0 and machine_count == 0:
r = {"word": word, "machine": machine_count, "gt": gt_count}
result.append(r)
return result
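
# Small worked example with collections.Counter inputs: "friend" appears in the
# ground truth but never in the machine transcript, so it is reported.
from collections import Counter

gt = Counter({"hello": 3, "world": 2, "friend": 1})
machine = Counter({"hello": 3, "world": 2})
print(find_untranscribed_words(gt, machine))
# -> [{'word': 'friend', 'machine': 0, 'gt': 1}]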
| 14,560
|
def get_vcvars(vs_tools, arch):
"""Get the VC tools environment using vswhere.exe or buildtools docker
This is intended to work either when VS is in its standard installation
location, or when the docker instructions have been followed, and we can
find Visual C++ in C:/BuildTools.
Visual Studio provides a docker image with instructions here:
https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container?view=vs-2019
This vswhere code is following the guidelines from strategy 1 in this blog
post:
https://blogs.msdn.microsoft.com/vcblog/2017/03/06/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
It doesn't work when VS is not installed at the default location.
"""
if not arch:
# First check the wow64 processor architecture, since python is probably
# 32-bit, then fall back to PROCESSOR_ARCHITECTURE.
arch = os.environ.get('PROCESSOR_ARCHITEW6432', '').lower()
if not arch:
arch = os.environ.get('PROCESSOR_ARCHITECTURE', '').lower()
else:
arch = arch.lower()
# Use vswhere.exe if it exists.
if os.path.exists(VSWHERE_PATH):
cmd = [VSWHERE_PATH, "-latest", "-property", "installationPath"]
vs_path = subprocess.check_output(cmd).decode(sys.stdout.encoding)
vs_path = vs_path.strip()
util.report("Running vswhere to find VS: " + repr(cmd))
util.report("vswhere output: " + vs_path)
if not os.path.isdir(vs_path):
raise ValueError("VS install path does not exist: " + vs_path)
vcvars_path = pjoin(vs_path, 'VC', 'Auxiliary', 'Build',
'vcvarsall.bat')
elif os.path.exists(BUILDTOOLS_VSDEVCMD):
vcvars_path = BUILDTOOLS_VSDEVCMD
elif vs_tools is None:
vs_tools = os.path.expandvars('%VS140COMNTOOLS%')
vcvars_path = pjoin(vs_tools, '..', '..', 'VC', 'vcvarsall.bat')
# Newer vcvarsall.bat scripts aren't quiet, so direct them to NUL, aka
# Windows /dev/null.
cmd = util.shquote_cmd([vcvars_path, arch]) + ' > NUL && set'
util.report("Running vcvars: " + cmd)
output = \
subprocess.check_output(cmd, shell=True).decode(sys.stdout.encoding)
new_env = {}
for line in output.splitlines():
var, val = line.split('=', 1)
new_env[var] = val
return new_env
| 14,561
|
def set_line_length(length):
"""
set_line_length(int length)
Sets the maximum line length for log messages.
Messages longer than this amount will be broken up into multiline messages.
Parameters
----------
* length :
the maximum log message line length in characters
"""
return _openmoc.set_line_length(length)
| 14,562
|
def get_teams(pbp_json):
"""
Get teams
:param pbp_json: raw play by play json
:return: dict with home and away
"""
return {'Home': shared.get_team(pbp_json['gameData']['teams']['home']['name'].upper()),
'Away': shared.get_team(pbp_json['gameData']['teams']['away']['name'].upper())}
| 14,563
|
def makeCapture(theGame, startCoordinate, endCoordinate):
""" Update the board for a capture between a start and end coordinate """
startX = startCoordinate.get_x_board()
startY = startCoordinate.get_y_board()
endX = endCoordinate.get_x_board()
endY = endCoordinate.get_y_board()
startPieceType = theGame.getState(startCoordinate)
if startPieceType in (types.EMPTY, types.OFF_BOARD):
error_template = "Illegal start piece type: {0} at ({1}, {2})"
raise TypeError(error_template.format(startPieceType, startX, startY))
elif abs(startX - endX) not in (0, 2):
error_template = "Illegal X capture: {0} -> {1}"
raise ValueError(error_template.format(startX, endX))
elif abs(startY - endY) not in (0, 2):
error_template = "Illegal Y capture: {0} -> {1}"
raise ValueError(error_template.format(startY, endY))
elif startX == endX and startY == endY:
error_template = ("Start and end capture coordinates are the "
"same: ({0}, {1})")
raise ValueError(error_template.format(startX, startY))
captureStartX = int(startX + (endX - startX)/2)
captureStartY = int(startY + (endY - startY)/2)
captureCoordinate = coordinate.Coordinate(captureStartX, captureStartY)
theGame.setState(endCoordinate, getPossiblePromotedPiece(theGame,
endCoordinate,
startCoordinate))
theGame.setState(captureCoordinate, types.EMPTY)
theGame.setState(startCoordinate, types.EMPTY)
theGame.pieceLastMoved = endCoordinate
| 14,564
|
def register(args):
"""Register a new user using email and password.
    Return CONFLICT if a user with the same email already exists.
"""
if db.session.query(User).filter_by(email=args['email']).first():
return conflict("User already exists.")
new_user = User(args['email'], args['password'])
db.session.add(new_user)
db.session.commit()
user_schema = UserSchema()
return created(data=user_schema.dump(new_user).data)
| 14,565
|
def _scale_value_to_rpm(value, total):
"""Scale value to reads per million"""
return value * 1 / (total / 1e6)
| 14,566
|
def file_exists_not_empty(filename,):
"""
Tests if file exists and is not empty
:param filename: full path of file to be checked
:type filename: str
"""
    if os.path.isfile(filename):
        return os.stat(filename).st_size != 0
    return False
| 14,567
|
def poly_vals_in_range(minimum, maximum, roots):
"""Return a list of all results of a given polynomial within a range
based on the roots of the polynomial itself.
These roots will be selected by a user from the GUI.
Arguments:
minimum -- the lowest value in the dataset
maximum -- the highest value in the dataset
roots -- the roots of the polynomial
"""
poly = polyfromroots(roots)
vals = itertools.takewhile(lambda x: x <= maximum,
[int(polyval(y, poly)) for y in range(minimum, maximum + 1)])
vals = sorted(filter(lambda x: minimum <= x <= maximum, vals))
return vals
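
# Hedged usage sketch (assumes polyfromroots/polyval from
# numpy.polynomial.polynomial and itertools are imported at module level, as the
# body requires). For roots 1 and 2 the polynomial is (x - 1)(x - 2).
print(poly_vals_in_range(0, 10, [1, 2]))
# -> [0, 0, 2, 2, 6]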
| 14,568
|
def merge_files_in_range(cli_args, file_names, range_to, args):
"""
:param cli_args: Dictionary containing all command-line arguments from user
:param file_names: List of strings where each is a filename
:param range_to: Integer, the number of files to merge
:param args: List, the rest of the arguments to call merge_to_make_dtseries
"""
for r in range(0, range_to):
for f in file_names:
merge_to_make_dtseries(cli_args, str(f) + str(r + 1), *args)
| 14,569
|
def width_angle(rectangle: Polygon):
"""Returns the length and angle(in degrees) of the longest side of a
rotated rectangle
"""
point_a, point_b, point_c = rectangle.exterior.coords[:3]
a = distance(point_a, point_b)
b = distance(point_b, point_c)
if a > b:
angle = line_angle(point_a, point_b)
return a, b, angle
angle = line_angle(point_b, point_c)
return b, a, angle
| 14,570
|
def mean_absolute_error(y_true, y_pred, discretise = False):
"""
requires input arrays to be same np.dtype.
returns average, not sum of errors
discretising (for classification problems) makes little sense to me,
but may be necessary in some obscure scenarios
"""
if discretise:
y_p = tools.round_probabilities(y_pred)
else:
y_p = y_pred
mae_a = tf.Session().run(tf.keras.losses.mean_absolute_error(y_true, y_p))
return mae_a.mean()
| 14,571
|
def population_fit_feature_cal(population,
fitFunction,
fitFunctionInput,
ite):
"""Parallel population fitness calculation function
Args:
population (list): Single population
fitFunction (function): Fitness calculation function
fitFunctionInput (list): Auxiliary input of fitness calculation function
ite (int): Current population number
Returns:
[int, list]: Current population number, population characteristics (mean, variance, maximum)
"""
# Calculate population fitness
populationFit = []
for individual in population:
Fit = fitFunction(individual, fitFunctionInput)
populationFit.append(Fit)
# Calculate population fitness characteristics
    mean, std, max_fit = np.mean(populationFit), np.std(populationFit), np.max(populationFit)
    return ite, [mean, std, max_fit]
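# Usage sketch with a toy fitness function (hypothetical; the real fitness
# function and its auxiliary input are problem-specific).
def _demo_fitness(individual, aux):
    weight = aux[0]
    return weight * sum(individual)

_ite, (_mean, _std, _max) = population_fit_feature_cal(
    population=[[1, 2, 3], [4, 5, 6]],
    fitFunction=_demo_fitness,
    fitFunctionInput=[1.0],
    ite=0,
)
# _mean == 10.5, _std == 4.5, _max == 15.0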
| 14,572
|
def cblaster_gne(job_id: str, options: ImmutableMultiDict = None,
file_path: t.Union[str, None] = None) -> None:
"""Executed when requested job is cblaster_gne (forges + exec. command)
Input:
- job_id: ID of the submitted job
- options: user submitted parameters via HTML form
- file_path: path to previous job's session file
Output:
- None, execution of this module
This function forges and executes a cblaster command.
"""
pre_job_formalities(job_id)
_, log_path, results_path = generate_paths(job_id)
if log_threshold_exceeded(int(options["sample_number"]),
thresholds['maximum_gne_samples'],
(log_path, job_id, 'cblaster'),
'Too many samples'):
return
session_path = file_path
cmd = ["cblaster", "gne", session_path,
"--max_gap", options["max_intergenic_distance"],
"--samples", options["sample_number"],
"--scale", options["sampling_space"],
"--plot", os.path.join(results_path, f"{job_id}_plot.html"),
"--output", os.path.join(results_path, f"{job_id}_summary.txt")]
cmd.extend(create_summary_table_commands('gne', options))
return_code = run_command(cmd, log_path, job_id)
post_job_formalities(job_id, return_code)
| 14,573
|
def intilise_database2():
"""
Initilse the database and make a table instance
Returns
pymongo object of the table
"""
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb=myclient['subreddit']
maintable2 = mydb["posts2"]
return maintable2
| 14,574
|
def first(id_):
"""The first service of the station."""
return jsonify(
[
(n.removeprefix("_"), t)
for n, t in r.zrange(f"Station:_{id_}:first", 0, -1, withscores=True)
]
)
| 14,575
|
def plot_set_points(set_data, color="black", size=10, marker="o", scale=1):
"""
Plot the individual points of a two dimensional isl set.
:param set_data: The islpy.Set to plot.
:param color: The color of the points.
:param size: The diameter of the points.
:param marker: The marker used to mark a point.
:param scale: Scale the values.
"""
    points = bset_get_points(set_data, scale=scale)
dimX = [x[0] for x in points]
dimY = [x[1] for x in points]
_plt.plot(dimX, dimY, marker, markersize=size, color=color, lw=0)
| 14,576
|
def sample(df, n, shape):
"""
randomly sample patch images from DataFrame
Parameters
----------
df : pd.DataFrame
DataFrame containing name of image files
n : int
number of patches to extract
shape : list
shape of patches to extract
Returns
-------
images : (n, n_channels, shape[0], shape[1], ...) ndarray
input patches
labels : (n, shape[0], shape[1], ...) ndarray
label patches
"""
N = len(df)
assert N >= n, "n should be smaller than or equal to " + str(N)
indices = np.random.choice(N, n, replace=False)
image_files = df["image"][indices]
label_files = df["label"][indices]
images = []
labels = []
for image_file, label_file in zip(image_files, label_files):
image = load_nifti(image_file)
label = load_nifti(label_file).astype(np.int32)
mask = np.int32(label > 0)
        # use integer division so the slice bounds are ints (Python 3), and a
        # tuple so numpy accepts the multi-dimensional index
        slices = tuple(slice(len_ // 2, -(len_ // 2)) for len_ in shape)
        mask[slices] *= 2
        indices = np.where(mask > 1.5)
        i = np.random.choice(len(indices[0]))
        slices = tuple(
            slice(index[i] - len_ // 2, index[i] + len_ // 2)
            for index, len_ in zip(indices, shape))
image_patch = image[slices]
label_patch = label[slices]
image_patch = image_patch.transpose(3, 0, 1, 2)
images.append(image_patch)
labels.append(label_patch)
images = np.array(images)
labels = np.array(labels)
return images, labels
| 14,577
|
def bitsizeof_varint32(value: int) -> int:
"""
Gets bit size of variable 32-bit signed integer value.
:param value: Value to use for bit size calculation.
:returns: Bit size of the value.
:raises PythonRuntimeException: Throws if given value is out of range for varint32 type.
"""
return _bitsizeof_varnum(abs(value), VARINT32_MAX_VALUES, "varint32")
| 14,578
|
def sort_faces(path):
"""
Sorts the faces with respect facial features
:param path: The path that is checked : Provided by the user
:return: None
"""
count = 0
to_find = get_directories(path)
faces = load_faces(to_find, path)
print("No. of faces to detect: ", len(to_find))
for i in range(len(to_find)):
print(path + "/" + to_find[i])
images = get_images(path)
print("Number of images loaded: ", len(images))
known_faces = make_encodings(faces)
results = Parallel(n_jobs=num_cores)(delayed(check_faces)(image, to_find, known_faces, path) for image in images)
| 14,579
|
def slack(channel, message, subject=''):
"""
Sends a notification to meerkat slack server. Channel is '#deploy' only if
in live deployment, otherwise sent privately to the developer via slackbot.
Args:
channel (str): Required. The channel or username to which the message
should be posted.
message (str): Required. The message to post to slack.
        subject (str): Optional. Placed in bold and separated by a pipe.
    Returns:
        The requests response object returned by the slack webhook call.
    """
# Assemble the message text string
text = str(message)
if subject:
text = "*_{}_* | {}".format(subject, message)
# Send the slack message
message = {'text': text, 'channel': channel, 'username': 'Meerkat'}
url = ('https://hooks.slack.com/services/T050E3XPP/'
'B0G7UKUCA/EtXIFB3CRGyey2L7x5WbT32B')
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(message), headers=headers)
# Return the slack response
return r
| 14,580
|
def setFigFormat(figFormat):
"""
Set figure size for either journal/conference paper or presentation.
<333 T. Harrison 10/2020
Input:
1) figFormat - string - either 'paper' or 'presentation'
Output:
1) changed matplotlib rcParams
"""
##########################################################################
if figFormat == 'paper' or figFormat == 'journal':
matplotlib.rcParams['figure.figsize'] = (3.5, 2.625)
matplotlib.rcParams['font.size'] = 8
elif figFormat == 'presentation' or figFormat == 'pres':
matplotlib.rcParams['figure.figsize'] = (6.4, 4.8)
matplotlib.rcParams['font.size'] = 14
matplotlib.rcParams['axes.xmargin'] = 0.5
# return iStr
| 14,581
|
def compute_similarity(img_one,img_two):
"""Performs image resizing just to compute the
cosine similarity faster
Input:
Two images
Output:
Cosine Similarity
"""
x = cv2.resize(img_one, dsize=(112, 112), interpolation=cv2.INTER_CUBIC)
y = cv2.resize(img_two, dsize=(112, 112), interpolation=cv2.INTER_CUBIC)
x = x.ravel().reshape(-1, 1)
y = y.ravel().reshape(-1, 1)
if x.shape[0] != y.shape[0]:
dist = 0
else:
dist = 1 - distance.cosine(x, y)
return dist
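# Usage sketch with synthetic data (a random array stands in for a real photo;
# cv2 and scipy.spatial.distance are assumed importable, as compute_similarity
# itself depends on them). An image compared with itself should give ~1.0.
import numpy as np

_demo_img = np.random.randint(0, 256, size=(200, 300), dtype=np.uint8)
_sim = compute_similarity(_demo_img, _demo_img)   # expected: approximately 1.0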
| 14,582
|
def get_results_from_firebase(firebase):
"""
The function to download all results from firebase
Parameters
----------
firebase : pyrebase firebase object
initialized firebase app with admin authentication
Returns
-------
results : dict
The results in a dictionary with the following format:
{
"task_id" {
"user1_id": {
"data": {...}
},
"user2_id": {
"data": {...}
},
}
}
"""
fb_db = firebase.database()
results = fb_db.child("results").get().val()
return results
| 14,583
|
def merge_sort(items):
"""
Sorts the items in the list
:param items: The list to sort
"""
    # Base case: if the list has 0 or 1 items, it's already sorted
if len(items) <= 1:
return
# Break the list into halves
middle = len(items) // 2
part1 = items[:middle]
part2 = items[middle:]
# Recursively call this function to sort each half
merge_sort(part1)
merge_sort(part2)
# Merge the two sorted parts
i = 0 # iterator for part 1
j = 0 # iterator for part 2
k = 0 # iterator for complete list
# As long as there are more items in each list
while i < len(part1) and j < len(part2):
# Get the smaller item from whichever part its in
if part1[i] < part2[j]:
items[k] = part1[i]
i += 1
k += 1
else: # part2 <= part1
items[k] = part2[j]
j += 1
k += 1
# At this point, one or the other size is done
# Copy any remaining items from part1
while i < len(part1):
items[k] = part1[i]
i += 1
k += 1
# Copy any remaining items from part2
while j < len(part2):
items[k] = part2[j]
j += 1
k += 1
# The list is now sorted!
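# Usage sketch: merge_sort sorts the list in place and returns None.
_items = [5, 2, 9, 1, 5, 6]
merge_sort(_items)
# _items is now [1, 2, 5, 5, 6, 9]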
| 14,584
|
def validate_response(response: Response, method: str, endpoint_url: str) -> None:
"""
This function verifies that your OpenAPI schema definition matches the response of your API endpoint.
It inspects your schema recursively, and verifies that the schema matches the structure of the response.
:param response: HTTP response
:param method: HTTP method ('get', 'put', 'post', ...)
:param endpoint_url: Relative path of the endpoint being tested
:return: None
"""
# Load settings
schema, case, path = load_settings()
case_func = case_check(case)
# Load response contents
try:
data = response.json()
except Exception as e:
logger.exception('Unable to open response object')
raise ValueError(
f'Unable to unpack response object. ' f'Make sure you are passing response, and not response.json(). Error: {e}')
# Fetch schema
if schema == 'static':
complete_schema = fetch_from_dir(path=path)
# Get the part of the schema relating to the endpoints success-response
schema = parse_endpoint(schema=complete_schema, method=method, endpoint_url=endpoint_url,
status_code=response.status_code)
else:
schema = fetch_generated_schema(url=endpoint_url, method=method, status_code=response.status_code)
# Test schema
if not schema:
raise OpenAPISchemaError('The OpenAPI schema is undefined. Schema is not testable.')
if schema['type'] == 'object':
_dict(schema=schema, data=data, case_func=case_func)
elif schema['type'] == 'array':
_list(schema=schema, data=data, case_func=case_func)
elif schema['type'] == 'string' or schema['type'] == 'boolean' or schema['type'] == 'integer':
_item(schema=schema, data=data)
else:
raise Exception(f'Unexpected error.\nSchema: {schema}\n Response: {data}')
| 14,585
|
def mtci_vi(imgData, wave, mask=0, bands=[-1,-1,-1]):
"""
Function that calculates the MERIS Terrestrial Chlorophyll Index.
This functions uses wavelengths 753.75, 708.75, and 681.25 nm. The closest bands to these values will be used.
Citation: Dash, J. and Curran, P.J. 2004. The MERIS terrestrial chlorophyll index, International Journal of Remote Sensing, 25(23), 5403–5413.
INPUTS:
1) imgData: an array of hyperspectral data either as 3D [n_row x n_col x n_band] or 2D [n_row x n_band]
2) wave: an array of wavelengths in nanometers that correspond to the n_bands in imgData
3) mask: OPTIONAL - a binary array (same size as imgData) that designates which pixels should be included in analysis. Pixels with 1 are used, while pixels with 0 are not.
4) bands: OPTIONAL - if the user wants to define the bands used in the function provide the band index (not in nm) for each wavelength in this order [681.25, 708.75, 753.75 nm].
OUTPUTS:
1) vi: the calculated spectral index value for each pixel either returned as [n_row x n_col x 1] or [n_row x 1]
02/2020 - Susan Meerdink
"""
# Determine the bands used in function
if len(bands) == 3:
if bands[0] == -1:
idx_681 = (np.abs(wave - 681.25)).argmin()
else:
idx_681 = bands[0]
if bands[1] == -1:
idx_708 = (np.abs(wave - 708.75)).argmin()
else:
idx_708 = bands[1]
if bands[2] == -1:
idx_753 = (np.abs(wave - 753.75)).argmin()
else:
idx_753 = bands[2]
print('MTCI calls for bands 681.25, 708.75, and 753.75 nm. Using bands ' + str(wave[idx_681]) +', '+ str(wave[idx_708])+', '+ str(wave[idx_753]))
else:
raise Exception('Not enough band indexes are provided by user.')
# 3D data, hyperspectral image, [n_row x n_col x n_band]
if imgData.ndim > 2:
data_753 = np.reshape(imgData[:,:,idx_753],[-1,1])
data_708 = np.reshape(imgData[:,:,idx_708],[-1,1])
data_681 = np.reshape(imgData[:,:,idx_681],[-1,1])
# 2D data, flattened hyperspectral data, [n_row x n_band]
else:
data_753 = imgData[:,idx_753]
data_708 = imgData[:,idx_708]
data_681 = imgData[:,idx_681]
# Calculate MTCI
index = (data_753 - data_708)/(data_708 - data_681)
# If data was 3D, reshape the index value back into 3D shape
if imgData.ndim > 2:
index = np.reshape(index,[imgData.shape[0],imgData.shape[1]])
if isinstance(mask, int) is False:
idx_x, idx_y = np.where(mask==0)
index[idx_x,idx_y] = 0
return index
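# Usage sketch with a synthetic hyperspectral cube (random reflectance values
# and an evenly spaced wavelength grid stand in for real imagery; numpy is
# assumed, as mtci_vi itself relies on it).
import numpy as np

_wave = np.arange(400.0, 1000.0, 5.0)        # 5 nm bands from 400 to 995 nm
_img = np.random.rand(10, 10, len(_wave))    # [n_row x n_col x n_band]
_mtci = mtci_vi(_img, _wave)                 # returns a 10 x 10 index image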
| 14,586
|
def test_convert_overflow(fast_reader):
"""
Test reading an extremely large integer, which falls through to
string due to an overflow error (#2234). The C parsers used to
return inf (kind 'f') for this.
"""
expected_kind = 'U'
dat = ascii.read(['a', '1' * 10000], format='basic',
fast_reader=fast_reader, guess=False)
assert dat['a'].dtype.kind == expected_kind
| 14,587
|
def GetSourceFile(file, sourcepath):
"""Return a relative file if it is embedded in a path."""
for root in sourcepath:
if file.find(root) == 0:
prefix_length = len(root)
if not root.endswith('/'):
prefix_length += 1
relative_file = file[prefix_length:]
return relative_file
return None
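# Usage sketch with hypothetical paths: the returned path is relative to the
# first matching root in sourcepath; None is returned when no root matches.
_rel = GetSourceFile('/home/build/src/app/main.js', ['/home/build/src'])
# _rel == 'app/main.js'
_missing = GetSourceFile('/tmp/other.js', ['/home/build/src'])
# _missing is None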
| 14,588
|
def trace_cmd_installed():
"""Return true if trace-cmd is installed, false otherwise"""
    with open(os.devnull, 'w') as devnull:
try:
subprocess.check_call(["trace-cmd", "options"], stdout=devnull)
except OSError:
return False
return True
| 14,589
|
def getmessage(msg_type):
    """ Return the message that should be displayed """
    cfg = configparser.ConfigParser()
    cfg.read(clt_path)
    if msg_type not in cfg.options('Messages'):
        sys.stderr.write("{} is not a valid message type: {}".format(msg_type, cfg.options('Messages')))
        sys.stderr.flush()
    return cfg.get('Messages', msg_type)
| 14,590
|
def refine_gene_list(adata, layer, gene_list, threshold, return_corrs=False):
"""Refines a list of genes by removing those that don't correlate well with the average expression of
those genes
Parameters
----------
adata: an anndata object.
layer: `str` or None (default: `None`)
The layer of data to use for calculating correlation. If None, use adata.X.
gene_list: list of gene names
threshold: threshold on correlation coefficient used to discard genes (expression of each gene is
compared to the bulk expression of the group and any gene with a correlation coefficient less
than this is discarded)
    return_corrs: whether to return the correlation values of the retained genes instead of their names (default: False)
    Returns
    -------
    Refined list of genes (or their correlation values, if return_corrs is True) that are well correlated with the average expression trend
"""
gene_list, corrs = group_corr(adata, layer, gene_list)
if (return_corrs):
return corrs[corrs >= threshold]
else:
return gene_list[corrs >= threshold]
| 14,591
|
def shape_to_coords(value, precision=6, wkt=False, is_point=False):
"""
Convert a shape (a shapely object or well-known text) to x and y coordinates
suitable for use in Bokeh's `MultiPolygons` glyph.
"""
if is_point:
value = Point(*value).buffer(0.1 ** precision).envelope
x_coords = list()
y_coords = list()
if wkt:
value = loads(value)
if not hasattr(value, '__len__'):
value = [value]
for v in value:
x_dict = dict()
y_dict = dict()
if not hasattr(v, 'exterior'):
v = v.buffer(0)
x_dict['exterior'] = [round(x, precision) for x in v.exterior.coords.xy[0]]
x_dict['holes'] = [[round(y, precision) for y in x.coords.xy[0]] for x in v.interiors]
y_dict['exterior'] = [round(x, precision) for x in v.exterior.coords.xy[1]]
y_dict['holes'] = [[round(y, precision) for y in x.coords.xy[1]] for x in v.interiors]
x_coords.append(x_dict)
y_coords.append(y_dict)
return x_coords, y_coords
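# Usage sketch with a unit square (shapely is assumed to be importable, as
# shape_to_coords itself operates on shapely geometries and WKT via loads()).
from shapely.geometry import Polygon

_square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
_xs, _ys = shape_to_coords(_square)
# _xs[0]['exterior'] == [0.0, 1.0, 1.0, 0.0, 0.0]
# _ys[0]['exterior'] == [0.0, 0.0, 1.0, 1.0, 0.0]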
| 14,592
|
def get_args():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
description='Test Runner script')
parser.add_argument('-c', '--controller', type=str, required=True, help='Controller host name')
parser.add_argument('-s', '--server', type=str, required=True, help='Cluster Server hostname')
parser.add_argument('-e', '--export', type=str, help='NFS Export Name', default="/")
parser.add_argument('-n', '--nodes', type=int, help='Number of active nodes', default=0)
parser.add_argument('-d', '--domains', type=int, help='Number of fs domains', default=0)
parser.add_argument('-m', '--mtype', type=str, help='Mount Type', choices=['nfs3', 'nfs4', 'nfs4.1', 'smb1', 'smb2',
'smb3'], default="nfs3")
parser.add_argument('--start_vip', type=str, help="Start VIP address range")
parser.add_argument('--end_vip', type=str, help="End VIP address range")
parser.add_argument('-l', '--locking', type=str, help='Locking Type', choices=['native', 'application', 'off'],
default="native")
args = parser.parse_args()
return args
| 14,593
|
def view_image(image, label=""):
"""View a single image."""
print("Label: %s" % label)
imshow(image, cmap=cm.gray)
show()
| 14,594
|
def create_sink(sink_id: str, project_id: str, dataset_id: str, pubsub_topic: str, query: str = None) -> None:
"""Creates a sink to export logs to the given PubSub Topic.
:param sink_id: A unique identifier for the sink
:type sink_id: str
:param project_id: The project_id that is associated with your GCP account and BigQuery Table
:type project_id: str
:param pubsub_topic: The PubSub Topic that the logs will export to
:type pubsub_topic: str
:param query: The query that filters what logs will be exported
:type query: str
:return: Nothing
:rtype: None
"""
client = logging.Client()
destination = 'pubsub.googleapis.com/projects/{project_id}/topics/{pubsub_topic}'.format(
project_id=project_id, pubsub_topic=pubsub_topic)
if query is None:
query = '''
resource.labels.dataset_id="{dataset_id}"
resource.labels.project_id="{project_id}"
protoPayload.metadata.@type="type.googleapis.com/google.cloud.audit.BigQueryAuditMetadata"
(protoPayload.metadata.tableDataChange.deletedRowsCount > "0" OR protoPayload.metadata.tableDataChange.insertedRowsCount > "0")
'''.format(project_id=project_id, dataset_id=dataset_id)
sink = client.sink(
sink_id,
filter_=query,
destination=destination)
if sink.exists():
print('Sink {} already exists.'.format(sink.name))
return
else:
sink.create()
print('Created sink {}'.format(sink.name))
return
| 14,595
|
def max_union(map_list):
"""
Element-wise maximum of the union of a list of HealSparseMaps.
Parameters
----------
map_list : `list` of `HealSparseMap`
Input list of maps to compute the maximum of
Returns
-------
result : `HealSparseMap`
Element-wise maximum of maps
"""
return _apply_operation(map_list, np.fmax, 0, union=True, int_only=False)
| 14,596
|
def getDtypes(attributes, forecastHorizon):
"""
    Auxiliary function to generate a dictionary of datatypes for data queried from dynamo.
Parameters
----------
attributes : list,
Attributes queried from dynamo.
forecastHorizon : integer,
Number of forecast horizons which have been queried.
Returns
-------
attributeDtypes : dict,
Dictionary to pass to dataframe to specify dtypes of all data queried.
"""
dtypes = {
"apparentTemperature": np.float64,
"cloudCover": np.float64,
"dewPoint": np.float64,
"humidity": np.float64,
"precipIntensity": np.float64,
"precipProbability": np.float64,
"pressure": np.float64,
"temperature": np.float64,
"uvIndex": np.float64,
"visibility": np.float64,
"windBearing": np.float64,
"windGust": np.float64,
"windSpeed": np.float64,
"carbonFactor": np.float64,
"carbonIndex": str
}
attributeDtypes = dict()
attributeDtypes["unixTimestamp"] = np.int32
for attribute in attributes:
dtype = dtypes[attribute]
for x in range(forecastHorizon+1):
attributeDtypes[attribute + "_" + str(x)] = dtype
return attributeDtypes
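# Usage sketch (illustrative attributes): two attributes over a 2-step forecast
# horizon expand into one column per attribute per horizon step, plus the
# unixTimestamp column.
_dtypes = getDtypes(["temperature", "carbonIndex"], forecastHorizon=2)
# _dtypes == {
#     "unixTimestamp": np.int32,
#     "temperature_0": np.float64, "temperature_1": np.float64, "temperature_2": np.float64,
#     "carbonIndex_0": str, "carbonIndex_1": str, "carbonIndex_2": str,
# }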
| 14,597
|
def cazy_synonym_dict():
"""Create a dictionary of accepted synonms for CAZy classes."""
cazy_dict = {
"Glycoside Hydrolases (GHs)": ["Glycoside-Hydrolases", "Glycoside-Hydrolases", "Glycoside_Hydrolases", "GlycosideHydrolases", "GLYCOSIDE-HYDROLASES", "GLYCOSIDE-HYDROLASES", "GLYCOSIDE_HYDROLASES", "GLYCOSIDEHYDROLASES", "glycoside-hydrolases", "glycoside-hydrolases", "glycoside_hydrolases", "glycosidehydrolases", "GH", "gh"],
"GlycosylTransferases (GTs)": ["Glycosyl-Transferases", "GlycosylTransferases", "Glycosyl_Transferases", "Glycosyl Transferases", "GLYCOSYL-TRANSFERASES", "GLYCOSYLTRANSFERASES", "GLYCOSYL_TRANSFERASES", "GLYCOSYL TRANSFERASES", "glycosyl-transferases", "glycosyltransferases", "glycosyl_transferases", "glycosyl transferases", "GT", "gt"],
"Polysaccharide Lyases (PLs)": ["Polysaccharide Lyases", "Polysaccharide-Lyases", "Polysaccharide_Lyases", "PolysaccharideLyases", "POLYSACCHARIDE LYASES", "POLYSACCHARIDE-LYASES", "POLYSACCHARIDE_LYASES", "POLYSACCHARIDELYASES", "polysaccharide lyases", "polysaccharide-lyases", "polysaccharide_lyases", "polysaccharidelyases", "PL", "pl"],
"Carbohydrate Esterases (CEs)": ["Carbohydrate Esterases", "Carbohydrate-Esterases", "Carbohydrate_Esterases", "CarbohydrateEsterases", "CARBOHYDRATE ESTERASES", "CARBOHYDRATE-ESTERASES", "CARBOHYDRATE_ESTERASES", "CARBOHYDRATEESTERASES", "carbohydrate esterases", "carbohydrate-esterases", "carbohydrate_esterases", "carbohydrateesterases", "CE", "ce"],
"Auxiliary Activities (AAs)": ["Auxiliary Activities", "Auxiliary-Activities", "Auxiliary_Activities", "AuxiliaryActivities", "AUXILIARY ACTIVITIES", "AUXILIARY-ACTIVITIES", "AUXILIARY_ACTIVITIES", "AUXILIARYACTIVITIES", "auxiliary activities", "auxiliary-activities", "auxiliary_activities", "auxiliaryactivities", "AA", "aa"],
"Carbohydrate-Binding Modules (CBMs)": ["Carbohydrate-Binding-Modules", "Carbohydrate_Binding_Modules", "Carbohydrate_Binding Modules", "CarbohydrateBindingModules", "CARBOHYDRATE-BINDING-MODULES", "CARBOHYDRATE_BINDING_MODULES", "CARBOHYDRATE_BINDING MODULES", "CARBOHYDRATEBINDINGMODULES", "carbohydrate-binding-modules", "carbohydrate_binding_modules", "carbohydrate_binding modules", "carbohydratebindingmodules", "CBMs", "CBM", "cbms", "cbm"]
}
return cazy_dict
| 14,598
|
def alignment_patterns(matrix: list[list[Point]], version: int) -> None:
"""Adding alignment patterns (from version 2).
Args:
matrix: Matrix containing qr code pixels.
version: QR code version.
"""
if version > 1:
patterns = tables.alignment_patterns_table().get(version)
if version > 6:
points = list(product(patterns, patterns))
points.remove((patterns[0], patterns[0]))
points.remove((patterns[0], patterns[-1]))
points.remove((patterns[-1], patterns[0]))
else:
points = [(patterns[0], patterns[0])]
for x, y in points:
matrix[x][y].bit = True
matrix[x][y].is_service_bit = True
for i in range(5):
matrix[x - 2][y - 2 + i].bit = True
matrix[x - 2][y - 2 + i].is_service_bit = True
matrix[x + 2][y - 2 + i].bit = True
matrix[x + 2][y - 2 + i].is_service_bit = True
matrix[x - 2 + i][y - 2].bit = True
matrix[x - 2 + i][y - 2].is_service_bit = True
matrix[x - 2 + i][y + 2].bit = True
matrix[x - 2 + i][y + 2].is_service_bit = True
for i in range(5):
for j in range(5):
if not matrix[x - 2 + i][y - 2 + j].is_service_bit:
matrix[x - 2 + i][y - 2 + j].is_service_bit = True
| 14,599
|