content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from datetime import datetime, timedelta
def should_build_new_prices() -> bool:
    """
    Determine if prices were built recently enough that there
    is no reason to build them again.

    Freshness is tracked via the mtime of the ``last_price_build_time``
    cache file; prices are considered fresh for 12 hours.

    :return: True if prices should be (re)built, False otherwise.
    """
    cache_file = CACHE_PATH.joinpath("last_price_build_time")
    if not cache_file.is_file():
        # Never built before (or cache cleared): build now.
        return True
    stat_time = cache_file.stat().st_mtime
    # BUGFIX: the module imports the ``datetime`` class, so the previous
    # ``datetime.datetime.fromtimestamp`` / ``datetime.timedelta`` calls
    # raised AttributeError at runtime.
    last_price_build_time = datetime.fromtimestamp(stat_time)
    twelve_hours_ago = datetime.now() - timedelta(hours=12)
    return twelve_hours_ago > last_price_build_time
def config_nat_pool_binding(dut, **kwargs):
    """
    Config NAT pool bindings.
    Author: kesava-swamy.karedla@broadcom.com

    :param dut: device under test
    :param config: "add" / "del" (mandatory)
    :param binding_name: NAT binding name (mandatory)
    :param pool_name: NAT pool name (mandatory when config="add")
    :param nat_type: optional NAT type (e.g. "snat"/"dnat")
    :param twice_nat_id: optional twice-NAT id
    :param acl_name: optional ACL name
    :return: True on success, False when params/cli_type are invalid.
    usage:
        config_nat_pool_binding(dut1, binding_name="name", pool_name="pool1", acl_name="acl1", nat_type="dnat",
                                config="add")
    """
    # config nat add binding bind1 test1 acl1
    instance = 0
    result = False
    command = ''
    cli_type = st.get_ui_type(dut, **kwargs)
    skip_error_check = kwargs.get("skip_error_check", True)
    if cli_type not in ["click", "klish", "rest-patch", "rest-put"]:
        st.log("UNSUPPORTED CLI TYPE")
        return False
    # BUGFIX: both binding_name and config are mandatory, so fail when EITHER
    # is missing. The original used "and", which only triggered when both were
    # absent and then crashed below on kwargs["config"].
    if "binding_name" not in kwargs or "config" not in kwargs:
        st.error("Mandatory params binding_name, config not provided ")
        return result
    if kwargs["config"] == "add":
        if "pool_name" not in kwargs:
            st.error("Mandatory params pool_name is not provided ")
            return result
        if cli_type == "click":
            command = "config nat add binding {} {}".format(kwargs["binding_name"], kwargs["pool_name"])
            if "acl_name" in kwargs:
                command += " {}".format(kwargs["acl_name"])
            if "nat_type" in kwargs:
                command += " -nat_type {}".format(kwargs["nat_type"])
            if "twice_nat_id" in kwargs:
                command += ' -twice_nat_id {}'.format(kwargs["twice_nat_id"])
        elif cli_type == "klish":
            command = list()
            command.append("nat")
            cmd = "binding {} {}".format(kwargs["binding_name"], kwargs["pool_name"])
            if "acl_name" in kwargs:
                cmd += " {}".format(kwargs["acl_name"])
            if "nat_type" in kwargs:
                cmd += " {}".format(kwargs["nat_type"])
            if "twice_nat_id" in kwargs:
                cmd += " twice-nat-id {}".format(kwargs["twice_nat_id"])
            command.append(cmd)
            command.append("exit")
        elif cli_type in ["rest-patch", "rest-put"]:
            url = st.get_datastore(dut, "rest_urls")['config_nat_pool_binding'].format(instance, kwargs["binding_name"])
            data = {"openconfig-nat:config": {"nat-pool": kwargs["pool_name"]}}
            if "acl_name" in kwargs:
                data['openconfig-nat:config']['access-list'] = kwargs["acl_name"]
            if "nat_type" in kwargs:
                data['openconfig-nat:config']['type'] = kwargs["nat_type"].upper()
            if "twice_nat_id" in kwargs:
                data['openconfig-nat:config']['twice-nat-id'] = int(kwargs["twice_nat_id"])
            config_rest(dut, http_method=cli_type, rest_url=url, json_data=data)
            return True
    if kwargs["config"] == "del":
        if cli_type == "click":
            command = "config nat remove binding {} ".format(kwargs['binding_name'])
        elif cli_type == "klish":
            command = list()
            command.append("nat")
            command.append("no binding {}".format(kwargs["binding_name"]))
            command.append("exit")
        elif cli_type in ["rest-patch", "rest-put"]:
            url = st.get_datastore(dut, "rest_urls")['del_nat_pool_binding'].format(instance, kwargs["binding_name"])
            delete_rest(dut, rest_url=url)
            return True
    st.config(dut, command, type=cli_type, skip_error_check=skip_error_check)
    return True
def database_obj_to_py(obj, fingerprints_in_song=False):
    """
    Recursively convert Fingerprint and Song sqlalchemy objects to native
    Python types (lists and dicts).

    Args:
        obj (database.schema.Fingerprint|database.schema.Song|list): object
            (or list of objects) to convert.
        fingerprints_in_song (bool): Include each song's fingerprints as a
            list within the song dict (plus a ``num_fingerprints`` count).

    Returns:
        list|dict: the converted object.

    Raises:
        ValueError: if ``obj`` is not a list, Song or Fingerprint.
    """
    if isinstance(obj, list):
        return [
            database_obj_to_py(item, fingerprints_in_song=fingerprints_in_song)
            for item in obj
        ]
    if isinstance(obj, Song):
        song_dict = {
            key: getattr(obj, key)
            for key in ("id", "duration", "filehash", "filepath", "title",
                        "youtube_id")
        }
        if fingerprints_in_song:
            song_dict["fingerprints"] = [
                database_obj_to_py(fp) for fp in obj.fingerprints
            ]
            song_dict["num_fingerprints"] = len(obj.fingerprints)
        return song_dict
    if isinstance(obj, Fingerprint):
        return {
            "song_id": obj.song_id,
            "hash": obj.hash,
            "offset": obj.offset,
        }
    raise ValueError("Unsupported object")
import csv
def get_outputs(output_file):
    """
    Parse ``output_file``, a csv file that defines the semantics of the
    output of a neural network.

    For example, output neuron 1 means class "0" in the MNIST
    classification task. Returns the first field of every row as a list.
    """
    with open(output_file, "rt", newline="", encoding="utf8") as csvfile:
        reader = csv.reader(csvfile, delimiter="\n", quotechar="|")
        return [row[0] for row in reader]
def auto_adjust(img):
    """Python translation of ImageJ's autoadjust: choose display limits.

    Parameters
    ----------
    img : ndarray

    Returns
    -------
    dict
        ``{"vmin": ..., "vmax": ...}`` display limits.
    """
    # total number of pixels
    n_pixels = int(np.array(img.shape).prod())
    # bins holding more than 10% of all pixels are ignored
    upper_limit = n_pixels / 10
    try:
        hist, edges = np.histogram(np.nan_to_num(img).ravel(), bins="auto")
        if len(edges) < 100:
            # "auto" produced too coarse a histogram; retry with 128 bins
            hist, edges = np.histogram(np.nan_to_num(img).ravel(), bins=128)
        # convert bin edges to bin centers
        centers = np.diff(edges) + edges[:-1]
        n_bins = len(hist)
        # Threshold below which a bin is considered noise. ImageJ purportedly
        # uses an isodata-based threshold; this variant converges as the bin
        # count increases.
        lower_limit = n_pixels / (n_bins * 16)
        in_range = np.logical_and(hist < upper_limit, hist > lower_limit)
        candidates = centers[in_range]
        # IndexError here means no bin passed the limits
        vmin = candidates[0]
        vmax = candidates[-1]
    except IndexError:
        vmin = 0
        vmax = 0
    if vmax <= vmin:
        # fall back to the full data range
        vmin = img.min()
        vmax = img.max()
    return dict(vmin=vmin, vmax=vmax)
def ChanPcaKmeansDS(ds, xvec='bias', chan='cf', mod=lambda y: y, comps=6,nclust=4, fig=None):
    """
    Cluster one channel of a dataset: flatten the channel over (x, y), run
    PCA, k-means the PCA scores, and store the resulting label map back on
    ``ds`` as ``<chan>_kmeans``.

    :param ds: xarray dataset
    :param xvec: name of dataset coordinate
    :param chan: selected channel
    :param mod: optional modifier applied to the channel data (lambda)
    :param comps: number of pca components to keep for clustering
    :param nclust: number of clusters
    :param fig: figure to plot in
    :return: 2-d array (x by y) of cluster labels
    """
    channel = ds[chan]
    flattened = mod(channel).values.reshape(
        (channel.x.shape[0] * channel.y.shape[0], channel[xvec].shape[0]))
    clusterer, _ = PcaKmeans(X=flattened, dims=comps, nclust=nclust, fig=fig)
    labels = clusterer.labels_.reshape((channel.x.shape[0], channel.y.shape[0]))
    ds[chan + '_kmeans'] = (ds[chan].dims[:-1], labels)
    ds.attrs.update([(chan + '_kmeans', 'kmeans clustering of ' + chan + ' with')])
    return labels
def elements(elements, axis=0):
    """
    Calculate a node 2-d-array from an element 2-d-array: interior nodes are
    the midpoints of adjacent element columns, and the first/last element
    columns are reused as the boundary node columns.
    """
    midpoints = 0.5 * np.asarray(elements[:, :-1] + elements[:, 1:])
    first_col = elements[:, [0]]
    last_col = elements[:, [-1]]
    return np.hstack([first_col, midpoints, last_col])
def convert_date_to_unix(date_string):
    """Convert a date string to a unix timestamp in milliseconds.

    Args:
        date_string: the date input string (may be empty or None).

    Returns:
        (int) converted timestamp in milliseconds, or None for empty input.
    """
    if date_string:
        parsed = dateparser.parse(date_string)
        return int(parsed.timestamp() * 1000)
    return None
def decoder(decoder_inputs, initial_state,
            cell, output_size, loop_function=None, dtype=None,
            scope=None):
    """
    The Decoder Function which returns the decoder hidden states after decoding the whole output.

    args:
        decoder_inputs: The inputs to the decoder, either the targets during training or the previous
            decoder output during inference.
        initial_state: The tensor used to initialize the first decoder step cell.
            NOTE(review): only ``initial_state[0]`` is used below — presumably the first element of a
            (state, ...) tuple; confirm against the caller.
        cell: The decoder cell returned by the rnn_cell function.
        output_size: The number of decoder hidden state units (not referenced in this body).
        loop_function: The function that embeds the previous decoder step's output and provides it as
            input to the next decoder step (inference mode). When None, ``decoder_inputs`` are fed
            directly (teacher forcing).
        dtype: the data type of the variable scope.
        scope: the scope of the attention decoder.

    returns:
        (outputs, state): the list of per-step cell outputs and the final cell state.
    """
    with tf.variable_scope(scope or 'decoder', dtype=dtype) as scope:
        state = initial_state[0]
        outputs = []
        prev = None
        for i, inp in enumerate(decoder_inputs):
            if i > 0:
                # TF1-style weight sharing: reuse the same variables for every time step.
                tf.get_variable_scope().reuse_variables()
            if loop_function is not None and prev is not None:
                # Feed the previous step's (embedded) output back in as this step's input.
                with tf.variable_scope("loop_function", reuse=True):
                    inp = loop_function(prev, i)
            cell_outputs, state = cell(inp, state)
            if loop_function is not None:
                prev = cell_outputs
            outputs.append(cell_outputs)
        return outputs, state
from typing import Sequence
def kron(nodes: Sequence[BaseNode]) -> BaseNode:
    """Kronecker product of the given nodes.

    This is the same contraction as the outer product, but with a different
    axis ordering: the first halves of all the nodes' edges come first in the
    result, followed by all the second halves. For example, for nodes
    :math:`X_{ab}`, :math:`Y_{cdef}` and :math:`Z_{gh}` the result is ordered
    :math:`R_{acdgbefh}`. This ordering makes the kron of many operators
    itself an operator.

    Args:
        nodes: A sequence of `BaseNode` objects, each of even order.

    Returns:
        A `Node` that is the kronecker product of the inputs; the first half
        of its edges are the operator's "input" edges, the second half its
        "output" edges.

    Raises:
        ValueError: if any node has an odd number of axes.
    """
    all_inputs = []
    all_outputs = []
    for node in nodes:
        n_axes = len(node.shape)
        if n_axes % 2 != 0:
            raise ValueError(f"All operator tensors must have an even order. "
                             f"Found tensor with order {n_axes}")
        half = n_axes // 2
        all_inputs.extend(node.edges[:half])
        all_outputs.extend(node.edges[half:])
    return outer_product_final_nodes(nodes, all_inputs + all_outputs)
def diskmounter() -> Mounter:
    """Fixture with an unversioned mounter backed by ``DiskFilesystem``."""
    mounter = unversioned_mounter(DiskFilesystem)
    return mounter
def stations():
    """Return a JSON list of stations (id and name) from the dataset."""
    # BUGFIX/cleanup: the loop variable used to be named ``stations``,
    # shadowing this view function inside its own body; build the list with
    # a comprehension instead.
    results = session.query(station.station, station.name)
    station_list = [
        {"Station": row.station, "Name": row.name}
        for row in results
    ]
    return jsonify(station_list)
def search_sorted_matrix(matrix, target):
    """
    Search a matrix whose rows and columns are each sorted ascending, e.g.::

        1  4  7  12  15  1000
        2  50 500 1001
        3  1002
        4

    Starts at the top-right corner: moving left decreases the value and
    moving down increases it, so each comparison discards a row or column.

    Returns ``[row, col]`` of ``target``, or ``[-1, -1]`` when absent.
    """
    if not matrix:
        return [-1, -1]
    row, col = 0, len(matrix[0]) - 1
    while row < len(matrix) and col >= 0:
        value = matrix[row][col]
        if value < target:
            row += 1
        elif value > target:
            col -= 1
        else:
            return [row, col]
    return [-1, -1]
def get_io_functions_from_lib(lib, load_func_name='load', dump_func_name='dump',
                              load_kwargs=None, dump_kwargs=None):
    """Create loader and dumper adapter functions around a (de)serialization library.

    Args:
        lib: module/object exposing load/dump style functions (e.g. ``json``).
        load_func_name: attribute name of the load function on ``lib``.
        dump_func_name: attribute name of the dump function on ``lib``.
        load_kwargs: optional extra keyword args forwarded to the load function.
        dump_kwargs: optional extra keyword args forwarded to the dump function.

    Returns:
        ``(loader, dumper)`` tuple. ``loader(input_stream, args)`` reads from
        the stream; ``dumper(output, output_stream, args)`` writes to it. The
        ``args`` parameter is accepted but unused, mirroring the original
        call signatures.
    """
    # BUGFIX: the defaults were mutable dicts ({}), which are shared across
    # calls; use None sentinels instead.
    load_kwargs = {} if load_kwargs is None else load_kwargs
    dump_kwargs = {} if dump_kwargs is None else dump_kwargs

    def loader(input_stream, args):
        return getattr(lib, load_func_name)(input_stream, **load_kwargs)

    def dumper(output, output_stream, args):
        return getattr(lib, dump_func_name)(output, output_stream, **dump_kwargs)

    return loader, dumper
def get_pane_id(session: str, window: int, pane: int):
    """
    Return the ID for a given pane: the injected pane-id value when one
    exists, otherwise a freshly generated id.

    Parameters
    ----------
    session : str
        Name of the session
    window : int
        Window number of pane
    pane : int
        Pane index in the window
    """
    injected = get_injected_pane_data(session, window, pane, PANE_ID_VARIABLE)
    # falsy injected value (None/empty) -> fall back to a generated id
    return injected or get_id()
def normalize_signs(arr1, arr2):
    """Change column signs so that "column" and "-column" compare equal.

    Eigenproblem results can come back with flipped signs and still be
    correct; this flips the columns of ``arr1`` so that a subsequent
    ``np.allclose(arr1_out, arr2)`` succeeds when the columns only differ
    by sign.

    For each column there are two cases:

    - The column is (almost) all zeros: no flip is needed, since
      ``allclose(-eps, 0)`` iff ``allclose(eps, 0)``.
    - Otherwise, the entry with the largest absolute value in ``arr1``'s
      column is located, its sign is compared with the entry at the same
      index in ``arr2``, and the column of ``arr1`` is flipped as needed.

    Raises:
        ValueError: if the two arrays have different shapes.
    """
    # Work in float64 so the sign-ratio division behaves uniformly
    # (integer 1/0 raises, while float 1./0. yields inf).
    arr1 = np.asarray(arr1, dtype=np.float64)
    arr2 = np.asarray(arr2, dtype=np.float64)
    if arr1.shape != arr2.shape:
        raise ValueError(
            "Arrays must have the same shape ({0} vs {1}).".format(arr1.shape,
                                                                   arr2.shape)
        )
    # Compare signs of each column's largest-magnitude entry to avoid
    # issues around zero.
    cols = range(arr1.shape[1])
    max_idx = np.abs(arr1).argmax(axis=0)
    signs1 = np.sign(arr1[max_idx, cols])
    signs2 = np.sign(arr2[max_idx, cols])
    # Temporarily silence 0./0. (invalid) and x/0. (divide) warnings while
    # forming the sign ratios.
    saved_err = np.seterr(invalid='ignore', divide='ignore')
    ratios = signs1 / signs2
    np.seterr(**saved_err)
    # ratios entries: 1 (same sign), -1 (opposite sign), or nan/inf/0 when a
    # zero sign was involved. In those degenerate cases the column sign does
    # not matter, so keep the column as-is.
    degenerate = (~np.isfinite(ratios)) | (ratios == 0)
    ratios[degenerate] = 1
    return arr1 * ratios, arr2
def draw_lines(img, lines, scale):
    """
    Draw connected line segments on a picture, scaling coordinates via
    ``check_pos``. A first point on the left edge is added in case of
    absence, and a last point on the right edge is added in case of
    absence.

    :param img: image to draw on (modified in place)
    :param lines: sequence of points/segments; each entry has 2 values
        (x, y) or 4 values (x1, y1, x2, y2)
    :param scale: scale factor forwarded to ``check_pos``
    :return: the updated image
    """
    if lines is not None and len(lines) > 0:
        # calculate angle between first and last points and find the
        # starting y on the left edge in accordance with this angle
        angle = calc_angle(lines[0], lines[-1])
        if abs(angle) < np.pi / 180:
            start_y = check_pos(lines[0][1], img.shape[0], scale)
        else:
            start_y = int(check_pos(lines[0][1], img.shape[0], scale) -
                          check_pos(lines[0][0], img.shape[1], scale) *
                          np.tan(angle))
        start = (0, start_y)
        for line in lines:
            cv2.line(img, start, (check_pos(line[0], img.shape[1], scale),
                                  check_pos(line[1], img.shape[0], scale)),
                     (0, 255, 0), 5)
            if len(line) > 2:
                cv2.line(img, (check_pos(line[0], img.shape[1], scale),
                               check_pos(line[1], img.shape[0], scale)),
                         (check_pos(line[2], img.shape[1], scale),
                          check_pos(line[3], img.shape[0], scale)),
                         (0, 255, 0), 5)
                start = (check_pos(line[2], img.shape[1], scale),
                         check_pos(line[3], img.shape[0], scale))
            else:
                start = (check_pos(line[0], img.shape[1], scale),
                         check_pos(line[1], img.shape[0], scale))
        # Extrapolate the last segment to the right edge of the image.
        # NOTE(review): this divides by start[0]; a final point at x == 0
        # would raise ZeroDivisionError — confirm inputs exclude that case.
        y_first = check_pos(lines[0][1], img.shape[0], scale)
        # BUGFIX: cv2.line requires integer point coordinates; the division
        # below produced a float, which OpenCV rejects.
        y_last = int((start[1] - y_first) * img.shape[1] / start[0] + y_first)
        x_last = img.shape[1] - 1
        cv2.line(img, start, (x_last, y_last), (0, 255, 0), 5)
    return img
def tokenize_de(text):
    """
    Tokenize German text from a string into a list of token strings.
    """
    return [tok.text for tok in spacy_de.tokenizer(text)]
import socket
def port_in_use(port, host=LOCALHOST):
    """Returns True when a port is in use at the given host.

    Must actually "bind" the address. Just checking if we can create
    a socket is insufficient as it's possible to run into permission
    errors like:

    - An attempt was made to access a socket in a way forbidden by its
      access permissions.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        try:
            sock.bind((host, port))
        except OSError:
            # BUGFIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit. Bind failures (including
            # PermissionError) all surface as OSError subclasses.
            return True
        return False
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend
    """
    # TODO (tym): Figure out how to switch between LBANN builds. See
    # GitHub Issue #1289.

    # Layer graph: ResNet-50 classifier with softmax over 1000 classes.
    input_ = lbann.Input()
    images = lbann.Identity(input_)
    labels = lbann.Identity(input_)
    logits = lbann.models.ResNet50(1000, bn_statistics_group_size=-1)(images)
    probs = lbann.Softmax(logits)
    cross_entropy = lbann.CrossEntropy(probs, labels)
    top5 = lbann.TopKCategoricalAccuracy(probs, labels, k=5)
    layers = list(lbann.traverse_layer_graph(logits))

    # Objective: cross-entropy plus L2 regularization on conv/FC weights.
    reg_weights = set()
    for layer in layers:
        if type(layer) in (lbann.Convolution, lbann.FullyConnected):
            reg_weights.update(layer.weights)
    l2_reg = lbann.L2WeightRegularization(weights=reg_weights, scale=1e-4)
    obj = lbann.ObjectiveFunction([cross_entropy, l2_reg])

    # Objects for LBANN model
    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]
    metrics = [lbann.Metric(top5, name='top-5 accuracy', unit='%')]

    return lbann.Model(num_epochs,
                       layers=layers,
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
async def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    frame_stream = gen(Camera())
    media_type = 'multipart/x-mixed-replace; boundary=frame'
    return StreamingResponse(frame_stream, media_type=media_type)
def logout():
    """
    Log out the user by clearing the session.
    """
    session.clear()
    payload = {"success": True}
    return json_response(status=200, response_data=payload)
import base64
import json
def get_secrets(secret_name):
    """
    Get secrets from AWS Secrets Manager.

    :param secret_name: name (or ARN) of the secret to fetch.
    :return: dict parsed from the secret's JSON payload (string secrets are
        used as-is, binary secrets are base64-decoded first).
    :raises ClientError: on any Secrets Manager API failure.
    """
    session = boto3.session.Session(profile_name="platform-dev")
    client = session.client(
        service_name="secretsmanager", region_name="eu-west-1"
    )
    try:
        response = client.get_secret_value(SecretId=secret_name)
    except ClientError as e:
        # BUGFIX: the previous if/elif chain only re-raised a whitelist of
        # error codes; any other ClientError was silently swallowed and the
        # function fell through to return None. Log and re-raise them all.
        log.error(e)
        raise
    if "SecretString" in response:
        secrets = response["SecretString"]
    else:
        secrets = base64.b64decode(response["SecretBinary"])
    return json.loads(secrets)
import numpy as np
from stentseg.motion.displacement import _calculateAmplitude, _calculateSumMotion
from stentseg.motion.dynamic import get_mesh_deforms
from visvis.processing import lineToMesh
from visvis import Pointset # lineToMesh does not like the new PointSet class
def make_mesh_dynamic_with_abs_displacement(mesh,deforms_f,origin,dim ='z',motion='amplitude',radius=1.0,**kwargs):
    """ Create Mesh object with color values from mesh, e.g. of vessel.

    Per mesh vertex, a scalar displacement is computed over the deformation
    phases and used as the mesh color value.

    Input: mesh, the source mesh
           deforms_f, deformation fields (presumably forward deforms — the
               TODO below questions this; confirm against the caller)
           origin, origin from volume
           dim, axis ('z' etc.) passed to the displacement helpers
           motion, 'amplitude' (max distance between two point positions)
               or 'sum' (summed motion)
           radius, tube radius used when converting the line to a mesh
           kwargs, forwarded to get_mesh_deforms (e.g. invertZ, True or
               False, inverts vertices value for z)
    Output: a list containing one visvis mesh built by lineToMesh
    """
    #todo: mesh deforms_f use correct this way?
    pp, pp_mesh_deforms = get_mesh_deforms(mesh, deforms_f, origin, **kwargs) # removes vertice duplicates
    pp_mesh_displacements = []
    for n_point in range(len(pp_mesh_deforms[0])): # n vertices
        pointDeforms = [] # vertice deforms
        for i in range(len(pp_mesh_deforms)): # n (10) phases
            pointDeform = pp_mesh_deforms[i][n_point]
            pointDeforms.append(np.asarray(pointDeform)[0])
        if motion == 'amplitude': # max distance between two pointpositions
            dmax = _calculateAmplitude(pointDeforms, dim=dim)[0]
            pp_mesh_displacements.append(dmax)
        elif motion == 'sum':
            dsum = _calculateSumMotion(pointDeforms, dim=dim)
            pp_mesh_displacements.append(dsum)
    # create mesh
    # mesh._values
    # NOTE: ``mesh`` is rebound here from the input mesh to the output list.
    mesh = []
    values = np.vstack(pp_mesh_displacements)
    points, values = Pointset(pp), np.asarray(values)
    mesh.append( lineToMesh(points, radius, 8, values) )
    return mesh
import sys
def is_piped_output():
    """
    Check whether this script is being executed with a piped (or otherwise
    redirected) stdout, e.g.::

        python dnsrato.py -d domain.com --format json > outfile.json

    Returns
    -------
    bool
        True if stdout is not attached to a terminal, False otherwise.
    """
    stdout_is_tty = sys.stdout.isatty()
    return not stdout_is_tty
def load(savime_element: LoadableSavimeElement) -> str:
    """
    Build the load query string for a loadable element.

    :param savime_element: A loadable savime element.
    :return: The load query for the savime element.
    """
    query = savime_element.load_query_str()
    return query
def topo_flat(x,y):
    """
    Flat step topography: 30 where x < 204.91213, -30 elsewhere
    (``y`` is accepted but unused).
    """
    return where(x < 204.91213, 30., -30.)
def softmax_xent(*, logits, labels, reduction=True, kl=False):
    """Computes a softmax cross-entropy (Categorical NLL) loss over examples.

    With ``kl=True`` the (negative) label entropy is added, turning the NLL
    into KL(labels || softmax(logits)); labels are clipped at 1e-8 before the
    log to avoid log(0). ``reduction=True`` returns the mean over examples.
    """
    log_probs = jax.nn.log_softmax(logits)
    losses = -jnp.sum(labels * log_probs, axis=-1)
    if kl:
        losses = losses + jnp.sum(labels * jnp.log(jnp.clip(labels, 1e-8)), axis=-1)
    if reduction:
        return jnp.mean(losses)
    return losses
def activate_WS_VCC(activation,
                    Q_cooling_unmet_W,
                    Qc_water_body_remaining_W,
                    Qc_DailyStorage_to_storage_W,
                    Qc_DailyStorage_from_storage_W,
                    T_district_cooling_supply_K,
                    T_district_cooling_return_K,
                    T_source_average_water_body_K,
                    VC_chiller,
                    capacity_VCC_WS_W,
                    daily_storage_class):
    """
    This function activates water source cooling technologies. In case the temperature of the water body is low enough
    (i.e. T_district_cooling_supply_K - DeltaT) the water is used directly for free cooling. If the temperature of the
    water body is higher a water source vapour compression chiller is activated.
    In both cases the available cooling potential is compared with the demand. If the cooling potential is higher than
    the demand the thermal storage is charged, otherwise the storage is discharged.

    :param activation: activation flag; water-source cooling only runs when this equals 1
    :param Q_cooling_unmet_W: unmet district cooling demand [W]
    :param Qc_water_body_remaining_W: remaining cooling potential of the water body [W]
    :param Qc_DailyStorage_to_storage_W: running total of cooling charged to the daily storage [W]
    :param Qc_DailyStorage_from_storage_W: running total of cooling discharged from the daily storage [W]
    :param T_district_cooling_supply_K: district cooling supply temperature [K]
    :param T_district_cooling_return_K: district cooling return temperature [K]
    :param T_source_average_water_body_K: average water body temperature [K]
    :param VC_chiller: vapour compression chiller model/parameters (passed through to calc_vcc_operation)
    :param capacity_VCC_WS_W: installed capacity of the water-source VCC [W]
    :param daily_storage_class: daily thermal storage object with charge/discharge methods
    :return: tuple of updated demand, water body potential, storage state/flows, VCC and free-cooling
        generation figures, and electricity requirements (see the return statement).
    """
    # Initialise variables for water source cooling (VCC or FreeCooling) activation
    Qc_from_water_body_W = 0.0
    Qc_from_activated_cooling_system_W = 0.0
    Qc_water_body_to_storage_W = 0.0
    Qc_DailyStorage_content_W = daily_storage_class.current_storage_capacity_Wh
    Qc_to_storage_W = 0.0
    Qc_from_storage_W = 0.0
    Qc_FreeCooling_WS_directload_W = 0.0
    Qc_FreeCooling_and_DirectStorage_WS_W = 0.0
    E_FreeCooling_req_W = 0.0
    Qc_VCC_WS_gen_directload_W = 0.0
    Qc_VCC_WS_gen_W = 0.0
    Qc_VCC_WS_gen_storage_W = 0.0
    E_VCC_WS_req_W = 0.0
    # VCC water-source OR free cooling using water body
    if activation == 1 and Q_cooling_unmet_W > 0.0 \
            and T_source_average_water_body_K < VCC_T_COOL_IN \
            and not np.isclose(T_district_cooling_supply_K, T_district_cooling_return_K):
        # initialise variables for the water source vapour compression chiller and free cooling calculation
        VCC_WS_activated = False
        FreeCooling_WS_activated = False
        # TODO: Replace the current calculation of the thermal efficiency (Carnot efficiency) to a more realistic value
        thermal_efficiency_VCC = T_district_cooling_supply_K / T_source_average_water_body_K
        Qc_output_VCC_WS_max_W = min(capacity_VCC_WS_W, thermal_efficiency_VCC * Qc_water_body_remaining_W)
        # Activation Case 1: The water temperature doesn't allow for free cooling, therefore the VCC is activated.
        # The unmet cooling demand is larger than the maximum VCC output, therefore the storage is discharged.
        if T_district_cooling_supply_K - T_source_average_water_body_K < DT_COOL \
                and Q_cooling_unmet_W >= Qc_output_VCC_WS_max_W:
            VCC_WS_activated = True
            Qc_VCC_WS_gen_directload_W = Qc_output_VCC_WS_max_W
            Qc_VCC_WS_gen_storage_W = 0.0
            Qc_from_storage_W, Qc_DailyStorage_content_W = \
                daily_storage_class.discharge_storage(Q_cooling_unmet_W - Qc_output_VCC_WS_max_W)
            Qc_VCC_WS_gen_W = Qc_VCC_WS_gen_directload_W + Qc_VCC_WS_gen_storage_W
        # Activation Case 2: The water temperature doesn't allow for free cooling, therefore the VCC is activated.
        # The maximum VCC output is larger than the unmet cooling demand, therefore the storage is charged.
        elif T_district_cooling_supply_K - T_source_average_water_body_K < DT_COOL \
                and Q_cooling_unmet_W < Qc_output_VCC_WS_max_W:
            VCC_WS_activated = True
            Qc_VCC_WS_gen_directload_W = Q_cooling_unmet_W
            Qc_VCC_WS_gen_storage_W, Qc_DailyStorage_content_W = \
                daily_storage_class.charge_storage(Qc_output_VCC_WS_max_W - Q_cooling_unmet_W)
            Qc_from_storage_W = 0.0
            Qc_VCC_WS_gen_W = Qc_VCC_WS_gen_directload_W + Qc_VCC_WS_gen_storage_W
        # Activation Case 3: The water temperature allows for free cooling, therefore the VCC is bypassed.
        # The unmet cooling demand is larger than the water body's cooling potential, hence the storage is discharged.
        elif T_district_cooling_supply_K - T_source_average_water_body_K >= DT_COOL \
                and Q_cooling_unmet_W >= Qc_water_body_remaining_W:
            FreeCooling_WS_activated = True
            Qc_FreeCooling_WS_directload_W = Qc_water_body_remaining_W
            Qc_water_body_to_storage_W = 0.0
            Qc_from_storage_W, Qc_DailyStorage_content_W = \
                daily_storage_class.discharge_storage(Q_cooling_unmet_W - Qc_water_body_remaining_W)
            Qc_FreeCooling_and_DirectStorage_WS_W = Qc_FreeCooling_WS_directload_W + Qc_water_body_to_storage_W
        # Activation Case 4: The water temperature allows for free cooling, therefore the VCC is bypassed.
        # The water body's cooling potential is larger than the unmet cooling demand, therefore the storage is charged.
        elif T_district_cooling_supply_K - T_source_average_water_body_K >= DT_COOL \
                and Q_cooling_unmet_W < Qc_water_body_remaining_W:
            FreeCooling_WS_activated = True
            Qc_FreeCooling_WS_directload_W = Q_cooling_unmet_W
            Qc_water_body_to_storage_W, Qc_DailyStorage_content_W = \
                daily_storage_class.charge_storage(Qc_water_body_remaining_W - Q_cooling_unmet_W)
            Qc_from_storage_W = 0.0
            Qc_FreeCooling_and_DirectStorage_WS_W = Qc_FreeCooling_WS_directload_W + Qc_water_body_to_storage_W
        # Determine the electricity needed for the hydraulic pumps and the VCC if the latter is activated...
        if VCC_WS_activated:
            # TODO: Make sure the water source Peak VCC's cooling output returned from the function below is in
            # accordance with the thermal efficiency definition above
            Qc_VCC_WS_gen_W, \
                E_VCC_WS_req_W = calc_vcc_operation(Qc_VCC_WS_gen_W,
                                                    T_district_cooling_return_K,
                                                    T_district_cooling_supply_K,
                                                    T_source_average_water_body_K,
                                                    capacity_VCC_WS_W,
                                                    VC_chiller)
            # Delta P from linearization after distribution optimization
            E_pump_WS_req_W = calc_water_body_uptake_pumping(Qc_VCC_WS_gen_W,
                                                             T_district_cooling_return_K,
                                                             T_district_cooling_supply_K)
            E_VCC_WS_req_W += E_pump_WS_req_W
            # Calculate metrics for energy balancing
            # The first expression below corresponds to the second law of thermodynamics, assuming that there are no
            # losses to the air (i.e. the water in the VCC is thermally sealed from the surrounding air)
            Qc_from_water_body_W = Qc_VCC_WS_gen_W + E_VCC_WS_req_W
            Qc_from_activated_cooling_system_W = Qc_VCC_WS_gen_W - Qc_VCC_WS_gen_storage_W
            Qc_to_storage_W = Qc_VCC_WS_gen_storage_W
        # ...determine the electricity needed for only the pumps if the systems runs on free cooling.
        elif FreeCooling_WS_activated:
            E_pump_WS_req_W = calc_water_body_uptake_pumping(Qc_FreeCooling_and_DirectStorage_WS_W,
                                                             T_district_cooling_return_K,
                                                             T_district_cooling_supply_K)
            E_FreeCooling_req_W = E_pump_WS_req_W
            E_VCC_WS_req_W = E_pump_WS_req_W  # TODO: Check if direct water body cooling can be displayed separately
            # Calculate metrics for energy balancing
            Qc_from_water_body_W = Qc_FreeCooling_and_DirectStorage_WS_W
            Qc_from_activated_cooling_system_W = Qc_FreeCooling_and_DirectStorage_WS_W - Qc_water_body_to_storage_W
            Qc_to_storage_W = Qc_water_body_to_storage_W
    else:
        print("no water body source peak load VCC was used")
    # energy balance: calculate the remaining cooling potential of the water body, the remaining unmet cooling
    # demand (after contributions from VCC and storage) of the DCN and the cooling provided by the storage
    Qc_water_body_remaining_W -= Qc_from_water_body_W
    Q_cooling_unmet_W = Q_cooling_unmet_W - Qc_from_activated_cooling_system_W - Qc_from_storage_W
    Qc_DailyStorage_to_storage_W += Qc_to_storage_W
    Qc_DailyStorage_from_storage_W += Qc_from_storage_W
    return Q_cooling_unmet_W, Qc_water_body_remaining_W, \
        Qc_DailyStorage_content_W, Qc_DailyStorage_to_storage_W, Qc_DailyStorage_from_storage_W, \
        Qc_VCC_WS_gen_W, Qc_VCC_WS_gen_directload_W, \
        Qc_FreeCooling_and_DirectStorage_WS_W, Qc_FreeCooling_WS_directload_W, \
        E_VCC_WS_req_W, E_FreeCooling_req_W, \
        Qc_from_water_body_W
from typing import Dict
from typing import Any
def sign_block_data(
    data: Dict[str, Any], private_key: str, hash_alg: str = 'keccak256'
) -> Dict[str, str]:
    """Hash the given block data and sign the resulting digest.

    :param data: dict of block data to sign
    :param private_key: hex str (private key)
    :param hash_alg: `keccak256` or `sha256`, the default value is `keccak256`
    :return: dict, {'hash': _hash, 'signature': sign_hex}
    """
    digest = hash_block_data(data, hash_alg=hash_alg)
    return sign_hash(digest, private_key)
def convert_label(label_img):
    """Convert raw label values to compact class ids.

    Maps 10 -> 1, 150 -> 2 and 250 -> 3; all other values are kept (cast to
    uint8).

    :param label_img: 3-d label array (the original iterated slices along
        the last axis; this version maps all slices at once)
    :return: uint8 array of the same shape with remapped labels

    BUGFIX: the original modified slices of ``label_img`` in place via views,
    mutating the caller's array as a side effect. This version leaves the
    input untouched.
    """
    # astype() returns a copy, so the input array is never written to.
    label_processed = label_img.astype(np.uint8)
    for raw_value, class_id in ((10, 1), (150, 2), (250, 3)):
        label_processed[label_img == raw_value] = class_id
    return label_processed
def update_hand(hand, word):
    """
    Return a new hand with the letters of ``word`` used up.

    Does NOT assume that hand contains every letter in word at least as
    many times as the letter appears in word: letters in word that don't
    appear in hand are ignored, and counts never go negative — letters
    whose count reaches 0 are removed from the returned hand.

    Has no side effects: does not modify ``hand``.

    word: string
    hand: dictionary (string -> int)
    returns: dictionary (string -> int)
    """
    remaining = dict(hand)
    for ch in word.lower():
        count = remaining.get(ch)
        if count:
            remaining[ch] = count - 1
    # Drop letters we've run out of
    return {ch: n for ch, n in remaining.items() if n > 0}
def list_zeroes(line):
    """
    Return a list containing only the zero entries of ``line`` (i.e. all
    non-zero elements removed).
    """
    return [value for value in line if value == 0]
def ecef_to_lla2(x_m, y_m, z_m):
    """Convert ECEF cartesian coordinates to WGS84 spherical coordinates.

    Converts an earth-centered, earth-fixed (ECEF) cartesian position to
    geodetic latitude, longitude and altitude on the WGS84 ellipsoid.

    Parameters
    ----------
    x_m, y_m, z_m : float or array
        X, Y, Z coordinates, in metres

    Returns
    -------
    lat_rad : float or array
        Latitude (customary geodetic, not geocentric), in radians
    lon_rad : float or array
        Longitude, in radians, in the range [0, 2*pi)
    alt_m : float or array
        Altitude, in metres above the WGS84 ellipsoid

    Notes
    -----
    Mirrors the algorithm from the CONRAD codebase (conradmisclib); it is
    nearly identical to :func:`ecef_to_lla` but returns lon/lat in
    different ranges.
    """
    # WGS84 ellipsoid constants
    semi_major = 6378137.0               # semi-major axis of Earth in m
    ecc = 8.1819190842622e-2             # first eccentricity of Earth
    semi_minor = np.sqrt(semi_major**2 * (1.0 - ecc**2))
    ecc_prime = np.sqrt((semi_major**2 - semi_minor**2) / semi_minor**2)
    radial = np.sqrt(x_m**2 + y_m**2)
    theta = np.arctan2(semi_major * z_m, semi_minor * radial)
    lon_rad = np.arctan2(y_m, x_m)
    lat_rad = np.arctan2(z_m + ecc_prime**2 * semi_minor * np.sin(theta)**3,
                         radial - ecc**2 * semi_major * np.cos(theta)**3)
    prime_vertical = semi_major / np.sqrt(1.0 - ecc**2 * np.sin(lat_rad)**2)
    alt_m = radial / np.cos(lat_rad) - prime_vertical
    # Wrap longitude into [0, 2*pi)
    lon_rad = np.mod(lon_rad, 2.0 * np.pi)
    # Correct numerical instability in altitude very close to the poles
    # (residual error ~2 mm, on par with the overall numerical precision)
    if np.isscalar(alt_m):
        if (abs(x_m) < 1.0) and (abs(y_m) < 1.0):
            alt_m = abs(z_m) - semi_minor
    else:
        near_poles = (np.abs(x_m) < 1.0) & (np.abs(y_m) < 1.0)
        alt_m[near_poles] = np.abs(z_m[near_poles]) - semi_minor
    return lat_rad, lon_rad, alt_m
def ObjectToDict(obj):
    """Convert an object into a dict of its non-dunder attributes."""
    return {name: getattr(obj, name)
            for name in dir(obj) if not name.startswith("__")}
def writeCmd(cmd):
    """Send a single integer command to the microcontroller.

    The command is framed as '<cmd>' before being written to the serial
    port; afterwards the microcontroller's confirmation is awaited.
    Returns the reply, or -1 when the reply itself was -1 (no ack).
    """
    # Drain any stale input before sending.  Needed? TODO
    while ser.in_waiting > 0:
        readLine()
    print("Write command: " + str(cmd))
    framed = '<' + str(cmd) + '>'
    ser.write(framed.encode("utf-8"))
    reply = readCmd()
    if reply != -1:
        return reply
    print("CMD no worky :(")
    return -1
def ortho(subj_coord, obj_coord, subj_dim, obj_dim):
    """Combine two 1-D extents that share one axis.

    Returns a tuple of 3 values: the dimension of the combined array,
    the component of subj_origin in it, and the component of obj_origin
    in it.
    """
    delta = subj_coord - obj_coord
    if delta > 0:
        # Subject origin lies further along: shift the object by delta.
        return (subj_coord + obj_dim - obj_coord, 0, delta)
    if delta < 0:
        # Object origin lies further along: shift the subject instead.
        return (obj_coord + subj_dim - subj_coord, -delta, 0)
    # Same origin: the larger extent already has room for the other,
    # so no shift and no growth of dims is needed.
    return (max(subj_dim, obj_dim), 0, 0)
def _unravel(nodes,tets,index):
"""Returns a list containing the node coordinates of the tet
stored in the 'index' position in the 'tets' list."""
return [nodes[tets[index][0]],nodes[tets[index][1]],nodes[tets[index][2]],nodes[tets[index][3]]] | e8428de351db2a84a4875a81b47d07b03a67efd9 | 3,628,637 |
def pairwise(accuracy_balanced, method_names, out_results_dir, num_repetitions):
    """
    Produces a matrix of pair-wise significance tests,
    where each cell [i, j] answers the question:
    is method i significantly better than method j?
    The result would be based on a test of choice.
    The default test would be non-parametric Friedman test.

    accuracy_balanced : array of shape (num_reps, num_methods); each CV
        repetition is treated as a performance measurement on a new
        dataset, although reps are not independent of each other.
    out_results_dir, num_repetitions are currently unused (kept for
    interface compatibility with the caller).
    """
    num_methods = len(method_names)
    median_bal_acc = np.nanmedian(accuracy_balanced, axis=0)
    # BUG FIX: the original called np.rank(), which returned the number of
    # array dimensions (and has been removed from NumPy) — what is needed
    # is the ranking of methods by median accuracy.
    # NOTE(review): rank 1 is assigned to the highest median accuracy;
    # confirm this matches the convention in compute_critical_dist().
    ranks = np.argsort(np.argsort(-median_bal_acc)) + 1
    critical_dist = compute_critical_dist(ranks)
    signif_matrix = np.full([num_methods, num_methods], np.nan)
    for m1 in range(num_methods):
        # BUG FIX: the upper bound was num_methods + 1, which indexed one
        # past the end of signif_matrix and raised IndexError.
        for m2 in range(m1 + 1, num_methods):
            signif_matrix[m1, m2] = check_if_better(ranks[m1], ranks[m2],
                                                    critical_dist)
    return signif_matrix
def visdom_image(vis, image, win_name):
    """Render a single image tensor in a named visdom window.

    e.g. ``visdom_image(vis=vis, image=drawn_image, win_name='image')``

    :param vis: visdom connection created by ``setup_visdom``
    :param image: single image tensor of shape [n, w, h]
    :param win_name: plot window name; must be given, otherwise a new
        window is created on every call
    :return: always True
    """
    vis.image(img=image, win=win_name)
    return True
def generate_splits_type3(in_data,
                          slot_data,
                          intent_data,
                          instance_types_per_client=3,
                          clients_per_instance_type=3):
    """Creates non-IID splits of the dataset. Each client is given only a fixed number
    of intent types. This is different from type2 since in type 2 each intent type belongs
    exclusively to a certain user but in type 3 the instances having the same intent type can
    belong to different users.

    :param in_data: input utterances
    :param slot_data: slot labels aligned with in_data
    :param intent_data: intent label per utterance
    :param instance_types_per_client: number of intent types per client
    :param clients_per_instance_type: number of clients sharing each intent type
    :return: (fed_data, num_clients) as produced by create_tff_dataset
    """
    unique_intents = np.unique(intent_data)
    np.random.shuffle(unique_intents)
    num_clients = int(
        np.ceil(len(unique_intents) /
                float(instance_types_per_client))) * clients_per_instance_type
    client_list = []
    # Create a list of shuffled client ids
    for _ in range(
            int(
                np.ceil(clients_per_instance_type * len(unique_intents) /
                        num_clients))):
        client_shuffled = np.arange(num_clients)
        np.random.shuffle(client_shuffled)
        client_list.append(client_shuffled)
    client_list = np.concatenate(client_list)
    client_idxs = defaultdict(list)
    for idx, intent_id in enumerate(unique_intents):
        # select a subset of clients for each instance
        client_ids = client_list[idx * clients_per_instance_type:(idx + 1) *
                                 clients_per_instance_type]
        # generate a random multinomial distribution
        # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
        intent_client_distribution = np.random.randint(
            low=0, high=1000, size=clients_per_instance_type).astype(float)
        intent_client_distribution /= np.sum(intent_client_distribution)
        intent_idxs = np.where(np.array(intent_data).squeeze() == intent_id)[0]
        # sample from the distribution
        client_idx_distribution = np.random.multinomial(
            1, intent_client_distribution, size=len(intent_idxs))
        client_idx_distribution = np.argmax(client_idx_distribution, axis=1)
        for i, client_id in enumerate(client_ids):
            client_idxs[client_id] += intent_idxs[(
                client_idx_distribution == i)].tolist()
    fed_data = create_tff_dataset(client_idxs, in_data, slot_data, intent_data,
                                  num_clients)
    return fed_data, num_clients
import os
import inspect
import tempfile
import subprocess
import re
def is_module_installed(module_name, version=None, installed_version=None,
                        interpreter=None):
    """Return True if module *module_name* is installed

    If version is not None, check the module version
    (module must have an attribute named '__version__').
    version may start with =, >=, > or < to specify the exact requirement ;
    multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0')

    installed_version: skip importing the module and use this version string
    interpreter: check if a module is installed with a given version
    in a determined interpreter (path to a python executable)"""
    if interpreter:
        # Run the check inside the *target* interpreter's environment by
        # generating a throw-away script that embeds the sources of the
        # version-checking helpers and executing it with that interpreter.
        # NOTE(review): osp and TEMPDIR are module-level names defined
        # elsewhere in this file (os.path alias and a temp directory).
        if not osp.isdir(TEMPDIR):
            os.mkdir(TEMPDIR)
        if osp.isfile(interpreter) and ('python' in interpreter):
            checkver = inspect.getsource(check_version)
            get_modver = inspect.getsource(get_module_version)
            ismod_inst = inspect.getsource(is_module_installed)
            fd, script = tempfile.mkstemp(suffix='.py', dir=TEMPDIR)
            with os.fdopen(fd, 'w') as f:
                f.write("# -*- coding: utf-8 -*-" + "\n\n")
                f.write("from distutils.version import LooseVersion" + "\n")
                f.write("import re" + "\n\n")
                f.write(checkver + "\n")
                f.write(get_modver + "\n")
                f.write(ismod_inst + "\n")
                # NOTE(review): the generated script uses a Python-2 print
                # statement, so the target interpreter must be Python 2 —
                # TODO confirm intended interpreter versions.
                if version:
                    f.write("print is_module_installed('%s','%s')"\
                            % (module_name, version))
                else:
                    f.write("print is_module_installed('%s')" % module_name)
            try:
                output = subprocess.Popen([interpreter, script],
                                          stdout=subprocess.PIPE).communicate()[0]
            except subprocess.CalledProcessError:
                output = 'True'
            if output:  # TODO: Check why output could be empty!
                # eval() of the subprocess output: the evaluated text comes
                # from the script generated above ('True'/'False'), not
                # from the user.
                return eval(output)
            else:
                return False
        else:
            # Try to not take a wrong decision if there is no interpreter
            # available (needed for the change_pystartup method of ExtConsole
            # config page)
            return True
    else:
        if installed_version is None:
            try:
                actver = get_module_version(module_name)
            except ImportError:
                # Module is not installed
                return False
        else:
            actver = installed_version
        if actver is None and version is not None:
            return False
        elif version is None:
            return True
        else:
            if ';' in version:
                # Multiple conditions separated by ';' must all hold.
                output = True
                for ver in version.split(';'):
                    output = output and is_module_installed(module_name, ver)
                return output
            # Split the condition into comparison symbol and version number.
            match = re.search('[0-9]', version)
            assert match is not None, "Invalid version number"
            symb = version[:match.start()]
            if not symb:
                symb = '='
            assert symb in ('>=', '>', '=', '<'),\
                "Invalid version condition '%s'" % symb
            version = version[match.start():]
            return check_version(actver, version, symb)
def file_num2size(num_size, h=True):
    """Render a raw byte count as a human-readable size.

    :param num_size: size in bytes
    :param h: when True scale up through KB/MB/..., when False keep bytes
    :return: {'value': number, 'measure': unit, 'str': combined string,
              'org_size': original size}
    """
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    value = num_size
    unit_idx = 0
    if h:
        # Scale down by 1024 while a larger unit exists to move to.
        while value >= 1024 and unit_idx < len(units):
            value = value / 1024.0
            unit_idx += 1
    # Clamp to the largest known unit.
    unit_idx = min(unit_idx, len(units) - 1)
    if not isinstance(value, int):
        value = round(value, 2)
    return {'value': value,
            'measure': units[unit_idx],
            'str': str(value) + units[unit_idx],
            'org_size': num_size}
def cartopy_ylim(var=None, geobounds=None, wrfin=None, varname=None, timeidx=0,
                 method="cat", squeeze=True, cache=None):
    """Return the y-axis limits in the projected coordinates.

    For some map projections, like :class:`wrf.RotatedLatLon`, the
    :meth:`cartopy.GeoAxes.set_extent` method does not work correctly,
    so the extent is computed directly from the WRF projection object by
    transforming the corner lat/lon points into projected space.

    Args:
        var (:class:`xarray.DataArray`, optional): Variable with
            latitude/longitude coordinate information.  If not used,
            then *wrfin* must be provided.
        geobounds (:class:`wrf.GeoBounds`, optional): Bounds for the
            extent.  When None, the bounds are taken from *var* or from
            the file's native grid.
        wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an
            iterable, optional): WRF-ARW NetCDF data; required when
            *var* is not given.
        varname (:obj:`str`, optional): Variable name used to determine
            the geobounds when reading from *wrfin*.  When None, the
            'XLAT', 'XLAT_M', 'XLONG', 'XLONG_M' variables are used.
        timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): Desired
            time index.  Default is 0.
        method (:obj:`str`, optional): Aggregation for sequences, either
            'cat' or 'join'.  Default is 'cat'.
        squeeze (:obj:`bool`, optional): Set to False to keep size-1
            dimensions in the output shape.  Default is True.
        cache (:obj:`dict`, optional): Dictionary of pre-extracted
            (varname, ndarray) pairs.  Default is None.

    Returns:
        :obj:`list`: ``[start_y, end_y]`` in the projected coordinate
        system.
    """
    wrf_proj, native_geobnds = _get_wrf_proj_geobnds(var, wrfin, varname,
                                                     timeidx, method,
                                                     squeeze, cache)
    bounds = geobounds if geobounds is not None else native_geobnds
    return wrf_proj.cartopy_ylim(bounds)
def get_user_csc_name():
    """Get user csc name from saml userdata.

    Returns:
        [string] -- The user's CSC username; ``None`` when the user is
        not an authenticated CSC user or no SAML data is in the session;
        the value of ``not_found('csc_name')`` when the SAML attribute
        is missing.
    """
    if not is_authenticated() or not is_authenticated_CSC_user() or 'samlUserdata' not in session:
        return None
    csc_name = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('CSC_username', None), False)
    # SAML attribute values arrive as lists; take the first entry.
    # BUG FIX: a trailing 'return None' after this statement was
    # unreachable dead code and has been removed.
    return csc_name[0] if csc_name else not_found('csc_name')
import os
import tarfile
def get_ip_representations_info(_, identifier):
    """
    get:
    Get data set structure

    Looks up the information package *identifier* in pairtree storage and
    returns a JSON mapping of each representation tar file to the list of
    member names it contains.  Responds 404 when the package is unknown.
    """
    try:
        dpts = DirectoryPairtreeStorage(config_path_storage)
        object_path = dpts.get_object_path(identifier)
        package_path = os.path.join(object_path, representations_directory)
        tar_files = list_files_in_dir(package_path)
        structure = {}
        for tar_file in tar_files:
            distribution_tar_path = os.path.join(package_path, tar_file)
            # BUG FIX: the tar file handle was never closed; the context
            # manager releases it even if getnames() raises.
            with tarfile.open(distribution_tar_path, 'r') as t:
                structure[tar_file] = t.getnames()
        return JsonResponse(structure, status=200)
    except ObjectNotFoundException:
        return JsonResponse({"message": "Information package does not exist in storage"}, status=404)
def precision_Approximation(*args):
    """
    * Returns the precision value in real space, frequently used by
    approximation algorithms.  This function provides an acceptable level
    of precision for an approximation process to define adjustment limits.
    The tolerance of approximation is designed to ensure an acceptable
    computation time when performing an approximation process; that is why
    it is greater than the tolerance of confusion.  The tolerance of
    approximation is equal to: Precision::Confusion() * 10. (that is,
    1.e-6).  You may use a smaller tolerance in an approximation
    algorithm, but this option might be costly.
    :rtype: float
    """
    # Delegates to the SWIG-generated _Precision binding.
    return _Precision.precision_Approximation(*args)
import argparse
def get_args():
    """Parse and return the command-line arguments (one input file)."""
    parser = argparse.ArgumentParser(
        description='Rock the Casbah',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # The positional FILE argument is opened for reading at parse time.
    parser.add_argument('file',
                        metavar='FILE',
                        type=argparse.FileType('rt'),
                        default=None,
                        help='Input file')
    return parser.parse_args()
import os
def get_population_data(read_data=dd.defaultDict['read_data'],
                        file_format=dd.defaultDict['file_format'],
                        out_folder=dd.defaultDict['out_folder'],
                        no_raw=dd.defaultDict['no_raw'],
                        split_gender=False,
                        merge_eisenach=True):
    """! Download age-stratified population data for the German counties.

    There are two different data sources that can be transformed in a simple
    data file with age-resolved information per county and on which our other
    tools can continue to operate on.

    1.) Official 'Bevölkerungsfortschreibung' 12411-02-03-4:
    'Bevölkerung nach Geschlecht und Altersgruppen (17)'
    of regionalstatistik.de.
    ATTENTION: The raw file cannot be downloaded
    automatically by our scripts without an Genesis Online account. In order to
    work on this dataset, please manually download it from:

    https://www.regionalstatistik.de/genesis/online -> "1: Gebiet, Bevölkerung,
    Arbeitsmarkt, Wahlen" -> "12: Bevölkerung" -> "12411 Fortschreibung des
    Bevölkerungsstandes" ->  "12411-02-03-4: Bevölkerung nach Geschlecht und
    Altersgruppen (17) - Stichtag 31.12. - regionale Tiefe: Kreise und
    krfr. Städte".

    Download the xlsx file and put it under dd.defaultDict['out_folder'],
    this normally is Memilio/data/pydata/Germany.
    The folders 'pydata/Germany' have to be created if they do not exist yet.
    Then this script can be run.
    TODO: The following parameters have no effect for this source.

    2.) Combination of data from Federal Statistical Office of Germany and
    Zensus2011 to compute very simple approximations of age-stratified number
    of inhabitants per county. Population from the Zensus data of all age groups is scaled to the
    total population of our more recent migration data by a factor which
    represents the relative increase/decrease in population size
    between 2011 and 2019 for each county"
    This data can either be downloaded automatically or read from
    "out_folder"/Germany/ if it was downloaded before.

    @param read_data False or True. Defines if data is read from file or
        downloaded. Default defined in defaultDict.
    @param file_format File format which is used for writing the data.
        Default defined in defaultDict.
    @param out_folder Path to folder where data is written in folder
        out_folder/Germany. Default defined in defaultDict.
    @param no_raw True or False. Defines if unchanged raw data is written or
        not. Default defined in defaultDict.
    @param split_gender [Default: False] or True. Defines whether data is
        splitted by gender
    @param merge_eisenach [Default: True] or False. Defines whether the
        counties 'Wartburgkreis' and 'Eisenach' are listed separately or
        combined as one entity 'Wartburgkreis'.
    @return DataFrame with adjusted population data for all ages to current level.
    """
    directory = os.path.join(dd.defaultDict['out_folder'], 'Germany')
    filename = '12411-02-03-4'  # '12411-09-01-4-B'
    new_data_file = os.path.join(directory, filename)
    new_data_avail = os.path.isfile(new_data_file + '.xlsx')

    if new_data_avail:
        # --- Source 1: manually downloaded regionalstatistik.de table ---
        print('Information: Using new population data file ' + filename)
        df_pop_raw = gd.loadExcel(
            new_data_file, apiUrl='', extension='.xlsx',
            param_dict={"engine": None, "sheet_name": filename, "header": 4})
        column_names = list(df_pop_raw.columns)
        # rename columns
        rename_columns = {
            column_names[0]: dd.EngEng['idCounty'],
            column_names[1]: dd.EngEng['county'],
            column_names[2]: dd.EngEng['ageRKI'],
            column_names[3]: dd.EngEng['number'],
            column_names[4]: dd.EngEng['male'],
            column_names[5]: dd.EngEng['female'],
        }
        df_pop_raw.rename(columns=rename_columns, inplace=True)
        # remove date and explanation rows at end of table
        # -33 for '12411-09-01-4-B', -38 for '12411-02-03-4'
        df_pop_raw = df_pop_raw[:-38].reset_index(drop=True)
        # get indices of counties first lines
        # (the county id column is only filled on the first row of each
        # county's 17-age-group stanza)
        idCounty_idx = list(df_pop_raw.index[
            df_pop_raw[dd.EngEng['idCounty']].isna() == False])

        # read county list and create output data frame
        counties = np.array(geoger.get_county_names_and_ids(
            merge_berlin=True, merge_eisenach=False, zfill=True))
        new_cols = {dd.EngEng['idCounty']: counties[:, 1],
                    dd.EngEng['county']: counties[:, 0]}

        # Parse the German age-group labels of the first county stanza into
        # canonical 'lower-upper' strings (e.g. 'unter 3' -> '0-2').
        age_cols = df_pop_raw.loc[
            idCounty_idx[0]: idCounty_idx[1] - 2,  # -1 for '12411-09-01-4-B'
            dd.EngEng['ageRKI']].copy().values
        nage = len(age_cols)
        for i in range(len(age_cols)):
            if i == 0:
                upper_bound = str(int(age_cols[i][
                    age_cols[i].index('unter ')+6:].split(' ')[0])-1)
                age_cols[i] = '0-' + upper_bound
            elif i == len(age_cols)-1:
                lower_bound = age_cols[i].split(' ')[0]
                age_cols[i] = lower_bound + '-99'
            else:
                lower_bound = age_cols[i].split(' ')[0]
                upper_bound = str(int(age_cols[i][
                    age_cols[i].index('unter ')+6:].split(' ')[0])-1)
                age_cols[i] = lower_bound + '-' + upper_bound

        # add age_cols with zero initilization to new_cols
        new_cols.update({age: 0 for age in age_cols})

        df_pop = pd.DataFrame(new_cols)

        empty_data = ['.', '-']
        for i in idCounty_idx:
            # county information needed
            if df_pop_raw.loc[i, dd.EngEng['idCounty']] in counties[:, 1]:
                # direct assignment of population data found
                df_pop.loc[df_pop[dd.EngEng['idCounty']] == df_pop_raw.loc
                           [i, dd.EngEng['idCounty']],
                           age_cols] = df_pop_raw.loc[i: i + nage - 1, dd.EngEng
                                                      ['number']].values
            # Berlin and Hamburg
            elif df_pop_raw.loc[i, dd.EngEng['idCounty']] + '000' in counties[:, 1]:
                # direct assignment of population data found
                df_pop.loc[df_pop[dd.EngEng['idCounty']] == df_pop_raw.loc
                           [i, dd.EngEng['idCounty']] + '000',
                           age_cols] = df_pop_raw.loc[i: i + nage - 1, dd.EngEng
                                                      ['number']].values
            # empty rows
            elif df_pop_raw.loc[i: i + nage - 1,
                                dd.EngEng['number']].values.any() in empty_data:
                if not df_pop_raw.loc[i: i + nage - 1,
                                      dd.EngEng['number']].values.all() in empty_data:
                    raise gd.DataError(
                        'Error. Partially incomplete data for county ' +
                        df_pop_raw.loc[i, dd.EngEng['idCounty']])
            # additional information for local entities not needed
            elif df_pop_raw.loc[i, dd.EngEng['idCounty']] in ['03241001', '05334002', '10041100']:
                pass
            # Germany, federal states, and governing regions
            elif len(df_pop_raw.loc[i, dd.EngEng['idCounty']]) < 5:
                pass
            else:
                print('no data for ' + df_pop_raw.loc
                      [i, dd.EngEng['idCounty']])
                raise gd.DataError(
                    'Error. County ID in input population data'
                    'found which could not be assigned.')

        # Sanity check against the total German population of the table's
        # reference date.  NOTE(review): this constant must be updated when
        # a table with a newer reference date is used — TODO confirm.
        if df_pop[age_cols].sum().sum() != 83155031:
            raise gd.DataError('Wrong total population size')

        # Aggregate the 17 raw age groups into the 11 export age groups.
        new_cols = [
            dd.EngEng['idCounty'],
            dd.EngEng['population'],
            '<3 years', '3-5 years', '6-14 years', '15-17 years',
            '18-24 years', '25-29 years', '30-39 years', '40-49 years',
            '50-64 years', '65-74 years', '>74 years']
        df_pop_export = pd.DataFrame(columns=new_cols)
        df_pop_export[df_pop.columns[0]] = df_pop[dd.EngEng['idCounty']]
        # <3 and 3-5
        df_pop_export[df_pop_export.columns[2:4]] = df_pop[df_pop.columns[2:4]]
        # 6-14
        df_pop_export[df_pop_export.columns[4]] = \
            df_pop[df_pop.columns[4:6]].sum(axis=1)
        # 15-17
        df_pop_export[df_pop_export.columns[5]] = df_pop[df_pop.columns[6]]
        # 18-24
        df_pop_export[df_pop_export.columns[6]] = \
            df_pop[df_pop.columns[7:9]].sum(axis=1)
        # 25-29
        df_pop_export[df_pop_export.columns[7]] = df_pop[df_pop.columns[9]]
        # 30-39
        df_pop_export[df_pop_export.columns[8]] = \
            df_pop[df_pop.columns[10:12]].sum(axis=1)
        # 40-49
        df_pop_export[df_pop_export.columns[9]] = \
            df_pop[df_pop.columns[12:14]].sum(axis=1)
        # 50-64
        df_pop_export[df_pop_export.columns[10]] = \
            df_pop[df_pop.columns[14:17]].sum(axis=1)
        # 65-74
        df_pop_export[df_pop_export.columns[11]] = df_pop[df_pop.columns[17]]
        # >74
        df_pop_export[df_pop_export.columns[12]] = df_pop[df_pop.columns[18]]

        df_pop_export[dd.EngEng['population']
                      ] = df_pop_export.iloc[:, 2:].sum(axis=1)

        directory = os.path.join(out_folder, 'Germany/')
        gd.check_dir(directory)

        filename = 'county_current_population_dim401'
        gd.write_dataframe(df_pop_export, directory, filename, file_format)

        if merge_eisenach == True:
            filename = 'county_current_population'

            # Merge Eisenach and Wartburgkreis
            df_pop_export = geoger.merge_df_counties_all(
                df_pop_export, sorting=[dd.EngEng["idCounty"]],
                columns=dd.EngEng["idCounty"])

            gd.write_dataframe(df_pop_export, directory, filename, file_format)

        return df_pop_export

    else:
        # --- Source 2: Zensus 2011 data scaled to current migration data ---
        counties, zensus, reg_key = load_population_data(
            out_folder, read_data=read_data, no_raw=no_raw,
            file_format=file_format)

        # find region keys for census population data
        # (match by name AND by rounded population to disambiguate)
        key = np.zeros((len(zensus)))
        for i in range(len(key)):
            for j in range(len(reg_key)):
                if zensus['Name'].values[i] == reg_key['NAME'].values.astype(
                        str)[j]:
                    if zensus[dd.invert_dict(dd.GerEng)[dd.EngEng['population']]].values[i] == round(
                            reg_key['Zensus_EWZ'].values[j] * 1000):
                        key[i] = reg_key['AGS'].values[j]

        inds = np.unique(key, return_index=True)[1]
        # columns of downloaded data which should be replaced
        male = ['M_Unter_3', 'M_3_bis_5', 'M_6_bis_14', 'M_15_bis_17',
                'M_18_bis_24', 'M_25_bis_29', 'M_30_bis_39', 'M_40_bis_49',
                'M_50_bis_64', 'M_65_bis_74', 'M_75_und_aelter']
        female = ['W_Unter_3', 'W_3_bis_5', 'W_6_bis_14', 'W_15_bis_17',
                  'W_18_bis_24', 'W_25_bis_29', 'W_30_bis_39', 'W_40_bis_49',
                  'W_50_bis_64', 'W_65_bis_74', 'W_75_und_aelter']

        if not split_gender:
            # get data from zensus file and add male and female population data
            data = np.zeros((len(inds), len(male)+2))
            data[:, 0] = key[inds].astype(int)
            data[:, 1] = zensus[dd.invert_dict(
                dd.GerEng)[dd.EngEng['population']]].values[inds].astype(int)
            for i in range(len(male)):
                data[:, i+2] = zensus[male[i]].values[inds].astype(
                    int) + zensus[female[i]].values[inds].astype(int)

            # define new columns for dataframe
            columns = [
                dd.EngEng['idCounty'],
                dd.EngEng['population'],
                '<3 years', '3-5 years', '6-14 years', '15-17 years',
                '18-24 years', '25-29 years', '30-39 years', '40-49 years',
                '50-64 years', '65-74 years', '>74 years']
        else:
            # get data from zensus file
            data = np.zeros((len(inds), len(male)+len(female)+2))
            data[:, 0] = key[inds].astype(int)
            data[:, 1] = zensus[dd.invert_dict(
                dd.GerEng)[dd.EngEng['population']]].values[inds].astype(int)
            for i in range(len(male)):
                data[:, i+2] = zensus[male[i]].values[inds].astype(int)
            for i in range(len(female)):
                data[:, i+len(male)+2] = zensus[female[i]
                                                ].values[inds].astype(int)

            # define new columns for dataframe
            columns = [
                dd.EngEng['idCounty'],
                dd.EngEng['population'],
                'M <3 years', 'M 3-5 years', 'M 6-14 years', 'M 15-17 years',
                'M 18-24 years', 'M 25-29 years', 'M 30-39 years',
                'M 40-49 years', 'M 50-64 years', 'M 65-74 years',
                'M >74 years', 'F <3 years', 'F 3-5 years', 'F 6-14 years',
                'F 15-17 years', 'F 18-24 years', 'F 25-29 years',
                'F 30-39 years', 'F 40-49 years', 'F 50-64 years',
                'F 65-74 years', 'F >74 years']

        data = get_new_counties(data)

        # create Dataframe of raw data without adjusting population
        df = pd.DataFrame(data.astype(int), columns=columns)

        # compute ratio of current and 2011 population data
        ratio = np.ones(len(data[:, 0]))
        for i in range(len(ratio)):
            for j in range(len(counties)):
                if not counties['Schlüssel-nummer'].isnull().values[j]:
                    try:
                        if data[i, 0] == int(
                                counties['Schlüssel-nummer'].values[j]):
                            ratio[i] = counties['Bevölkerung2)'].values[j]/data[i, 1]

                    except ValueError:
                        # non-numeric key entries in the county table are skipped
                        pass

        # adjust population data for all ages to current level
        data_current = np.zeros(data.shape)
        data_current[:, 0] = data[:, 0].copy()
        for i in range(len(data[0, :]) - 1):
            data_current[:, i + 1] = np.multiply(data[:, i + 1], ratio)

        # create dataframe
        df_current = pd.DataFrame(
            np.round(data_current).astype(int), columns=columns)

        directory = os.path.join(out_folder, 'Germany/')
        gd.check_dir(directory)

        if merge_eisenach == True:
            # Merge Eisenach and Wartburgkreis
            df_current = geoger.merge_df_counties_all(
                df_current, sorting=[dd.EngEng["idCounty"]],
                columns=dd.EngEng["idCounty"])
            df = geoger.merge_df_counties_all(
                df, sorting=[dd.EngEng["idCounty"]],
                columns=dd.EngEng["idCounty"])
            filename = 'county_current_population'
            filename_raw = 'county_population'
        else:  # Write Dataframe without merging
            filename = 'county_current_population_dim401'
            filename_raw = 'county_population_dim401'

        gd.write_dataframe(df_current, directory, filename, file_format)
        gd.write_dataframe(df, directory, filename_raw, file_format)

        return df_current
import time
def getEpoch():
    """
    Return the Unix epoch divided by a constant as string.

    This function returns a coarse-grained version of the Unix epoch.  The
    seconds passed since the epoch are divided by the constant
    `EPOCH_GRANULARITY'.
    """
    # BUG FIX: use floor division — under Python 3, '/' yields a float and
    # the returned string would no longer be a coarse-grained integer epoch
    # (the original code relied on Python-2 integer division).
    return str(int(time.time()) // const.EPOCH_GRANULARITY)
def build_py_idiv(builder, a, b, name):
    """Build the LLVM IR expression for Python-style floor integer division.

    Mirrors the Cython lowering:
        long q = a / b;
        long r = a - q*b;
        q -= ((r != 0) & ((r ^ b) < 0));
        return q;
    """
    quot = BuildSDiv(builder, a, b, name + "_q")
    prod = BuildMul(builder, quot, b, name + "_r")
    rem = BuildSub(builder, a, prod, name + "_sub")
    # TODO Assumes signed integers
    zero = ConstNull(TypeOf(rem))
    rem_nonzero = BuildICmp(builder, IntNE, rem, zero, name + "_cmp_1")
    # The truncated quotient needs a -1 correction exactly when the
    # remainder is non-zero and its sign differs from the divisor's.
    sign_differs = BuildICmp(builder, IntSLT,
                             BuildXor(builder, rem, b, name + "_xor"),
                             zero, name + "_cmp_2")
    correction = BuildAnd(builder, rem_nonzero, sign_differs, name + "_q_and")
    widened = BuildCast(builder, ZExt, correction, TypeOf(a), name + "_cast")
    return BuildSub(builder, quot, widened, name)
def moveeffect_02C(score: int, move: Move, user: Pokemon, target: Pokemon, battle: AbstractBattle) -> int:
    """
    Move Effect Name: Increases user's special attack and special defense (Calm Mind)
    """
    if user.first_turn:
        score += 40
    spa_stage = user.boosts.get("spa", 0)
    spd_stage = user.boosts.get("spd", 0)
    if spa_stage == 6 and spd_stage == 6:
        # Both stats already maxed out: boosting again is wasted.
        score -= 90
    else:
        # Penalize proportionally to how far the boosts have gone already.
        score -= 10 * (spa_stage + spd_stage)
    # Calm Mind is only worthwhile if the user knows a special move.
    knows_special = any(mv.category == MoveCategory.SPECIAL
                        for mv in user.moves.values())
    score += 20 if knows_special else -90
    return score
import json
def all_players_are_ready(game_id):
    """Return True when the game exists and every player is ready.

    Returns None when the game id is unknown, False while any player's
    status is not 'ready'.
    """
    # TODO: acquire lock for game_id
    raw_game = redis.get(game_id)
    if raw_game is None:
        # TODO: release lock for game_id
        return
    players = json.loads(raw_game)['players']
    ready = all(player['status'] == 'ready' for player in players)
    # TODO: release lock for game_id
    return ready
from pathlib import Path
import os
import shutil
def copy_file(src, dst):
    """
    Copy the file (or folder) to the specified directory.
    You can copy multiple files to the specified directory by listing.

    :param src: a path string, or a list of path strings
    :param dst: destination directory (created if it does not exist)
    :return: an error code string on failure ('SRC_INVAILD', 'DST_INVAILD'
        or 'SRC_NOT_FOUND'), otherwise None.  When *src* is a list, bad
        entries are skipped silently (original behaviour).
    """
    src_type = typeof(src)
    dst_type = typeof(dst)
    # BUG FIX: the original check `not src_type == 'str' or 'list'` was
    # always true ('list' is a truthy constant), so EVERY call returned
    # 'SRC_INVAILD'.
    if src_type not in ('str', 'list'):
        return 'SRC_INVAILD'
    if dst_type != 'str':
        return 'DST_INVAILD'
    try:
        dst_dir = Path(dst)
    except TypeError:
        return 'DST_INVAILD'
    if not os.path.exists(dst):
        os.mkdir(dst)

    def _copy_one(item):
        # Copy a single file or directory tree *into* dst_dir.
        try:
            source = Path(item)
        except TypeError:
            return 'SRC_INVAILD'
        if source.is_file():
            # BUG FIX: shutil.copyfile() rejects a directory destination;
            # shutil.copy() copies the file into the directory.
            shutil.copy(source, dst_dir)
        elif source.is_dir():
            # BUG FIX: copytree() to an existing directory raises; copy the
            # tree into a same-named subdirectory of the destination.
            shutil.copytree(source, dst_dir / source.name)
        else:
            return 'SRC_NOT_FOUND'
        return None

    if src_type == 'list':
        for entry in src:
            _copy_one(entry)
    else:
        # BUG FIX: this branch referenced the undefined name `tmp`
        # (NameError); it must operate on `src` itself.
        return _copy_one(src)
from typing import Optional
from typing import Tuple
from typing import Dict
from typing import Hashable
def get_dataset_subsampling_slices(
        dataset: xr.Dataset,
        step: int,
        xy_dim_names: Optional[Tuple[str, str]] = None
) -> Dict[Hashable, Optional[Tuple[slice, ...]]]:
    """
    Compute subsampling slices for the variables in *dataset*.

    Only data variables with spatial dimensions given by *xy_dim_names*
    are considered.

    :param dataset: the dataset providing the variables
    :param step: the integer subsampling step
    :param xy_dim_names: the spatial dimension names
    """
    assert_instance(dataset, xr.Dataset, name='dataset')
    assert_instance(step, int, name='step')
    # Cache slices per dimension tuple so each distinct variable shape
    # is only computed once.
    slices_by_dims: Dict[Tuple[Hashable, ...], Tuple[slice, ...]] = {}
    result: Dict[Hashable, Optional[Tuple[slice, ...]]] = {}
    for name, variable in dataset.data_vars.items():
        cached = slices_by_dims.get(variable.dims)
        if cached is None:
            cached = get_variable_subsampling_slices(
                variable, step, xy_dim_names=xy_dim_names
            )
            if cached is not None:
                slices_by_dims[variable.dims] = cached
        if cached is not None:
            result[name] = cached
    return result
import types
import asyncio
def loop_apply_coroutine(loop, func: types.FunctionType, *args, **kwargs) -> object:
    """
    Invoke *func* with the supplied arguments.

    Coroutine functions are scheduled on *loop* and run to completion;
    plain callables are invoked directly and their result returned.
    """
    if not asyncio.iscoroutinefunction(func):
        return func(*args, **kwargs)
    task = asyncio.ensure_future(func(*args, **kwargs), loop=loop)
    loop.run_until_complete(task)
    return task.result()
def zipmap(keys, vals):
    """
    Return a ``dict`` pairing each key with the value at the same position.
    Extra keys or values beyond the shorter sequence are dropped.
    """
    return {key: val for key, val in zip(keys, vals)}
def _create_thingy(sql_entity, session):
"""Internal call that holds the boilerplate for putting a new SQLAlchemy object
into the database. BC suggested this should be a decorator but I don't think
that aids legibility. Maybe should rename this though.
"""
session.add(sql_entity)
#Note that this commit causes the .id to be populated.
session.commit()
return sql_entity.id | 4d50da3a15606c7adf61e2c9c09a4a3a9898edf9 | 3,628,657 |
import torch
def mix_estimator(states, actions, disc_rewards, mask, policy, result='mean'):
    """Policy-gradient estimator combining location and scale score terms.

    Shapes (N trajectories, horizon H, m parameters):
        states: NxHxm
        actions: NxHx1
        disc_rewards, mask: NxH

    :param policy: object exposing ``loc_score`` and ``scale_score``
        (project-specific — shapes above are as annotated inline)
    :param result: 'samples' returns per-trajectory terms (Nxm);
        any other value returns their mean over trajectories (m,)
    """
    upsilon_scores = policy.loc_score(states, actions) #NxHxm
    G = torch.cumsum(upsilon_scores, 1) #NxHxm
    sigma_scores = policy.scale_score(states, actions).squeeze() #NxH
    H = torch.cumsum(sigma_scores, 1) #NxH
    # Baseline as a ratio of trajectory averages (variance reduction).
    baseline = torch.mean(tensormat(G, H * disc_rewards), 0) / torch.mean(tensormat(G, H), 0)
    # NaN != NaN trick: zero entries where the denominator average was 0.
    baseline[baseline != baseline] = 0
    values = disc_rewards.unsqueeze(2) - baseline.unsqueeze(0)
    # Mask out timesteps beyond each trajectory's effective length.
    G = tensormat(G, mask)
    terms = tensormat(G * values, H) #NxHxm
    samples = torch.sum(terms, 1) #Nxm
    if result == 'samples':
        return samples #Nxm
    else:
        return torch.mean(samples, 0)
def weighted_loss(func):
    """
    Decorator adding element-wise weighting and reduction to a loss.

    The wrapped loss gains three keyword arguments:
    ``weight`` (element-wise multiplier or None), ``reduction``
    ('mean', 'sum' or 'none') and ``avg_factor`` (divisor used instead
    of the element count when reduction is 'mean').
    """
    @wraps(func)
    def _wrapper(pred,
                 target,
                 weight=None,
                 reduction='mean',
                 avg_factor=None,
                 **kwargs):
        assert reduction in ('mean', 'sum', 'none')
        loss = func(pred, target, **kwargs)
        if weight is not None:
            loss = loss * weight
        if reduction == 'mean':
            # avg_factor overrides the plain element-count average.
            loss = loss.mean() if avg_factor is None else loss.sum() / avg_factor
        elif reduction == 'sum':
            loss = loss.sum()
        # reduction == 'none': return the element-wise loss unchanged.
        return loss
    return _wrapper
def similarity(s, theta, axis, tx, ty, tz):
    """
    Create a 4x4 similarity transformation matrix.

    Parameters:
    -----------
    s: isotropic scaling ratio.
    theta: angle of rotation about `axis`.
    axis: a vector (not necessarily a unit vector) along the axis of rotation.
    tx: translation in the x-direction.
    ty: translation in the y-direction.
    tz: translation in the z-direction.

    Output:
    -------
    H: a 4x4 similarity transformation matrix
    """
    # A similarity transform is a uniformly scaled rotation plus a
    # translation, so scale the rotation matrix and hand the result to
    # the affinity helper to append (tx, ty, tz).
    scaled_rotation = s * rotation(theta, axis)
    return affinity(scaled_rotation, tx, ty, tz)
def compute_sequences_weight(alignment_data=None, seqid=None):
    """Computes weight of sequences. The weights are calculated by lumping
    together sequences whose identity is greater that a particular threshold.
    For example, if there are m similar sequences, each of them will be assigned
    a weight of 1/m. Note that the effective number of sequences is the sum of
    these weights.
    Parameters
    ----------
        alignmnet_data : np.array()
            Numpy 2d array of the alignment data, after the alignment is put in
            integer representation
        seqid : float
            Value at which beyond this sequences are considered similar. Typical
            values could be 0.7, 0.8, 0.9 and so on
    Returns
    -------
        seqs_weight : np.array()
            A 1d numpy array containing computed weights. This array has a size
            of the number of sequences in the alignment data.
    """
    alignment_shape = alignment_data.shape
    num_seqs = alignment_shape[0]
    seqs_len = alignment_shape[1]
    seqs_weight = np.zeros((num_seqs,), dtype=np.float64)
    #count similar sequences
    # NOTE(review): parallel_range looks like numba's prange — confirm
    # this function is compiled with parallel=True.
    for i in parallel_range(num_seqs):
        seq_i = alignment_data[i]
        for j in range(num_seqs):
            seq_j = alignment_data[j]
            iid = np.sum(seq_i==seq_j)
            if np.float64(iid)/np.float64(seqs_len) > seqid:
                seqs_weight[i] += 1
    #compute the weight of each sequence in the alignment
    # Each sequence matches itself, so every count is >= 1 and the
    # inversion below cannot divide by zero.
    for i in range(num_seqs): seqs_weight[i] = 1.0/float(seqs_weight[i])
    return seqs_weight
def parameter_bank_names(device, bank_name_dict=BANK_NAME_DICT):
    """ Determine the bank names to use for a device.

    Known device classes take their names from *bank_name_dict*;
    Max devices ask the device itself (falling back to a default when
    it has no name); everything else gets 'Bank 1', 'Bank 2', ...

    Fixes Python 2 remnants in the original: ``b'Bank ' + str(...)``
    raised TypeError (bytes + str), ``str(filter(...))`` produced a
    filter-object repr, and a stray bare ``return`` made the Max-device
    branch always return None.
    """
    if device != None:
        if device.class_name in bank_name_dict.keys():
            return bank_name_dict[device.class_name]
        banks = number_of_parameter_banks(device)

        def _default_bank_name(bank_index):
            return 'Bank ' + str(bank_index + 1)

        if device.class_name in MAX_DEVICES and banks != 0:
            def _is_ascii(c):
                return ord(c) < 128

            def _bank_name(bank_index):
                try:
                    name = device.get_bank_name(bank_index)
                except:
                    name = None
                if name:
                    # Strip non-ASCII characters from the device-supplied name.
                    return ''.join(filter(_is_ascii, name))
                return _default_bank_name(bank_index)

            return [_bank_name(i) for i in range(banks)]
        return [_default_bank_name(i) for i in range(banks)]
    return []
def maxsubarray(list):
    """
    Brute-force maximum-subarray search.

    Examines every contiguous subarray and keeps the best sum seen,
    deliberately trading speed for simplicity.

    Complexity (n = list size):
        Time complexity: O(n^2)
        Space complexity: O(1)

    :param list: non-empty sequence of numbers
    :return: tuple ``(best sum, start index, end index)``, inclusive bounds
    """
    best_sum = list[0]
    best_start = best_end = 0
    for start in range(len(list)):
        running = 0
        for end in range(start, len(list)):
            running += list[end]
            if running > best_sum:
                best_sum = running
                best_start = start
                best_end = end
    return (best_sum, best_start, best_end)
def deserialize_transaction_data(f):
    """
    Deserialize transaction data
    More info: https://learnmeabitcoin.com/technical/transaction-data

    Reads a raw (legacy-format) Bitcoin transaction from the stream and
    returns it as a dict, including the computed transaction id.

    NOTE(review): segwit marker/flag and witness data are not handled —
    confirm inputs use the legacy serialization.

    :param f: buffer, required
    :return: dict
    """
    transaction = Transaction()
    start_transaction_data = f.tell()
    # [::-1] reverses little-endian on-disk byte order for display.
    transaction.version = f.read(4)[::-1].hex()
    transaction.input_count = get_var_int(f)
    transaction_inputs = []
    for input_number in range(int(transaction.input_count, 16)):
        transaction_input = TransactionInput()
        transaction_input.id = f.read(32)[::-1].hex()
        transaction_input.vout = f.read(4)[::-1].hex()
        transaction_input.script_sig_size = get_var_int(f)
        transaction_input.script_sig = f.read(int(transaction_input.script_sig_size, 16)).hex()
        transaction_input.sequence = f.read(4)[::-1].hex()
        transaction_inputs.append(transaction_input.__dict__)
    transaction.output_count = get_var_int(f)
    transaction_outputs = []
    for output_number in range(int(transaction.output_count, 16)):
        transaction_output = TransactionOutput()
        transaction_output.value = f.read(8)[::-1].hex()
        transaction_output.script_pub_key_size = get_var_int(f)
        transaction_output.script_pub_key = f.read(int(transaction_output.script_pub_key_size, 16)).hex()
        transaction_outputs.append(transaction_output.__dict__)
    transaction.lock_time = f.read(4)[::-1].hex()
    # Compute transaction id: re-read the raw bytes just consumed and
    # hash them, then restore the stream position.
    end_transaction_data = f.tell()
    transaction_data_size = end_transaction_data - start_transaction_data
    f.seek(start_transaction_data)
    transaction_data = f.read(transaction_data_size)
    f.seek(end_transaction_data)
    transaction.id = compute_hash(transaction_data)
    transaction_dict = transaction.__dict__
    transaction_dict['inputs'] = transaction_inputs
    transaction_dict['outputs'] = transaction_outputs
    return transaction_dict
def parse_string(string: str) -> list:
    """Parse the given string content.

    :param string: File content for parsing
    :type string: str
    :rtype: list
    :raises: ParseException
    """
    # Build the grammar, tokenize the whole input, then convert the
    # raw tokens into the result structure.
    tokens = create_grammar().parseString(string, parseAll=True)
    return parse_tokens(tokens)
def reset_config():
    """Reset the configuration.

    An endpoint that accepts a POST method. The json request object
    must contain the key ``reset`` (with any value).

    The method will reset the configuration to the original configuration files that were
    used, skipping the local (and saved file).

    .. note::

        If the server was originally started with a local version of the file, those will
        be skipped upon reload. This is not ideal but hopefully this method is not used too
        much.

    Returns:
        str: A json string object containing the keys ``success`` and ``msg`` that indicate
            success or failure.
    """
    # NOTE(review): GET is handled here too although the docstring says
    # POST — confirm whether GET support is intentional.
    params = dict()
    if request.method == 'GET':
        params = request.args
    elif request.method == 'POST':
        params = request.get_json()
    logger.warning('Resetting config server')
    # Use .get() so a request missing the 'reset' key gets the friendly
    # error response below instead of a KeyError (HTTP 500).
    if params.get('reset'):
        # Reload the config from the original files.
        config = load_config(config_files=app.config['config_file'],
                             load_local=app.config['load_local'])
        # Add an entry to control running of the server.
        config['config_server'] = dict(running=True)
        app.config['POCS'] = config
        app.config['POCS_cut'] = Cut(config)
    else:
        return jsonify({
            'success': False,
            'msg': "Invalid. Need json request: {'reset': True}"
        })
    return jsonify({
        'success': True,
        'msg': 'Configuration reset'
    })
from typing import Sequence
def is_sequence(obj):
    """Is the object a *non-str* sequence?

    ``str`` is itself a ``Sequence``, so a plain isinstance check would
    count strings too; this excludes them explicitly.
    """
    if isinstance(obj, str):
        return False
    return isinstance(obj, Sequence)
import numbers
import numpy
import collections
def sample_points(rng, N=None, conc=None, lower=None, upper=None, start=0, ndim=3):
    """Generate points distributed uniformly.
    Args:
        rng (numpy.RandomState, optional): A random number generator.
        N (int or list, optional): The number of points to be generated.
        conc (float or list, optional): The concentration of points.
            Either one of `N` or `conc` must be given.
        upper (Number or array, optional): An upper limit of the position. Defaults to 1.
        lower (Number or array, optional): A lower limit of the position. Defaults to 0.
        start (int, optional): The first index. Defaults to 0.
        ndim (int, optional) The number of dimensions. Defaults to 3.
    Returns:
        A pair of an array and the last ID.
        An array of points. Each point consists of a coordinate, an index,
            p_state (defaults to 1) and cyc_id (defaults to `inf`).
        The last ID. The sum of `start` and the number of points generated.

    NOTE(review): rows actually carry ndim + 2 columns (coordinates,
    molecule ID, p_state); the cyc_id column mentioned above is not
    written here — confirm whether callers add it later.
    """
    if N is None and conc is None:
        raise ValueError('Either one of N or conc must be given.')
    _log.info('sample_points: N={}, conc={}, lower={}, upper={}, start={}, ndim={}.'.format(
        N, conc, lower, upper, start, ndim))
    # Broadcast scalar limits to per-dimension arrays; default box [0, 1]^ndim.
    lower = (numpy.ones(ndim) * lower if isinstance(lower, numbers.Number)
             else numpy.array(lower) if lower is not None
             else numpy.zeros(ndim))
    upper = (numpy.ones(ndim) * upper if isinstance(upper, numbers.Number)
             else numpy.array(upper) if upper is not None
             else numpy.ones(ndim))
    if len(lower) < ndim or len(upper) < ndim:
        raise ValueError(
            "The wrong size of limits was given [(lower={}, upper={}) != {}].".format(len(lower), len(upper), ndim))
    # Swap inverted limits so lower <= upper in every dimension.
    for dim in range(ndim):
        if lower[dim] > upper[dim]:
            lower[dim], upper[dim] = upper[dim], lower[dim]
    _log.debug('lower was set to {}.'.format(lower))
    _log.debug('upper was set to {}.'.format(upper))
    if N is None:
        # Poisson-draw counts from concentration x box volume,
        # ignoring zero-length dimensions.
        lengths = upper - lower
        size = numpy.prod(lengths[lengths != 0])
        if isinstance(conc, collections.abc.Iterable):
            N_list = [rng.poisson(size * conc_) for conc_ in conc]
        else:
            N_list = [rng.poisson(size * conc)]
    elif not isinstance(N, collections.abc.Iterable):
        N_list = [N]
    else:
        N_list = N
    N = sum(N_list)
    _log.debug('{} points would be distributed {}.'.format(N, list(N_list)))
    if N <= 0:
        return numpy.array([]), start
    maxdim = len(lower)
    ret = numpy.zeros((N, maxdim + 2))
    for dim in range(maxdim):
        if lower[dim] < upper[dim]:
            ret[: , dim] = rng.uniform(lower[dim], upper[dim], N)
        else:
            # Degenerate dimension: all points share the fixed coordinate.
            ret[: , dim] = lower[dim]
    ret[: , maxdim + 0] = numpy.arange(start, start + N)  # Molecule ID
    ret[: , maxdim + 1] = 1.0  # Photon state
    return ret, start + N
def make_exponential_statistics(state):
    """Make ExponentialMovingStatistics object from state.

    Thin reconstruction helper (e.g. for unpickling): *state* is
    whatever ``ExponentialMovingStatistics``' state export produced.
    """
    return ExponentialMovingStatistics.fromstate(state)
import time
import socket
def DetectAio(timeout=1.1):
    """Detect AIO nodes on the network, present all options if none detected.

    Listens for AIO traffic for up to *timeout* seconds and records the
    source IP and header version of every message heard.

    Args:
        timeout: Total listening window in seconds.

    Returns:
        Tuple of (nodes, versions): de-duplicated AIO nodes and the
        header versions they reported; both empty when nothing was heard.
    """
    sources = aio.aio_node_helper.Names()
    types = aio.message_type_helper.Names()
    client = aio.AioClient(types, timeout=0.1, allowed_sources=sources)
    ip_list = []
    version_list = []
    timer_start = time.time()
    while time.time() - timer_start < timeout:
        try:
            ip, header, _ = client.Recv(accept_invalid=True)
            ip_list.append(ip)
            version_list.append(header.version)
        except socket.error:
            # Recv timed out (0.1 s); keep polling until the window ends.
            pass
    client.Close()
    if ip_list and version_list:
        # De-duplication using set conversion.
        ip_tuple, version_tuple = zip(*set(zip(ip_list, version_list)))
        return tuple([IpToAioNode(ip) for ip in ip_tuple]), version_tuple
    return tuple(), tuple()
def getAxisList(var):
    """
    Return the coordinate objects of *var*, one per dimension,
    in dimension order.
    """
    return [var.coords[dim] for dim in var.dims]
import functools
def listify(fn=None, wrapper=list):
    """
    Decorator that materializes a function's return value.

    From https://github.com/shazow/unstdlib.py/blob/master/unstdlib/standard/list_.py#L149

    The wrapped function's result is passed through *wrapper* (``list``
    by default), so an algorithm can be written as a generator while
    callers receive a concrete collection.

    Example::

        >>> @listify
        ... def get_lengths(iterable):
        ...     for i in iterable:
        ...         yield len(i)
        >>> get_lengths(["spam", "eggs"])
        [4, 4]
        >>>
        >>> @listify(wrapper=tuple)
        ... def get_lengths_tuple(iterable):
        ...     for i in iterable:
        ...         yield len(i)
        >>> get_lengths_tuple(["foo", "bar"])
        (3, 3)
    """
    def decorate(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kw):
            return wrapper(fn(*args, **kw))
        return wrapped
    if fn is None:
        # Used with arguments: @listify(wrapper=tuple)
        return decorate
    # Used bare: @listify
    return decorate(fn)
def skip_nothing(name, dirpath):
    """Always returns :obj:`False`.

    Stand-in for a skip-filter callback when nothing should be skipped;
    *name* and *dirpath* are ignored.
    """
    return False
import json
def _load(fpath):
    """Get content of json file at <fpath>.

    :param fpath: path to a JSON file
    :return: deserialized JSON content
    """
    if not pth.__is_file(fpath):
        # NOTE(review): log.critical presumably aborts execution;
        # otherwise the open below would still be attempted on a
        # missing file — confirm.
        log.critical("\"{0}\" doesn\'t exist, can\'t load data from it".format(fpath))
    log.debug("Loading data from \"{0}\"".format(fpath))
    fjson = pth.__open_file(fpath)
    ret = json.load(fjson)
    pth.__close_file(fjson)
    return ret
import os
import subprocess
import re
def get_version_number(klocwork_path):
    """This function determines the Klocwork version number.

    Inputs:
        - klocwork_path: Absolute path to the bin directory of the Klocwork installation [string];
          pass '' to locate kwinject via the PATH.

    Ouputs:
        - version_number: The version number of the Klocwork instance being tested [string],
          or 'Unknown' when it cannot be determined.
    """
    try:
        # Set the path, if necessary. (The original ran `which kwinject`
        # twice — once via subprocess.call and again via Popen; the
        # redundant blocking call is removed.)
        if klocwork_path == '':
            call_string = 'which kwinject'
            my_env = os.environ.copy()
            proc = subprocess.Popen(call_string, shell=True, env=my_env, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, encoding='utf-8')
            klocwork_path = os.path.dirname(proc.communicate()[0].strip())
        # Get the version number
        call_string = klocwork_path + '/kwinject -v'
        my_env = os.environ.copy()
        proc = subprocess.Popen(call_string, shell=True, env=my_env, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, encoding='utf-8')
        std_out, std_err = proc.communicate()
        # The version is the last token on the second line of output.
        version_number = re.split(' ', re.split('\n', std_out)[1])[-1]
        # Truncate to at most major.minor.patch.
        version_split = re.split(r'\.', version_number)
        if len(version_split) > 3:
            version_number = '.'.join(version_split[0:3])
    except Exception:
        # Any subprocess or parsing failure yields an unknown version.
        version_number = 'Unknown'
    return version_number
def titlecase(string):
    """Titlecase all words in the string.

    Words with quote (') titlecased only in the beginning, as opposed to
    built-in ``str.title()``. Roman numerals are uppercased.
    """
    def _fix_word(match):
        word = match.group(0)
        if ROMAN_PATTERN.match(word):
            return word.upper()
        tail = word[1:]
        lowered_tail = tail.lower()
        if lowered_tail != tail:
            # Tail already contains uppercase letters: leave as-is.
            return word
        return word[0].upper() + lowered_tail
    return PATTERN.sub(_fix_word, string)
def register(key=None):
    """Returns a decorator registering a widget class in the widget registry.

    If no key is provided, the widget's module path concatenated with
    its class name is used. A key is provided for each core Jupyter
    widget so that the frontend can use this key regardless of the
    language of the kernel.
    """
    def wrap(widget):
        registry_key = key if key is not None else widget.__module__ + widget.__name__
        Widget.widget_types[registry_key] = widget
        return widget
    return wrap
import os
def load_data_dict(project_dir_string):
    """Load data from a colab_zirc_dims project folder into a dictionary.
    Parameters
    ----------
    project_dir_string : str
        Path to a colab_zirc_dims project folder.
    Returns
    -------
    Dict
        A dict of dicts containing data from project folder w/ format:
            {'SAMPLE NAME': {'Scanlist': SCANLIST (.SCANCSV) PATH,
                             'Mosaic': MOSAIC .BMP PATH,
                             'Align_file': MOSAIC ALIGN FILE PATH,
                             'Max_zircon_size': MAX USER-INPUT ZIRCON SIZE,
                             'Offsets': [USER X OFFSET, USER Y OFFSET],
                             'Scan_dict': DICT LOADED FROM .SCANCSV FILE},
             ...}.
        Empty dict when mosaic_info.csv has bad headers; samples missing
        any matching file are skipped with a printed message.
    """
    # initializes output dict
    temp_output_dict = {}
    # file paths
    mosaic_path = os.path.join(project_dir_string, 'mosaics')
    scanlist_path = os.path.join(project_dir_string, 'scanlists')
    mos_csv_path = os.path.join(project_dir_string, 'mosaic_info.csv')
    # loads info csv as dictionary
    mos_csv_dict = pd.read_csv(mos_csv_path, header=0, index_col=False,
                               squeeze=False).to_dict('list')
    if not check_mos_csv_keys(mos_csv_dict):
        print('Incorrect mosaic_info.csv headers: correct and re-save')
        return {}
    # lists of files in directories
    mosaic_bmp_filenames = list_if_endswith(os.listdir(mosaic_path),
                                            '.bmp')
    mosaic_align_filenames = list_if_endswith(os.listdir(mosaic_path),
                                              '.Align')
    scanlist_filenames = list_if_endswith(os.listdir(scanlist_path),
                                          '.scancsv')
    # loops through mos_csv_dict in order to collect data, \
    # verify that all files given in mosaic_info.csv are present
    for eachindex, eachsample in enumerate(mos_csv_dict['Sample']):
        each_include_bool = True
        each_csv_scanlist_name = mos_csv_dict['Scanlist'][eachindex]
        # mosaic name without file extension
        each_csv_mosaic_name = mos_csv_dict['Mosaic'][eachindex].split('.')[0]
        # checks if files are in directories, gets full file paths if so
        act_scn_file = join_1st_match(scanlist_filenames,
                                      each_csv_scanlist_name,
                                      scanlist_path)
        act_mos_file = join_1st_match(mosaic_bmp_filenames,
                                      each_csv_mosaic_name,
                                      mosaic_path)
        act_align_file = join_1st_match(mosaic_align_filenames,
                                        each_csv_mosaic_name,
                                        mosaic_path)
        # verifies that matches found, provides user feedback if not
        error_strings = ['scanlist', 'mosaic .bmp file', 'mosaic .Align file']
        for error_idx, pth_var in enumerate([act_scn_file, act_mos_file,
                                             act_align_file]):
            if each_include_bool and not pth_var:
                print(eachsample, ': matching', error_strings[error_idx],
                      'not found')
                each_include_bool = False
        if each_include_bool:
            # dictionary for info on individual scans and their coordinates
            coords_dict = scancsv_to_dict(act_scn_file)
            #adds collected data to output dict
            temp_output_dict[eachsample] = {}
            temp_output_dict[eachsample]['Scanlist'] = act_scn_file
            temp_output_dict[eachsample]['Mosaic'] = act_mos_file
            temp_output_dict[eachsample]['Align_file'] = act_align_file
            temp_output_dict[eachsample]['Max_zircon_size'] = mos_csv_dict['Max_zircon_size'][eachindex]
            temp_output_dict[eachsample]['Offsets'] = [mos_csv_dict['X_offset'][eachindex],
                                                       mos_csv_dict['Y_offset'][eachindex]]
            temp_output_dict[eachsample]['Scan_dict'] = coords_dict
    return temp_output_dict
import re
def find(pattern):
    """
    Find all instances where the pattern is in the running command

    .. code-block:: bash

        salt '*' nxos.cmd find '^snmp-server.*$'

    .. note::
        This uses the `re.MULTILINE` regex format for python, and runs the
        regex against the whole show_run output.
    """
    return re.findall(pattern, show_run(), re.MULTILINE)
import torch
def get_center(arcs):
    """Centre of the arc
    Args:
        arcs: tensor [batch_size, num_arcs, 7]
            arcs[b, i] = [x_start, y_start, dx, dy, theta, sharpness, width]
    Returns: tensor [batch_size, num_arcs, 2]
    """
    x_start = arcs[..., 0]
    y_start = arcs[..., 1]
    dx = arcs[..., 2]
    dy = arcs[..., 3]
    theta = arcs[..., 4]
    # HACK
    # Clone before the in-place edit so the caller's tensor is untouched;
    # nudge exact zeros so tan(theta) is nonzero in the divisions below.
    theta = theta.clone()
    theta[theta == 0] += 1e-7
    # Centre = chord midpoint offset along the perpendicular by
    # half-chord / tan(theta). safe_div presumably guards the division
    # against remaining near-zero denominators — TODO confirm.
    x_center = x_start + dx / 2 + safe_div(dy, (2 * torch.tan(theta)))
    y_center = y_start + dy / 2 - safe_div(dx, (2 * torch.tan(theta)))
    return torch.stack([x_center, y_center], dim=-1)
import urllib.request
from bs4 import BeautifulSoup
def get_urls_towns(url_index_by_letter):
    """Provides from the page corresponding to a city index the pages corresponding to the cities of the index.

    Args:
        url_index_by_letter (string): Url corresponding to a city index
            e.g. : "https://elections.interieur.gouv.fr/municipales-2020/080/080G.html"

    Returns:
        list of string : List of urls corresponding to the results page for each city of the index
            e.g. : ['https://elections.interieur.gouv.fr/municipales-2020/080/080373.html', ...]
    """
    # `import urllib.request` (rather than bare `import urllib`) is
    # required for urllib.request to be reliably available.
    page = urllib.request.urlopen(url_index_by_letter)
    soup = BeautifulSoup(page, 'html.parser')
    table = soup.find("table",
                      {"class": "table table-bordered tableau-communes"})
    cells = table.findAll('td')
    urls_towns = []
    # Every third cell holds the result link; skip cities with no results.
    for cell in cells[2::3]:
        if cell.getText() != 'Aucun résultat reçu':
            # href starts with './'; strip it before joining to the base URL.
            urls_towns.append(
                "https://elections.interieur.gouv.fr/municipales-2020" +
                cell.find("a").get('href')[2:])
    return urls_towns
def simple_app(global_config, **settings):
    """This function returns a Pyramid WSGI application.

    :param global_config: paste-deploy global config; its 'apps' entry
        (list/tuple or whitespace-separated string) names modules to
        include into the configurator.
    :param settings: application settings forwarded to the Configurator.
    :return: the WSGI application.
    """
    with Configurator(settings=settings) as config:
        # Testing-only security policy — not intended for production.
        config.set_security_policy(TestingSecurityPolicy())
        apps = global_config.get('apps', '')
        if not isinstance(apps, (list, tuple)):
            # 'apps' given as a string: split into a list of module names.
            apps = aslist(apps)
        for app in apps:
            config.include(app)
        return config.make_wsgi_app()
import sys
def _sized_dataframes(dataframe: pd.DataFrame) -> tuple:
    """ Determines optimal chunks to publish the dataframe in. In smaller dataframes
        this may be the whole dataframe.

        Chunk size targets 60MB of compressed parquet (ideal for
        Spectrum), assuming a ~4:1 compression ratio; bytes per row are
        estimated from the first row.

        Args:
            dataframe (pd.DataFrame): Dataframe to size out

        Yields:
            A dictionary containing the upper and lower index of the dataframe to chunk
            (``{'lower': int, 'upper': int}``)
    """
    # Estimate bytes per row from the first row, then the whole frame.
    row_size_est = sys.getsizeof(dataframe.head(1))
    num_rows = int(dataframe.shape[0])
    frame_size_est = row_size_est * num_rows
    # at scale dataframes seem to compress around 3.5-4.5 times as parquet.
    # TODO: should build a real regression to calc this!
    compression_ratio = 4
    # 60MB compressed is ideal for Spectrum
    ideal_size = compression_ratio * (60 * float(1 << 20))
    batch_log_message = f"""row size estimate: {row_size_est} bytes.
    number of rows: {num_rows} rows
    frame size estimate: {frame_size_est} bytes
    compression ratio: {compression_ratio}:1
    ideal size: {ideal_size} bytes
    """
    logger.debug(batch_log_message)
    # short circut if < ideal size: one chunk covers the whole frame.
    if ideal_size > frame_size_est:
        yield {'lower': 0, 'upper': len(dataframe)}
        return
    # math the number of estimated partitions
    num_partitions = int(row_size_est * num_rows / ideal_size)
    rows_per_partition = int(num_rows / num_partitions)
    logger.info(
        f"Sized out {len(range(0, num_rows, rows_per_partition))} dataframes.")
    # for each partition do the thing
    for index, lower in enumerate(range(0, num_rows, rows_per_partition)):
        # NOTE(review): the +1 adjustments below look tailored to
        # inclusive slicing in the caller — confirm no rows are skipped
        # or duplicated at chunk boundaries.
        lower = lower if lower == 0 else lower + 1
        if index == num_partitions:
            upper = num_rows
        else:
            if lower == 0:
                upper = lower + rows_per_partition + 1
            else:
                upper = lower + rows_per_partition
        yield {'lower': lower, 'upper': upper}
from typing import Optional
import glob
import tqdm
import re
from pathlib import Path
import PIL
def imgs_preds(model_config: tuple[Model, tuple[int, int]], path: str, slc: Optional[slice] = None) -> list[
    list[str]]:
    """
    Get the predicted class for each image

    :param model_config: model object and image dimensions that the model expects to receive
    :param path: path to image directory
    :param slc: slice of images from directory
    :return: list of lists containing image filenames and their predicted classes
    """
    # Collect candidate files; apply the optional slice up-front.
    images, res = glob(f'{path}/*')[slc] if slc else glob(f'{path}/*'), []
    # Keep only jpg/png/jpeg names; the lambda just inlines a regex filter.
    for img in tqdm((lambda x, y: filter(re.compile(x).match, y))(r'.*\.(jpg|png|jpeg)', images), total=len(images)):
        try:
            # Load, resize to the model's expected dims, and batch (size 1).
            image = preprocess_input(r_[[img_to_array(load_img(img, target_size=model_config[1]))]])
            pred = model_config[0].predict(image)
            # decode_predictions()[0][0][1] is the top class's label.
            res.append([Path(img).name, decode_predictions(pred)[0][0][1]])
        except (OSError, FileNotFoundError, PIL.UnidentifiedImageError) as e:
            # Unreadable/broken image: report and continue with the rest.
            print('\n', e)
    return res
def vertical_cross(in_field, lon, lat, line_points, npts=100):
    """
    Interpolate 2D or multiple dimensional grid data to vertical cross section.

    :param in_field: 2D or multiple dimensional grid data,
                     the rightest dimension [..., lat, lon].
    :param lon: grid data longitude.
    :param lat: grid data latitude.
    :param line_points: cross section line points,
                        should be [n_points, 2] array.
    :param npts: the point number of great circle line.
    :return: cross section [..., n_points], points

    NOTE(review): np.interp assumes lon/lat are monotonically
    increasing — confirm for descending-latitude grids.
    """
    if np.ndim(in_field) < 2:
        raise ValueError("in_field must be at least 2 dimension")
    # reshape field to 3d array [levels, lat, lon]
    old_shape = in_field.shape
    if np.ndim(in_field) == 2:
        field = in_field.reshape(1, *old_shape)
    else:
        field = in_field.reshape(np.product(old_shape[0:-2]), *old_shape[-2:])
    # get great circle points between each consecutive pair of waypoints
    points = None
    n_line_points = line_points.shape[0]
    geod = Geod("+ellps=WGS84")
    for i in range(n_line_points-1):
        seg_points = geod.npts(
            lon1=line_points[i, 0], lat1=line_points[i, 1],
            lon2=line_points[i+1, 0], lat2=line_points[i+1, 1], npts=npts)
        if points is None:
            points = np.array(seg_points)
        else:
            points = np.vstack((points, np.array(seg_points)))
    # convert to (fractional) pixel coordinates on the grid
    x = np.interp(points[:, 0], lon, np.arange(len(lon)))
    y = np.interp(points[:, 1], lat, np.arange(len(lat)))
    # loop every level, sampling the field along the section
    zdata = []
    for i in range(field.shape[0]):
        zdata.append(
            ndimage.map_coordinates(np.transpose(field[i, :, :]),
                                    np.vstack((x, y))))
    # reshape zdata back to the input's leading dimensions
    zdata = np.array(zdata)
    if np.ndim(in_field) > 2:
        zdata = zdata.reshape(np.append(old_shape[0:-2], points.shape[0]))
    # return vertical cross section
    return zdata, points
import csv
def write_wordlist(wordlist):
    """
    Write a wordlist to a temporary file.

    The file is tab-separated with a header taken from the first
    entry's keys, and is NOT deleted automatically; the caller owns it.

    :param wordlist: non-empty list of dicts sharing the same keys
    :return: path of the written temporary file
    """
    handler = NamedTemporaryFile("w", encoding="utf-8", delete=False)
    fieldnames = list(wordlist[0].keys())
    writer = csv.DictWriter(handler, delimiter="\t", fieldnames=fieldnames)
    writer.writeheader()
    for row in wordlist:
        writer.writerow(row)
    handler.close()
    return handler.name
def hasfield(model_cls, field_name):
    """
    Like `hasattr()`, but for model fields.

    >>> from django.contrib.auth.models import User
    >>> hasfield(User, 'password')
    True
    >>> hasfield(User, 'foobarbaz')
    False
    """
    try:
        model_cls._meta.get_field(field_name)
    except exceptions.FieldDoesNotExist:
        return False
    return True
def get_distance(point_a, point_b):
    """Receives two coordinates by parameter, and returns the geodesic distance between them (in km).

    Params:
        - point_a: tuple or list expected, first coordinate
        - point_b: tuple or list expected, second coordinate

    Returns:
        - Formatted string indicating the distance between the two points

    Raises:
        - TypeError if unexpected inputs
    """
    # Evaluating inputs — isinstance (rather than exact type equality)
    # also accepts tuple/list and float subclasses such as namedtuples.
    if not isinstance(point_a, (tuple, list)) or not isinstance(point_b, (tuple, list)):
        logger.error('Bad data from input.')
        raise TypeError('Unexpected input; required two coordinates in an iterable object')
    if len(point_a) != 2 or len(point_b) != 2:
        logger.error('Bad data from input.')
        raise TypeError('Each coordinate must have two floats: longitude and latitude.')
    if not all(isinstance(coord, float) for coord in (*point_a, *point_b)):
        logger.error('Error. Bad data received.')
        raise TypeError('Error. Both latitude and longitude must be float numbers.')
    # Functionality
    try:
        distance_km = round(GeodesicDistance(point_a, point_b).get_distance, 3)
        return f'Distance between {point_a} and {point_b}: {distance_km} km.'
    except Exception as e:
        logger.error('Unexpected error', e)
        return 'Unexpected error. Impossible to calculate distance.'
def __substitute_controller_variables(config):
    """Substitute variables and set defaults for config

    Arguments:
        config {dict} -- must contain 'controllers' (list of dicts) and
            optionally top-level 'variables' shared by all controllers.

    Raises:
        CohesiveSDKException -- when a format placeholder references a
            variable that is not defined for the controller.

    Returns:
        [dict] -- the same config object, with per-controller
        'variables' fully resolved.
    """
    global_variables = config.get("variables", {})
    set_master_password = global_variables.get("set_master_password")
    master_password = global_variables.get("master_password")
    for controller in config["controllers"]:
        # add global variables controller state, overriding with local variables
        local_variables = dict(global_variables, **controller.get("variables", {}))
        # Password logic:
        #   - use passwords passed by ENV
        #   - if none, use passwords in config file
        #   - if still none >
        #     - if master password is passed and set_master_password flag is NOT true, use master
        #     - else use default password for clouds
        if not local_variables.get("api_password"):
            if not master_password or set_master_password:
                cloud = local_variables.get("cloud", None)
                if cloud == "azure":
                    # Azure default: "<instance_name>-<primary_private_ip>"
                    local_variables["api_password"] = "%s-%s" % (
                        local_variables["instance_name"],
                        local_variables["primary_private_ip"],
                    )
                else:
                    # Other clouds default to the instance id.
                    local_variables["api_password"] = local_variables["instance_id"]
            else:
                local_variables["api_password"] = master_password
        if not local_variables.get("host"):
            local_variables["host"] = local_variables["public_ip"]
        # Expand "{placeholder}" strings using the resolved variables.
        for key, value in local_variables.items():
            if util.is_formattable_string(value):
                try:
                    local_variables[key] = value.format(**local_variables)
                except KeyError:
                    raise CohesiveSDKException("Missing variable %s" % value)
        controller["variables"] = local_variables
        __resolve_route_config_variables(controller, config)
        __resolve_peering_config_variables(controller, config)
        __resolve_plugins_config_variables(controller, config)
    return config
import os
import sys
def credentials():
    """Retrieves credentials

    Reads OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME and OS_AUTH_URL from
    the environment; exits with status 1 when any of them is unset.

    :return: dict with keys username, password, tenant_name, auth_url
    """
    creds = {
        'username': os.environ.get('OS_USERNAME'),
        'password': os.environ.get('OS_PASSWORD'),
        'tenant_name': os.environ.get('OS_TENANT_NAME'),
        'auth_url': os.environ.get('OS_AUTH_URL'),
    }
    if not all(creds.values()):
        sys.stderr.write("ERROR: Unable to get Keystone credentials\n")
        exit(1)
    return creds
def CNOT_like_PTM(idx):
    """
    Returns the pauli transfer matrix for gates of the cnot like class
      (q0)  --C1--•--S1--      --C1--•--S1------
                  |        ->        |
      (q1)  --C1--⊕--S1--      --C1--•--S1^Y90--

    idx decomposes into the four sub-gate choices (24 * 24 * 3 * 3 = 5184):
    C1 on q0, C1 on q1, S1 on q0, and the q1 correction (C1[idx_3] · Y90).
    The original had a stray no-op expression statement ``CZ`` here,
    which has been removed; CZ still appears in the gate product below.
    """
    assert(idx < 5184)
    idx_0 = idx % 24
    idx_1 = (idx // 24) % 24
    idx_2 = (idx // 576) % 3
    idx_3 = (idx // 1728)
    C1_q0 = np.kron(np.eye(4), C1[idx_0])
    C1_q1 = np.kron(C1[idx_1], np.eye(4))
    S1_q0 = np.kron(np.eye(4), S1[idx_2])
    S1y_q1 = np.kron(np.dot(C1[idx_3], Y90), np.eye(4))
    # Gates are listed in circuit order; reverse for matrix composition.
    return np.linalg.multi_dot(list(reversed([C1_q0, C1_q1, CZ, S1_q0, S1y_q1])))
def _random_binary_string_matrix(rows, cols, max_length):
    """Build a ``rows`` x ``cols`` matrix (list of row lists) of random
    binary strings, each of length up to ``max_length``."""
    matrix = []
    for _ in range(rows):
        row = [_random_binary_string_gen(max_length) for _ in range(cols)]
        matrix.append(row)
    return matrix
import time
import json
import hashlib
def emit_webhook(client, url, job_id="test", worker_id=None, signal="new_judgments", unit_state="finalized", treatment="t10", by_get=True):
    """
    Simulate the crowdsourcing platform's webhook callback against the app.

    First polls the response/proposal result tables until a row for
    ``worker_id`` appears (the app writes results asynchronously), then
    builds a signed payload from ``webhook_data_template`` and sends it to
    ``url`` via GET or POST.

    :param client: Flask test client used to issue the request
    :param url: (str) relative path to target api
    :param job_id: (str)
    :param worker_id: (str)
    :param signal: (new_judgments|unit_complete)
    :param unit_state: (finalized|new|judging|judgeable?)
    :param treatment: (str) treatment code used to resolve result table names
    :param by_get: (True|False) simulate a setup were each user triggers the webhook using a get request
    :return: HTTP status string of the webhook response (e.g. "200 OK")
    """
    app.logger.debug(f"emit_webhook: job_id: {job_id}, worker_id: {worker_id}, treatment: {treatment}")
    proceed = False
    max_retries = 5
    with app.app_context():
        # Poll (up to 5 times, 10 ms apart) until the worker's row is visible
        # in either the responder or proposer result table.
        while not proceed and max_retries>0:
            table_resp = get_table("resp", job_id, "result", treatment=treatment)
            table_prop = get_table("prop", job_id, "result", treatment=treatment)
            app.logger.debug("Waiting for the db...")
            # Table names come from get_table (internal), not user input, so
            # the f-string interpolation here is not an injection vector;
            # worker_id is bound as a proper query parameter.
            with get_db() as con:
                if con.execute(f"SELECT * FROM {table_resp} where worker_id=?", (worker_id,)).fetchone():
                    proceed = True
                elif con.execute(f"SELECT * FROM {table_prop} where worker_id=?", (worker_id,)).fetchone():
                    proceed = True
            con = None
            time.sleep(0.01)
            max_retries -= 1
    # Shallow copy of the module-level template; nested "payload" dicts are
    # shared with the template and mutated in place below.
    data_dict = dict(webhook_data_template)
    data_dict["signal"] = signal
    data_dict["payload"]["job_id"] = job_id
    data_dict["payload"]["results"]["judgments"][0]["job_id"] = job_id
    data_dict["payload"]["results"]["judgments"][0]["worker_id"] = worker_id
    data_dict["payload"]["results"]["judgments"][0]["unit_state"] = unit_state
    with get_db() as con:
        job_config = get_job_config(con, job_id)
    # Signature scheme: sha1 hex digest of the JSON payload concatenated
    # with the job's api_key (mimics the platform's webhook signing).
    payload = json.dumps(data_dict["payload"])
    payload_ext = payload + str(job_config["api_key"])
    signature = hashlib.sha1(payload_ext.encode()).hexdigest()
    data = {
        "signal": signal,
        "payload": payload,
        "signature": signature
    }
    if by_get:
        # An empty form is submitted when triggering the webhook by click
        data = {}
        if "?" in url:
            url += f"&job_id={job_id}&worker_id={worker_id}"
        else:
            url += f"?job_id={job_id}&worker_id={worker_id}"
        res = client.get(url, follow_redirects=True).status
    else:
        res = client.post(url, data=data, follow_redirects=True).status
    return res
def comment_list_view(request, slug):
    """
    Return the displayed comments of the post identified by ``slug``.

    Resolves the post (404 if it does not exist), restricts its comments
    to those flagged ``is_displayed``, serializes them, and replies with
    HTTP 200 and the serialized list.
    """
    post = get_object_or_404(Post, slug=slug)
    visible_comments = Comment.objects.filter(post=post, is_displayed=True)
    data = CommentSerializer(visible_comments, many=True).data
    return Response(data, status=status.HTTP_200_OK)
def cos(x):
    """
    Cosine of a DualNumber.

    Promotes ``x`` to a DualNumber, applies cos to its value, and
    propagates every stored derivative via the chain rule
    (d cos(x) = -sin(x) dx).
    """
    x = DualNumber.promote(x)
    result = x.promote(np.cos(x.value))
    slope = -np.sin(x.value)
    for key in x.derivatives:
        result.derivatives[key] += slope * x.derivatives[key]
    return result
def get_defense_strategy(arg_dict, dataset_name, strategy_name, strategy_gpu_id,
                         defense_desc, metric_bundle, field):
    """Look up ``strategy_name`` in the built-in registry and instantiate it
    with the remaining arguments."""
    strategy_cls = built_in_defense_strategies[strategy_name]
    return strategy_cls(arg_dict, dataset_name, strategy_gpu_id, defense_desc,
                        metric_bundle, field)
def get_role_description(role: str) -> str:
    """Look up a role's description in the game-info json.

    Args:
        role (str): The programmatic role name (matched case-insensitively).

    Returns:
        str: The corresponding role description.
    """
    roles = game_info_json['roles']
    return roles[role.lower()]['description']
def pairwise_phase_pattern(module, window_type='voronoi', from_absolute=True,
                           project_phases=False, full_window=False,
                           sign='regular', length_unit='cm',
                           palette=None):
    """
    Plot a nicely formatted pairwise phase pattern for ``module``.

    Parameters
    ----------
    module : Module
        Module whose pairwise phase pattern is plotted.
    window_type : string, optional
        Window type; see `Module.window_vertices` for accepted values.
    from_absolute : bool, optional
        Compute pairwise phases from absolute phases (True) or directly
        from each pair of cells (False).
    project_phases : bool, optional
        Project the phases onto a regular hexagonal grid.
    full_window : bool, optional
        Use the full absolute-phase window for the relative phases too.
    sign : string, optional
        How to resolve the sign ambiguity when ``full_window`` is True.
    length_unit : string, optional
        Length unit for the axis labels; None suppresses it. Ignored when
        ``project_phases`` is True.
    palette : sequence, optional
        Colors for window, pattern points, and periodic extension, in that
        order.

    Returns
    -------
    See `Module.plot_phases`.
    """
    if palette is None:
        palette = [None] + COLOR_CYCLE
    window_color, point_color, periodic_color = palette[0], palette[1], palette[2]

    pattern = module.pairwise_phase_pattern(window_type=window_type,
                                            from_absolute=from_absolute,
                                            project_phases=project_phases,
                                            full_window=full_window,
                                            sign=sign)
    artists = pattern.plot_pattern(window=True,
                                   window_kw={'color': window_color},
                                   periodic_kw={'color': periodic_color},
                                   color=point_color)
    axes = artists[0].axes

    # Axis range comes from the projection (unit square) or the module's
    # own parameters; labels carry a unit only for unprojected phases.
    if project_phases:
        range_ = ((-1.0, 1.0), (-1.0, 1.0))
    else:
        range_ = next(iter(module)).params['range_']
    if project_phases or length_unit is None:
        xlabel, ylabel = r"$\delta_x$", r"$\delta_y$"
    else:
        xlabel = r"$\delta_x \ / \ \mathrm{{{}}}$".format(length_unit)
        ylabel = r"$\delta_y \ / \ \mathrm{{{}}}$".format(length_unit)
    axes.set(xlabel=xlabel, ylabel=ylabel)

    # Rhomboid windows always span the half-unit square, regardless of the
    # range computed above.
    if window_type == 'rhomboid':
        range_ = ((-0.5, 0.5), (-0.5, 0.5))
    axes.set(xticks=range_[0], yticks=range_[1])
    return artists
def centtoinch(cents):
"""Cents to inch."""
return .3937*cents | 517142a29242246721abd05638c8ecbefcd888cb | 3,628,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.