| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def test_function(client: Client) -> str:
"""
Performs a connectivity test by checking for a valid HTTP response.
:param client: client object used to get a response from the API
:return: 'ok' if the connection succeeds; raises an exception if any error occurs during the connection
"""
client.http_request(method='GET', url_suffix=URL_SUFFIX['TEST_MODULE'])
return 'ok'
| 5,339,400
|
def _close_output(out_file: TextIOWrapper):
"""
Closes the output. Does not close stdout.
:param out_file: File object to close
"""
if out_file is not sys.stdout:
out_file.close()
| 5,339,401
|
def load_tests(loader, tests, ignore):
"""Create tests from all docstrings by walking the package hierarchy."""
modules = pkgutil.walk_packages(rowan.__path__, rowan.__name__ + ".")
for _, module_name, _ in modules:
tests.addTests(doctest.DocTestSuite(module_name, globs={"rowan": rowan}))
return tests
| 5,339,402
|
def k_hot_array_from_string_list(context,
typename,
entity_names):
"""Create a numpy array encoding a k-hot set.
Args:
context: a NeuralExpressionContext
typename: type of entity_names
entity_names: list of names of type typename
Returns:
A k-hot-array representation of the set of entity_names. For frozen
dictionaries, unknown entity names are mapped to the unknown_id of their
type or discarded if the unknown_value of the type is None. Unknown entity
names will throw an nql.EntityNameException for non-frozen dictionaries.
It is possible for this method to return an all-zeros array.
"""
# Empty string is not a valid entity_name.
ids = [context.get_id(e, typename) for e in entity_names if e]
# None is not a valid id.
valid_ids = [x for x in ids if x is not None]
max_id = context.get_max_id(typename)
result = np.zeros((max_id,), dtype='float32')
if valid_ids:
result[valid_ids] = 1.
return result
| 5,339,403
|
def get_validation_data_iter(data_loader: RawParallelDatasetLoader,
validation_sources: List[str],
validation_target: str,
buckets: List[Tuple[int, int]],
bucket_batch_sizes: List[BucketBatchSize],
source_vocabs: List[vocab.Vocab],
target_vocab: vocab.Vocab,
max_seq_len_source: int,
max_seq_len_target: int,
batch_size: int,
fill_up: str) -> 'ParallelSampleIter':
"""
Returns a ParallelSampleIter for the validation data.
"""
logger.info("=================================")
logger.info("Creating validation data iterator")
logger.info("=================================")
validation_length_statistics = analyze_sequence_lengths(validation_sources, validation_target,
source_vocabs, target_vocab,
max_seq_len_source, max_seq_len_target)
validation_sources_sentences = [SequenceReader(source, vocab, add_bos=False) for source, vocab in
zip(validation_sources, source_vocabs)]
validation_target_sentences = SequenceReader(validation_target, target_vocab, add_bos=True, limit=None)
validation_data_statistics = get_data_statistics(validation_sources_sentences,
validation_target_sentences,
buckets,
validation_length_statistics.length_ratio_mean,
validation_length_statistics.length_ratio_std,
source_vocabs, target_vocab)
validation_data_statistics.log(bucket_batch_sizes)
validation_data = data_loader.load(validation_sources_sentences, validation_target_sentences,
validation_data_statistics.num_sents_per_bucket).fill_up(bucket_batch_sizes,
fill_up)
return ParallelSampleIter(data=validation_data,
buckets=buckets,
batch_size=batch_size,
bucket_batch_sizes=bucket_batch_sizes,
num_factors=len(validation_sources))
| 5,339,404
|
def _serialize_property(
target_expr: str, value_expr: str, a_property: mapry.Property,
auto_id: _AutoID, cpp: mapry.Cpp) -> str:
"""
Generate the code to serialize the property.
The value of the property is given as ``value_expr`` and serialized
into the ``target_expr``.
:param target_expr: C++ expression of the Json::Value to be set
:param value_expr: C++ expression of the value to be serialized
:param a_property: the property definition
:param auto_id: generator of unique identifiers
:param cpp: C++ settings
:return: generated serialization code
"""
if not a_property.optional:
return _serialize_value(
target_expr=target_expr,
value_expr=value_expr,
a_type=a_property.type,
auto_id=auto_id,
cpp=cpp)
##
# Handle optional property
##
deref_value_expr = "(*{})".format(value_expr)
serialization = _serialize_value(
target_expr=target_expr,
value_expr=deref_value_expr,
a_type=a_property.type,
auto_id=auto_id,
cpp=cpp)
return _SERIALIZE_OPTIONAL_PROPERTY_TPL.render(
value_expr=value_expr, serialization=serialization)
| 5,339,405
|
def read_raw_data(pattern):
"""Load raw data from one or more text files.
:param pattern: a glob pattern string or a list of file paths
:return: X, a list of rows loaded from all matched files
"""
if isinstance(pattern, basestring):
fpaths = glob.glob(pattern)
elif isinstance(pattern, list):
fpaths = pattern
else:
raise TypeError('pattern must be a glob string or a list of paths')
X = []
for fpath in fpaths:
print 'loading file {} ...'.format(fpath)
X.extend(loadtxt(fpath))
return X
| 5,339,406
|
def warp_affine_rio(src: np.ndarray,
dst: np.ndarray,
A: Affine,
resampling: Resampling,
src_nodata: Nodata = None,
dst_nodata: Nodata = None,
**kwargs) -> np.ndarray:
"""
Perform Affine warp using rasterio as backend library.
:param src: image as ndarray
:param dst: image as ndarray
:param A: Affine transform, maps from dst_coords to src_coords
:param resampling: str|rasterio.warp.Resampling resampling strategy
:param src_nodata: Value representing "no data" in the source image
:param dst_nodata: Value to represent "no data" in the destination image
:param kwargs: any other args to pass to ``rasterio.warp.reproject``
:returns: dst
"""
crs = _WRP_CRS
src_transform = Affine.identity()
dst_transform = A
if isinstance(resampling, str):
resampling = resampling_s2rio(resampling)
# GDAL support for int8 is patchy, warp doesn't support it, so we need to convert to int16
if src.dtype.name == 'int8':
src = src.astype('int16')
if dst.dtype.name == 'int8':
_dst = dst.astype('int16')
else:
_dst = dst
rasterio.warp.reproject(src,
_dst,
src_transform=src_transform,
dst_transform=dst_transform,
src_crs=crs,
dst_crs=crs,
resampling=resampling,
src_nodata=src_nodata,
dst_nodata=dst_nodata,
**kwargs)
if dst is not _dst:
# int8 workaround copy pixels back to int8
np.copyto(dst, _dst, casting='unsafe')
return dst
| 5,339,407
|
def encode_integer_leb128(value: int) -> bytes:
"""Encode an integer with signed LEB128 encoding.
:param int value: The value to encode.
:return: ``value`` encoded as a variable-length integer in LEB128 format.
:rtype: bytes
"""
if value == 0:
return b"\0"
# Calculate the number of bits in the integer and round up to the nearest multiple
# of 7. We need to add 1 bit because bit_length() only returns the number of bits
# required to encode the magnitude, but not the sign.
n_bits = value.bit_length() + 1
if n_bits % 7:
n_bits += 7 - (n_bits % 7)
# Bit operations force a negative integer to its unsigned two's-complement
# representation, e.g. -127 & 0xff = 0x80, -10 & 0xfff = 0xff6, etc. We use this to
# sign-extend the number *and* make it unsigned. Once it's unsigned, we can use
# ULEB128.
mask = (1 << n_bits) - 1
value &= mask
output = bytearray(n_bits // 7)
for i in range(n_bits // 7):
output[i] = 0x80 | (value & 0x7F)
value >>= 7
# Last byte shouldn't have the high bit set.
output[-1] &= 0x7F
return bytes(output)
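# A few spot checks against hand-computed signed LEB128 encodings. This is only
# a hypothetical sanity-check sketch; it assumes the function above is in scope.
assert encode_integer_leb128(0) == b"\x00"
assert encode_integer_leb128(127) == b"\xff\x00"      # positive values may need an extra sign byte
assert encode_integer_leb128(-127) == b"\x81\x7f"
assert encode_integer_leb128(624485) == b"\xe5\x8e\x26"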
| 5,339,408
|
def test_MS2DeepScoreMonteCarlo_score_pair(average_type):
"""Test score calculation using *.pair* method."""
spectrums, _, similarity_measure = get_test_ms2_deep_score_instance(n_ensembles=5,
average_type=average_type)
score = similarity_measure.pair(spectrums[0], spectrums[1])
assert score['score'].dtype == np.float64, "Expected float as score."
assert score['score'] > 0.65 and score['score'] < 0.9, "Expected score in different range"
assert score['uncertainty'].dtype == np.float64, "Expected float as uncertainty."
if average_type == 'median':
assert score['uncertainty'] > 0.01 and score['uncertainty'] < 0.12, \
"Expected uncertainty in different range"
else:
assert score['uncertainty'] > 0.01 and score['uncertainty'] < 0.06, \
"Expected uncertainty in different range"
| 5,339,409
|
def google_sen_new(text_content):
"""
Analyzing Entity Sentiment in a String
Args:
text_content The text content to analyze
"""
# text_content = 'Grapes are good. Bananas are bad.' Available types: PLAIN_TEXT, HTML
client = language_v1.LanguageServiceClient()
type_ = enums.Document.Type.PLAIN_TEXT
language = "en"
document = {"content": text_content, "type": type_, "language": language}
# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = enums.EncodingType.UTF8
response = client.analyze_entity_sentiment(document, encoding_type=encoding_type)
result_dict = {} # "entity":[]
for entity in response.entities:
result_list = []
# Get the representative name of the entity
result_list.append(entity.name)
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
result_list.append(enums.Entity.Type(entity.type).name)
# Get the salience score associated with the entity in the [0, 1.0] range
result_list.append(entity.salience)
# Get the aggregate sentiment expressed for this entity in the provided document
sentiment = entity.sentiment
result_list.append(sentiment.score)
result_list.append(sentiment.magnitude)
result_dict[entity] = result_list
return result_dict
| 5,339,410
|
def _create_ip_config_data():
"""
Parses the output of the ``ipconfig /all`` command into a map keyed by interface name.
"""
map_ipconfigs = dict()
curr_itf = ""
proc = subprocess.Popen(['ipconfig', '/all'], stdout=subprocess.PIPE)
for curr_line in proc.stdout.readlines():
curr_line = curr_line.decode("utf-8").rstrip()
if curr_line:
if curr_line[0] != " ":
curr_itf = curr_line.strip()
if curr_itf[-1] == ":":
curr_itf = curr_itf[:-1]
map_ipconfigs[curr_itf] = []
else:
idx_colon = curr_line.find(":")
if idx_colon >= 0:
curr_key = curr_line[:idx_colon].replace(". ","").strip()
curr_val = curr_line[idx_colon+1:].strip()
else:
curr_val = curr_line.strip()
map_ipconfigs[curr_itf].append((curr_key, curr_val))
return map_ipconfigs
| 5,339,411
|
def simplify_polygon_by(points, is_higher, should_stop, refresh_node):
"""
Simplify the given polygon by greedily removing vertices using a given priority.
This is generalized from Visvalingam's algorithm, which is described well here:
http://bost.ocks.org/mike/simplify/
is_higher = function(a, b): returns True if node a has higher priority to be removed than node b.
should_stop = function(a): returns True if the given highest-priority node should stop the simplification.
refresh_node = function(a): refreshes attributes of a node that depend on its adjacent vertices.
"""
length = len(points)
# build nodes
nodes = [VertexNode(p) for p in points]
# connect nodes
for i in xrange(length):
prev_i = (i+length-1) % length
next_i = (i+1) % length
node = nodes[i]
node.prev_node = nodes[prev_i]
node.next_node = nodes[next_i]
refresh_node(node)
node.orig_index = i
def on_index_change(node,i):
"""Callback that allows a node to know its location in the heap."""
node.heap_index = i
heap = Heap(nodes, is_higher, on_index_change)
while True:
node = heap.peek()
if should_stop(node):
break
heap.pop()
# Close gap in doubly-linked list.
prev_node, next_node = node.prev_node, node.next_node
prev_node.next_node = next_node
next_node.prev_node = prev_node
# Refresh vertices that have new adjacents.
refresh_node(prev_node)
heap.reorder_node(prev_node.heap_index)
refresh_node(next_node)
heap.reorder_node(next_node.heap_index)
# Return remaining points in their original order.
return [node.point for node in sorted(heap.array, key=(lambda node: node.orig_index))]
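# A hypothetical set of callbacks that turns the generic routine above into classic
# Visvalingam simplification: a vertex's priority is the area of the triangle it forms
# with its neighbours, and simplification stops once every remaining triangle is at
# least `min_area`. Assumes each point is an (x, y) pair and each node exposes
# .point/.prev_node/.next_node, as used in the snippet; names here are illustrative.
def _triangle_area(a, b, c):
    return abs((b[0] - a[0]) * (c[1] - a[1]) - (c[0] - a[0]) * (b[1] - a[1])) / 2.0

def refresh_area(node):
    node.area = _triangle_area(node.prev_node.point, node.point, node.next_node.point)

def smaller_area_first(a, b):
    return a.area < b.area

def stop_at_min_area(node, min_area=1.0):
    return node.area >= min_area

# simplified = simplify_polygon_by(points, smaller_area_first, stop_at_min_area, refresh_area)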
| 5,339,412
|
def main(argv):
"""Main entry for Dataflow python job launcher.
expected input args are as follows:
project - Required. The project in which the resource will be launched.
location - Required. The region in which the resource will be launched.
python_module_path - The gcs path to the python file or folder to run.
temp_location - A GCS path for Dataflow to stage temporary job files created
during the execution of the pipeline.
requirements_file_path - The gcs or local path to the requirements file.
args - The list of args to pass to the python file.
gcp_resources - A placeholder output for returning the gcp_resources proto.
Args:
argv: A list of system arguments.
"""
parsed_args = _parse_args(argv)
dataflow_python_job_remote_runner.create_python_job(**parsed_args)
| 5,339,413
|
def get_builder(slug):
"""
Get the Builder object for a given slug name.
Args:
slug - The slug name of the installable software
"""
for builder in Index().index:
if builder.slug == slug:
return builder
return False
| 5,339,414
|
def preprocess_normscale(patient_data, result, index, augment=True,
metadata=None,
normscale_resize_and_augment_function=normscale_resize_and_augment,
testaug=False):
"""Normalizes scale and augments the data.
Args:
patient_data: the data to be preprocessed.
result: dict to store the result in.
index: index indicating in which slot the result dict the data
should go.
augment: flag indicating whether augmentation is needed.
metadata: metadata belonging to the patient data.
normscale_resize_and_augment_function: function used to resize and
augment the slices.
testaug: flag indicating whether test-time augmentation parameters
should be sampled.
Returns:
A pair (label_correction_function, classification_correction_function)
that compensates labels and classifications for the applied zoom.
"""
if augment:
if testaug:
augmentation_params = sample_test_augmentation_parameters()
else:
augmentation_params = sample_augmentation_parameters()
else:
augmentation_params = None
zoom_factor = None
# Iterate over different sorts of data
for tag, data in patient_data.items():
if tag in metadata:
metadata_tag = metadata[tag]
desired_shape = result[tag][index].shape
cleaning_processes = getattr(config(), 'cleaning_processes', [])
cleaning_processes_post = getattr(config(), 'cleaning_processes_post', [])
if tag.startswith("sliced:data:singleslice"):
# Cleaning data before extracting a patch
data = clean_images(
[patient_data[tag]], metadata=metadata_tag,
cleaning_processes=cleaning_processes)
# Augment and extract patch
# Decide which roi to use.
shift_center = (None, None)
if getattr(config(), 'use_hough_roi', False):
shift_center = metadata_tag["hough_roi"]
patient_3d_tensor = normscale_resize_and_augment_function(
data, output_shape=desired_shape[-2:],
augment=augmentation_params,
pixel_spacing=metadata_tag["PixelSpacing"],
shift_center=shift_center[::-1])[0]
if augmentation_params is not None:
zoom_factor = augmentation_params["zoom_x"] * augmentation_params["zoom_y"]
else:
zoom_factor = 1.0
# Clean data further
patient_3d_tensor = clean_images(
patient_3d_tensor, metadata=metadata_tag,
cleaning_processes=cleaning_processes_post)
if "area_per_pixel:sax" in result:
raise NotImplementedError()
if augmentation_params and not augmentation_params.get("change_brightness", 0) == 0:
patient_3d_tensor = augment_brightness(patient_3d_tensor, augmentation_params["change_brightness"])
put_in_the_middle(result[tag][index], patient_3d_tensor, True)
elif tag.startswith("sliced:data:randomslices"):
# Clean each slice separately
data = [
clean_images([slicedata], metadata=metadata, cleaning_processes=cleaning_processes)[0]
for slicedata, metadata in zip(data, metadata_tag)]
# Augment and extract patches
shift_centers = [(None, None)] * len(data)
if getattr(config(), 'use_hough_roi', False):
shift_centers = [m["hough_roi"] for m in metadata_tag]
patient_3d_tensors = [
normscale_resize_and_augment_function(
[slicedata], output_shape=desired_shape[-2:],
augment=augmentation_params,
pixel_spacing=metadata["PixelSpacing"],
shift_center=shift_center[::-1])[0]
for slicedata, metadata, shift_center in zip(data, metadata_tag, shift_centers)]
if augmentation_params is not None:
zoom_factor = augmentation_params["zoom_x"] * augmentation_params["zoom_y"]
else:
zoom_factor = 1.0
# Clean data further
patient_3d_tensors = [
clean_images([patient_3d_tensor], metadata=metadata, cleaning_processes=cleaning_processes_post)[0]
for patient_3d_tensor, metadata in zip(patient_3d_tensors, metadata_tag)]
patient_4d_tensor = _make_4d_tensor(patient_3d_tensors)
if augmentation_params and not augmentation_params.get("change_brightness", 0) == 0:
patient_4d_tensor = augment_brightness(patient_4d_tensor, augmentation_params["change_brightness"])
if "area_per_pixel:sax" in result:
raise NotImplementedError()
put_in_the_middle(result[tag][index], patient_4d_tensor, True)
elif tag.startswith("sliced:data:sax:locations"):
pass # will be filled in by the next one
elif tag.startswith("sliced:data:sax:is_not_padded"):
pass # will be filled in by the next one
elif tag.startswith("sliced:data:sax"):
# step 1: sort (data, metadata_tag) with slice_location_finder
slice_locations, sorted_indices, sorted_distances = slice_location_finder({i: metadata for i,metadata in enumerate(metadata_tag)})
data = [data[idx] for idx in sorted_indices]
metadata_tag = [metadata_tag[idx] for idx in sorted_indices]
slice_locations = np.array([slice_locations[idx]["relative_position"] for idx in sorted_indices])
slice_locations = slice_locations - (slice_locations[-1] + slice_locations[0])/2.0
data = [
clean_images([slicedata], metadata=metadata, cleaning_processes=cleaning_processes)[0]
for slicedata, metadata in zip(data, metadata_tag)]
# Augment and extract patches
shift_centers = [(None, None)] * len(data)
if getattr(config(), 'use_hough_roi', False):
shift_centers = [m["hough_roi"] for m in metadata_tag]
patient_3d_tensors = [
normscale_resize_and_augment_function(
[slicedata], output_shape=desired_shape[-2:],
augment=augmentation_params,
pixel_spacing=metadata["PixelSpacing"],
shift_center=shift_center[::-1])[0]
for slicedata, metadata, shift_center in zip(data, metadata_tag, shift_centers)]
if augmentation_params is not None:
zoom_factor = augmentation_params["zoom_x"] * augmentation_params["zoom_y"]
else:
zoom_factor = 1.0
# Clean data further
patient_3d_tensors = [
clean_images([patient_3d_tensor], metadata=metadata, cleaning_processes=cleaning_processes_post)[0]
for patient_3d_tensor, metadata in zip(patient_3d_tensors, metadata_tag)]
patient_4d_tensor = _make_4d_tensor(patient_3d_tensors)
if augmentation_params and not augmentation_params.get("change_brightness", 0) == 0:
patient_4d_tensor = augment_brightness(patient_4d_tensor, augmentation_params["change_brightness"])
# Augment sax order
if augmentation_params and augmentation_params.get("flip_sax", 0) > 0.5:
patient_4d_tensor = patient_4d_tensor[::-1]
slice_locations = slice_locations[::-1]
# Put data (images and metadata) in right location
put_in_the_middle(result[tag][index], patient_4d_tensor, True)
if "sliced:data:sax:locations" in result:
eps_location = 1e-7
is_padded = np.array([False]*len(result["sliced:data:sax:locations"][index]))
put_in_the_middle(result["sliced:data:sax:locations"][index], slice_locations + eps_location, True, is_padded)
if "sliced:data:sax:distances" in result:
eps_location = 1e-7
sorted_distances.append(0.0) # is easier for correct padding
is_padded = np.array([False]*len(result["sliced:data:sax:distances"][index]))
put_in_the_middle(result["sliced:data:sax:distances"][index], np.array(sorted_distances) + eps_location, True, is_padded)
if "sliced:data:sax:is_not_padded" in result:
result["sliced:data:sax:is_not_padded"][index] = np.logical_not(is_padded)
elif tag.startswith("sliced:data:chanzoom:2ch"):
# step 1: sort (data, metadata_tag) with slice_location_finder
slice_locations, sorted_indices, sorted_distances = slice_location_finder({i: metadata for i,metadata in enumerate(metadata_tag[2])})
top_slice_metadata = metadata_tag[2][sorted_indices[0]]
bottom_slice_metadata = metadata_tag[2][sorted_indices[-1]]
ch2_metadata = metadata_tag[1]
ch4_metadata = metadata_tag[0]
trf_2ch, trf_4ch = get_chan_transformations(
ch2_metadata=ch2_metadata,
ch4_metadata=ch4_metadata,
top_point_metadata = top_slice_metadata,
bottom_point_metadata = bottom_slice_metadata,
output_width=desired_shape[-1]
)
ch4_3d_patient_tensor, ch2_3d_patient_tensor = [], []
ch4_data = data[0]
ch2_data = data[1]
if ch4_data is None and ch2_data is not None:
ch4_data = ch2_data
ch4_metadata = ch2_metadata
if ch2_data is None and ch4_data is not None:
ch2_data = ch4_data
ch2_metadata = ch4_metadata
for ch, ch_result, transform, metadata in [(ch4_data, ch4_3d_patient_tensor, trf_4ch, ch4_metadata),
(ch2_data, ch2_3d_patient_tensor, trf_2ch, ch2_metadata)]:
tform_shift_center, tform_shift_uncenter = build_center_uncenter_transforms(desired_shape[-2:])
zoom_factor = np.sqrt(np.abs(np.linalg.det(transform.params[:2,:2])) * np.prod(metadata["PixelSpacing"]))
normalise_zoom_transform = build_augmentation_transform(zoom_x=zoom_factor, zoom_y=zoom_factor)
if augmentation_params:
augment_tform = build_augmentation_transform(**augmentation_params)
total_tform = tform_shift_uncenter + augment_tform + normalise_zoom_transform + tform_shift_center + transform
else:
total_tform = tform_shift_uncenter + normalise_zoom_transform + tform_shift_center + transform
ch_result[:] = [fast_warp(c, total_tform, output_shape=desired_shape[-2:]) for c in ch]
# print "zoom factor:", zoom_factor
if augmentation_params is not None:
zoom_factor = augmentation_params["zoom_x"] * augmentation_params["zoom_y"]
else:
zoom_factor = 1.0
# Clean data further
ch4_3d_patient_tensor = clean_images(np.array([ch4_3d_patient_tensor]), metadata=ch4_metadata, cleaning_processes=cleaning_processes_post)[0]
ch2_3d_patient_tensor = clean_images(np.array([ch2_3d_patient_tensor]), metadata=ch2_metadata, cleaning_processes=cleaning_processes_post)[0]
# Put data (images and metadata) in right location
put_in_the_middle(result["sliced:data:chanzoom:2ch"][index], ch2_3d_patient_tensor, True)
put_in_the_middle(result["sliced:data:chanzoom:4ch"][index], ch4_3d_patient_tensor, True)
elif tag.startswith("sliced:data:shape"):
raise NotImplementedError()
elif tag.startswith("sliced:data"):
# put time dimension first, then axis dimension
data = clean_images(patient_data[tag], metadata=metadata_tag)
patient_4d_tensor, zoom_ratios = resize_and_augment(data, output_shape=desired_shape[-2:], augment=augmentation_params)
if "area_per_pixel:sax" in result:
result["area_per_pixel:sax"][index] = zoom_ratios[0] * np.prod(metadata_tag[0]["PixelSpacing"])
if "noswitch" not in tag:
patient_4d_tensor = np.swapaxes(patient_4d_tensor,1,0)
put_in_the_middle(result[tag][index], patient_4d_tensor)
elif tag.startswith("sliced:meta:all"):
# TODO: this probably doesn't work very well yet
result[tag][index] = patient_data[tag]
elif tag.startswith("sliced:meta:PatientSex"):
result[tag][index][0] = -1. if patient_data[tag]=='M' else 1.
elif tag.startswith("sliced:meta:PatientAge"):
number, letter = patient_data[tag][:3], patient_data[tag][-1]
letter_rescale_factors = {'D': 365.25, 'W': 52.1429, 'M': 12., 'Y': 1.}
result[tag][index][0] = float(patient_data[tag][:3]) / letter_rescale_factors[letter]
if augmentation_params and zoom_factor:
label_correction_function = lambda x: x * zoom_factor
classification_correction_function = lambda x: utils.zoom_array(x, 1./zoom_factor)
return label_correction_function, classification_correction_function
else:
return lambda x: x, lambda x: x
| 5,339,415
|
def get_contact_lookup_list():
"""get contact lookup list"""
try:
return jsonify(Contact.get_contact_choices())
except Exception as e:
return str(e)
| 5,339,416
|
def argmax(X, axis=None):
"""
Return tuple (values, indices) of the maximum entries of matrix
:param:`X` along axis :param:`axis`. Row major order.
:param X: Target matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
:param axis: Specify axis along which to operate. If not specified,
whole matrix :param:`X` is considered.
:type axis: `int`
"""
if sp.isspmatrix(X):
X = X.tocsr()
assert axis == 0 or axis == 1 or axis is None, "Incorrect axis number."
res = [[float('-inf'), 0]
for _ in range(X.shape[1 - axis])] if axis is not None else [float('-inf'), 0]
def _caxis(row, col):
if X[row, col] > res[col][0]:
res[col] = (X[row, col], row)
def _raxis(row, col):
if X[row, col] > res[row][0]:
res[row] = (X[row, col], col)
def _naxis(row, col):
if X[row, col] > res[0]:
res[0] = X[row, col]
res[1] = row * X.shape[1] + col  # flat index in row-major order
check = _caxis if axis == 0 else _raxis if axis == 1 else _naxis
[check(row, col) for row in range(X.shape[0])
for col in range(X.shape[1])]
if axis is None:
return res
elif axis == 0:
t = list(zip(*res))
return list(t[0]), np.mat(t[1])
else:
t = list(zip(*res))
return list(t[0]), np.mat(t[1]).T
else:
idxX = np.asmatrix(X).argmax(axis)
if axis is None:
eX = X[idxX // X.shape[1], idxX % X.shape[1]]
elif axis == 0:
eX = [X[idxX[0, idx], col]
for idx, col in zip(range(X.shape[1]), range(X.shape[1]))]
else:
eX = [X[row, idxX[idx, 0]]
for row, idx in zip(range(X.shape[0]), range(X.shape[0]))]
return eX, idxX
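# Hypothetical sanity check of the sparse branch (assumes numpy imported as np and
# scipy.sparse as sp, as the helper above already uses).
X = sp.csr_matrix(np.array([[1, 5],
                            [7, 3]]))
values, indices = argmax(X, axis=0)   # column-wise maxima
# values == [7, 5]; indices is a 1x2 matrix holding the row index of each column maximum
assert values == [7, 5]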
| 5,339,417
|
def show_new_high_score():
""" If a new high score is achieved, the score and winning player is displayed """
print("new_high_score")
myfont = pygame.font.SysFont("Comic Sans MS", 30)
winner = "left" if cfg.l_score > cfg.r_score else "right"
score = str(cfg.new_high_score)
print(score)
text = myfont.render(f"{winner} player, new highest score! {score} points!!!", 1, white)
cfg.window.blit(text, (win_width/9, win_height/2))
| 5,339,418
|
def plot_pdn(Px, Tx, pdi_nCO_v, pdi_mu_v, Tmin, Tmax, Pmin, Pmax, iso_names):
"""Generate plots for a size n"""
# extract the dimensions of the simulations
pdi_nCO_v = pdi_nCO_v.copy()
pdi_mu_v = pdi_mu_v.copy()
if len(pdi_nCO_v.shape) == 2: # add one more dimension if only two provided
pdi_nCO_v = pdi_nCO_v[np.newaxis, :]
pdi_mu_v = pdi_mu_v[np.newaxis, :]
# the tensor has 3 dimension: the isomer structure, temperature, and pressure
# set the dimensions to be consistent with the input
n_sim, nT, nP = pdi_nCO_v.shape
# reaction condition vector
Tv = np.linspace(Tmin, Tmax, nT)
Pv = np.logspace(Pmin, Pmax, nP) # Partial pressure of CO in bar
for xi in range(n_sim):
# Select an isomer
# Plot the heatmap, Tv is for the rows so it becomes yv, Pv is for the columns for xv
pdix_mu_v = pdi_mu_v[xi, :, :]
pdix_nCO_v = pdi_nCO_v[xi, :, :]
# for chemical potential
mu_fig_i = plot_heatmap(Pv, Tv, -4, 0, pdix_mu_v)
# for the number of CO
mco_fig_i = plot_heatmap(Pv, Tv, 0, 2 , pdix_nCO_v)
| 5,339,419
|
def test_verify_sub_value_NotOK_wrong_sub():
"""
arg=None
"""
_info = setup_conv()
conv = _info['conv']
# Need an IdToken and an AuthorizationRequest with a claims request
ar = {
'scope': 'openid',
'redirect_uri': 'https://example.com/cb',
'client_id': 'client',
'response_type': 'code',
'state': 'state',
'claims': {
'id_token': {
'sub': {'value': 'foo'}
}
}
}
_ar = AuthorizationRequest(**ar)
_url = _ar.request('https://guarded-cliffs-8635.herokuapp.com/auth')
conv.events.store(EV_REDIRECT_URL, _url)
conv.events.store(EV_PROTOCOL_REQUEST, _ar)
# Access token response with id_token with 'sub' claims
conv.events.store(EV_PROTOCOL_RESPONSE, ACCESS_TOKEN_RESPONSE_2)
chk = VerifySubValue()
kwargs = {}
chk._kwargs = kwargs
_ = chk._func(conv)
assert chk._status == ERROR
| 5,339,420
|
def posts():
"""
Function accessed by AJAX to handle a Series of Posts
"""
try:
series_id = request.args[0]
except:
raise HTTP(400)
try:
recent = request.args[1]
except:
recent = 5
table = s3db.cms_post
# List of Posts in this Series
query = (table.series_id == series_id)
posts = db(query).select(table.name,
table.body,
table.avatar,
table.created_by,
table.created_on,
limitby=(0, recent))
output = UL(_id="comments")
import hashlib
for post in posts:
author = B(T("Anonymous"))
if post.created_by:
utable = s3db.auth_user
ptable = s3db.pr_person
ltable = s3db.pr_person_user
query = (utable.id == post.created_by)
left = [ltable.on(ltable.user_id == utable.id),
ptable.on(ptable.pe_id == ltable.pe_id)]
row = db(query).select(utable.email,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
left=left, limitby=(0, 1)).first()
if row:
person = row.pr_person
user = row[utable._tablename]
username = s3_fullname(person)
email = user.email.strip().lower()
hash = hashlib.md5(email).hexdigest()
url = "http://www.gravatar.com/%s" % hash
author = B(A(username, _href=url, _target="top"))
header = H4(post.name)
if post.avatar:
avatar = s3base.s3_avatar_represent(post.created_by)
else:
avatar = ""
row = LI(DIV(avatar,
DIV(DIV(header,
_class="comment-header"),
DIV(XML(post.body),
_class="comment-body"),
_class="comment-text"),
DIV(DIV(post.created_on,
_class="comment-date"),
_class="fright"),
DIV(author,
_class="comment-footer"),
_class="comment-box"))
output.append(row)
return XML(output)
| 5,339,421
|
def resolve_game_object_y_collision(moving, static):
"""Resolves a collision by moving an object along the y axis.
Args:
moving (:obj:`engine.game_object.PhysicalGameObject`):
The object to move along the y axis.
static (:obj:`engine.game_object.PhysicalGameObject`):
The object to leave as-is.
Returns:
The change in the velocity of the object along the y axis.
"""
has_overlap = geometry.detect_overlap_1d(
moving.x, moving.width, static.x, static.width)
if has_overlap:
# Overlap detected along x-axis, resolve collision on y-axis
return _resolve_game_object_axis_collision(moving, static, 'y')
return 0
| 5,339,422
|
def update():
"""The program starts here"""
global current_level
# Initialization (only runs on start/restart)
# player = Player()
dino = DinoSprite()
group = pg.sprite.Group(dino)
sp = spritesheet("tileset48.png")
wall, wall_floor, floor = sp.images_at(
[(48, y, 16*3, 16*3)
for y in range(22*3, 22*3 + 16*3 * 3, 16*3)])
tile_wall = Tile(wall)
tile_wall_floor = Tile(wall_floor)
tile_floor = Tile(floor)
walls, goals, start = parse_level(levels[current_level])
dino.centerx = start[0]
dino.centery = start[1]
# Main update loop
j = 0
food_target = 10
start_time = time.time()
time_for_win = 30
while True:
# update_player(dino, delta())
group.update(delta())
# group.draw(pg.display.get_surface())
window = pg.display.get_surface()
for y, line in enumerate(levels[current_level].split('\n')):
for x, tile in enumerate(line):
if tile == '#':
img = tile_wall
else:
img = tile_floor
# draw_transformed(img.image, ((x * 48)/2, (y * 48)/2))
draw_transformed(img.image, (x * 48 - 48/2, y * 48 - 48/2))
dino.draw()
for wall in walls:
# window = pg.display.get_surface()
# pg.draw.rect(window, pg.Color(100, 100, 100), wall)
# draw_transformed(img, (self.centerx, self.centery), scale=(0.1,0.1))
player_vel, wall_vel, overlap = solve_rect_overlap(dino,
wall,
dino.velocity,
mass_b=0,
bounce=0.00)
dino.velocity = player_vel
for i, goal in enumerate(goals):
window = pg.display.get_surface()
pg.draw.rect(window, pg.Color(20, 100, 20), goal)
normal, depth = overlap_data(dino, goal)
if depth > 0:
j += 1
del goals[i]
# dino.width += 2
# dino.height += 2
dino.scale *= 1.1
random_food(goals)
# draw_text(f"Level: {current_level + 1}", (0, 0))
draw_text(f'Food eaten: {j}/{food_target}', (0,0))
draw_text(f"Time left: {start_time + time_for_win - time.time():.2f}", (0, 20))
if j >= food_target and (time.time()-start_time<time_for_win) :
#yield
draw_text("Du vann", (220, 200))
yield
pg.time.delay(1000)
break
elif (time.time()-start_time>time_for_win):
draw_text("Du Förlora", (220, 200))
yield
pg.time.delay(1000)
break
# Main loop ends here, put your code above this line
yield
| 5,339,423
|
def filter_all(fn, *l):
"""
Runs the filter function on each of the given lists
:param fn: Filter function
:param l: list of lists to filter
:return: list of filtered lists
>>> filter_all(lambda x: x != "", ['a'], ['b'], [""], ["d"])
[['a'], ['b'], [], ['d']]
"""
return [list(filter(fn, lst)) for lst in l]
| 5,339,424
|
async def reply_on_message(message: types.Message):
"""Replies on user message."""
payload, url = _get_server_payload_for_user(message)
async with aiohttp.ClientSession(auth=AUTH) as session:
async with session.post(url, data=payload, headers=HEADERS) as server_response:
await reply_using_server_response(server_response, message)
| 5,339,425
|
def get_test_runners(args):
""" Get Test Runners """
res = list()
qitest_jsons = args.qitest_jsons or list()
# first case: qitest.json in current working directory
test_runner = get_test_runner(args)
if test_runner:
res.append(test_runner)
# second case: qitest.json specified with --qitest-json
for qitest_json in qitest_jsons:
test_runner = get_test_runner(args, qitest_json=qitest_json)
res.append(test_runner)
# third case: parsing build projects
build_projects_runners = parse_build_projects(args)
# avoid appending a test_runner guessed from a build project
# when res already contains a test runner computed from a
# --qitest-json argument
known_cwds = [x.cwd for x in res]
for test_runner in build_projects_runners:
if test_runner.cwd not in known_cwds:
res.append(test_runner)
if args.coverage and not build_projects_runners:
raise Exception("""--coverage can only be used from a qibuild CMake project\n""")
elif args.coverage:
return build_projects_runners
if not res:
raise EmptyTestListException("Nothing found to test")
return res
| 5,339,426
|
def test_decorator_return_val():
""" Tests tha the decorator returns value. """
returned = on_completed()
assert returned == "It works!"
| 5,339,427
|
def test_build_package_name(tmp_path, monkeypatch):
"""The zip file name comes from the metadata."""
to_be_zipped_dir = tmp_path / BUILD_DIRNAME
to_be_zipped_dir.mkdir()
# the metadata
metadata_data = {'name': 'name-from-metadata'}
metadata_file = tmp_path / 'metadata.yaml'
with metadata_file.open('wt', encoding='ascii') as fh:
yaml.dump(metadata_data, fh)
# zip it
monkeypatch.chdir(tmp_path) # so the zip file is left in the temp dir
builder = Builder({
'from': pathlib.Path(str(tmp_path)), # bad support for tmp_path's pathlib2 in Py3.5
'entrypoint': 'whatever',
'requirement': [],
})
zipname = builder.handle_package()
assert zipname == "name-from-metadata.charm"
| 5,339,428
|
def save_image(image, path):
"""Save an image as a png file."""
min_val = image.min()
if min_val < 0:
image = image - min_val  # shift values so the minimum becomes zero
scipy.misc.imsave(path, image)
print('[#] Image saved {}.'.format(path))
| 5,339,429
|
def get_preselected_facets(params, all_categories):
""" Resolve all facets that have been determined by the GET parameters.
Args:
params: Contains the categories/facets
all_categories:
Returns:
dict: Contains all sorted facets
"""
ret_arr = {}
iso_cat = params.get("isoCategories", "")
custom_cat = params.get("customCategories", "")
inspire_cat = params.get("inspireThemes", "")
org_cat = params.get("registratingDepartments", "")
# resolve ids by iterating all_categories
all_iso_cat = all_categories[0]
all_inspire_cat = all_categories[1]
all_custom_cat = all_categories[2]
all_org_cat = all_categories[3]
iso_preselect = __resolve_single_facet(iso_cat, all_iso_cat)
inspire_preselect = __resolve_single_facet(inspire_cat, all_inspire_cat)
custom_preselect = __resolve_single_facet(custom_cat, all_custom_cat)
org_preselect = __resolve_single_facet(org_cat, all_org_cat)
if len(iso_preselect) > 0:
ret_arr["ISO 19115"] = iso_preselect
if len(inspire_preselect) > 0:
ret_arr["INSPIRE"] = inspire_preselect
if len(custom_preselect) > 0:
ret_arr["Custom"] = custom_preselect
if len(org_preselect) > 0:
ret_arr["Organizations"] = org_preselect
return ret_arr
| 5,339,430
|
def has_joined(*args: list, **kwargs) -> str:
"""
Validates the user's joining the channel after being required to join.
:param args: *[0] -> first name
:param kwargs:
:return: Generated validation message
"""
first_name = args[0]
text = f"{_star_struck}{_smiling_face_with_heart} بسیار خب " \
f"<b>{first_name}</b> " \
f", حالا تمام دسترسی ها رو داری{_party_popper}{_confetti_ball}\n\n" \
f"تبریک از طرف @chromusic_fa {_red_heart}\n" \
f"با خیال راحت هر فایل صوتی رو سرچ کن {_face_blowing_a_kiss}"
return text
| 5,339,431
|
def depth_residual_regresssion_subnet(x, flg, regular, subnet_num):
"""Build a U-Net architecture"""
""" Args: x is the input, 4-D tensor (BxHxWxC)
flg represent weather add the BN
regular represent the regularizer number
Return: output is 4-D Tensor (BxHxWxC)
"""
pref = 'depth_regression_subnet_' + str(subnet_num) + '_'
# whether to train flag
train_ae = flg
# define initializer for the network
keys = ['conv', 'upsample']
keys_avoid = ['OptimizeLoss']
inits = []
init_net = None
if init_net != None:
for name in init_net.get_variable_names():
# select certain variables
flag_init = False
for key in keys:
if key in name:
flag_init = True
for key in keys_avoid:
if key in name:
flag_init = False
if flag_init:
name_f = name.replace('/', '_')
num = str(init_net.get_variable_value(name).tolist())
# self define the initializer function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.init_ops import Initializer
exec(
"class " + name_f + "(Initializer):\n def __init__(self,dtype=tf.float32): self.dtype=dtype \n def __call__(self,shape,dtype=None,partition_info=None): return tf.cast(np.array(" + num + "),dtype=self.dtype)\n def get_config(self):return {\"dtype\": self.dtype.name}")
inits.append(name_f)
# autoencoder
n_filters = [
128, 96,
64, 32,
16, 1,
]
filter_sizes = [
3, 3,
3, 3,
3, 3,
]
pool_sizes = [ \
1, 1,
1, 1,
1, 1,
]
pool_strides = [
1, 1,
1, 1,
1, 1,
]
skips = [ \
False, False,
False, False,
False, False,
]
# change space
ae_inputs = tf.identity(x, name='ae_inputs')
# prepare input
current_input = tf.identity(ae_inputs, name="input")
####################################################################################################################
# convolutional layers: depth regression
feature = []
for i in range(0, len(n_filters)):
name = pref + "conv_" + str(i)
# define the initializer
if name + '_bias' in inits:
bias_init = eval(name + '_bias()')
else:
bias_init = tf.zeros_initializer()
if name + '_kernel' in inits:
kernel_init = eval(name + '_kernel()')
else:
kernel_init = None
if i == (len(n_filters) - 1):
activation = None
else:
activation = relu
# convolution
current_input = tf.layers.conv2d(
inputs=current_input,
filters=n_filters[i],
kernel_size=[filter_sizes[i], filter_sizes[i]],
padding="same",
activation=activation,
trainable=train_ae,
kernel_initializer=kernel_init,
bias_initializer=bias_init,
name=name,
)
if pool_sizes[i] == 1 and pool_strides[i] == 1:
feature.append(current_input)
else:
feature.append(
tf.layers.max_pooling2d( \
inputs=current_input,
pool_size=[pool_sizes[i], pool_sizes[i]],
strides=pool_strides[i],
name=pref + "pool_" + str(i)
)
)
current_input = feature[-1]
depth_coarse = tf.identity(feature[-1], name='depth_coarse_output')
return depth_coarse
| 5,339,432
|
def dist2(x, c):
"""
Calculates squared distance between two sets of points.
Parameters
----------
x: numpy.ndarray
Data of shape `(ndata, dimx)`
c: numpy.ndarray
Centers of shape `(ncenters, dimc)`
Returns
-------
n2: numpy.ndarray
Squared distances between each pair of data from x and c, of shape
`(ndata, ncenters)`
"""
assert x.shape[1] == c.shape[1], \
'Data dimension does not match dimension of centers'
x = np.expand_dims(x, axis=1)  # new shape will be `(ndata, 1, dimx)`
c = np.expand_dims(c, axis=0)  # new shape will be `(1, ncenters, dimc)`
# We will now use broadcasting to easily calculate pairwise distances
n2 = np.sum((x - c) ** 2, axis=-1)
return n2
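# A quick, hypothetical check of dist2 against an explicit double loop over the
# documented (ndata, ncenters) contract; assumes numpy is imported as np.
x = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])   # 3 data points in 2-D
c = np.array([[0.0, 1.0], [2.0, 2.0]])               # 2 centers in 2-D
d = dist2(x, c)
assert d.shape == (3, 2)                             # (ndata, ncenters)
naive = np.array([[np.sum((xi - cj) ** 2) for cj in c] for xi in x])
assert np.allclose(d, naive)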
| 5,339,433
|
def process_name(i: int, of: int) -> str:
"""Return e.g. '| | 2 |': an n-track name with track `i` (here i=2) marked.
This makes it easy to follow each process's log messages, because you just
go down the line until you encounter the same number again.
Example: The interleaved log of four processes that each simulate a car
visiting a charging station. The processes have been named with
`process_name()`, and their log messages start with their `self.name`.
(Car #2 does not turn up in this snippet.)
| | | 3 arriving at 6
| 1 | | starting to charge at 7
0 | | | starting to charge at 7
| 1 | | leaving the bcs at 9
"""
lines = ["|"] * of
lines[i] = str(i)
return " ".join(lines)
| 5,339,434
|
def cards_db(db):
"""
CardsDB object that's empty.
"""
db.delete_all()
return db
| 5,339,435
|
def _geo_connected(geo, rxn):
""" Assess if geometry is connected. Right now only works for
minima
"""
# Determine connectivity (only for minima)
if rxn is not None:
gra = automol.geom.graph(geo)
conns = automol.graph.connected_components(gra)
lconns = len(conns)
else:
lconns = 1
# Check connectivity
if lconns == 1:
connected = True
else:
ioprinter.bad_conformer('disconnected')
connected = False
return connected
| 5,339,436
|
def scale_to_range(image, dest_range=(0,1)):
""" Scale an image to the given range.
"""
return np.interp(image, xp=(image.min(), image.max()), fp=dest_range)
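# Quick illustration of the rescaling (assumes numpy as np, as in the snippet above).
img = np.array([[2.0, 4.0], [6.0, 2.0]])
print(scale_to_range(img))                        # 2 -> 0.0, 4 -> 0.5, 6 -> 1.0
print(scale_to_range(img, dest_range=(0, 255)))   # e.g. for 8-bit export: 2 -> 0, 6 -> 255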
| 5,339,437
|
def files():
"""Hypothesis strategy for generating objects pyswagger can use as file
handles to populate `file` format parameters.
Generated values take the format: `dict('data': <file object>)`"""
return file_objects().map(lambda x: {"data": x})
| 5,339,438
|
def get_histograms(
query: Optional[str] = None, delta: Optional[bool] = None
) -> Generator[dict, dict, list[Histogram]]:
"""Get Chrome histograms.
Parameters
----------
query: Optional[str]
Requested substring in name. Only histograms which have query as a
substring in their name are extracted. An empty or absent query returns
all histograms.
delta: Optional[bool]
If true, retrieve delta since last call.
Returns
-------
histograms: list[Histogram]
Histograms.
**Experimental**
"""
response = yield {
"method": "Browser.getHistograms",
"params": filter_none({"query": query, "delta": delta}),
}
return [Histogram.from_json(h) for h in response["histograms"]]
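# A hypothetical walk-through of the generator protocol used above: the caller sends
# the raw CDP reply back into the generator and receives the parsed histograms via
# StopIteration. The reply dict below is a stand-in, and the request comment assumes
# that filter_none (from the same module) drops None-valued params.
gen = get_histograms(query="Memory")
request = next(gen)                 # e.g. {"method": "Browser.getHistograms", "params": {"query": "Memory"}}
try:
    gen.send({"histograms": []})    # feed the browser's reply back in
except StopIteration as stop:
    histograms = stop.value         # the parsed list[Histogram] (empty here)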
| 5,339,439
|
def energy_decay_curve_chu_lundeby(
data,
sampling_rate,
freq='broadband',
noise_level='auto',
is_energy=False,
time_shift=True,
channel_independent=False,
normalize=True,
plot=False):
""" This function combines Chu's and Lundeby's methods:
The estimated noise level is subtracted before backward integration,
the impulse response is truncated at the intersection time,
and the correction for the truncation is applied [1, 2, 3]_
Parameters
----------
data : ndarray, double
The room impulse response with dimension [..., n_samples]
sampling_rate: integer
The sampling rate of the room impulse response.
freq: integer OR string
The frequency band. If set to 'broadband',
the time window of the Lundeby-algorithm will not be set in dependence
of frequency.
noise_level: ndarray, double OR string
If not specified, the noise level is calculated based on the last 10
percent of the RIR. Otherwise specify manually for each channel
as array.
is_energy: boolean
Defines, if the data is already squared.
time_shift : boolean
Defines, if the silence at beginning of the RIR should be removed.
channel_independent : boolean
Defines, if the time shift and normalization is done
channel-independently or not.
normalize : boolean
Defines, if the energy decay curve should be normalized in the end
or not.
plot: Boolean
Specifies, whether the results should be visualized or not.
Returns
-------
energy_decay_curve: ndarray, double
Returns the noise-handled EDC.
References
----------
.. [1] Lundeby, Virgran, Bietz and Vorlaender - Uncertainties of
Measurements in Room Acoustics - ACUSTICA Vol. 81 (1995)
.. [2] W. T. Chu. “Comparison of reverberation measurements using
Schroeder’s impulse method and decay-curve averaging method”. In:
Journal of the Acoustical Society of America 63.5 (1978),
pp. 1444–1450.
.. [3] M. Guski, “Influences of external error sources on measurements of
room acoustic parameters,” 2015.
"""
energy_data, n_channels, data_shape = preprocess_rir(
data,
is_energy=is_energy,
time_shift=time_shift,
channel_independent=channel_independent)
n_samples = energy_data.shape[-1]
subtraction = subtract_noise_from_squared_rir(
energy_data,
noise_level=noise_level)
intersection_time, late_reverberation_time, noise_level = \
intersection_time_lundeby(
energy_data,
sampling_rate=sampling_rate,
freq=freq,
initial_noise_power=noise_level,
is_energy=True,
time_shift=False,
channel_independent=False,
plot=False)
time_vector = smooth_rir(energy_data, sampling_rate)[2]
energy_decay_curve = np.zeros([n_channels, n_samples])
for idx_channel in range(0, n_channels):
intersection_time_idx = np.argmin(np.abs(
time_vector - intersection_time[idx_channel]))
if noise_level == 'auto':
p_square_at_intersection = estimate_noise_energy(
energy_data[idx_channel], is_energy=True)
else:
p_square_at_intersection = noise_level[idx_channel]
# calculate correction term according to DIN EN ISO 3382
correction = (p_square_at_intersection
* late_reverberation_time[idx_channel]
* (1 / (6*np.log(10)))
* sampling_rate)
energy_decay_curve[idx_channel, :intersection_time_idx] = \
ra.schroeder_integration(
subtraction[idx_channel, :intersection_time_idx],
is_energy=True)
energy_decay_curve[idx_channel] += correction
if normalize:
# Normalize the EDC...
if not channel_independent:
# ...by the first element of each channel.
energy_decay_curve = (energy_decay_curve.T /
energy_decay_curve[..., 0]).T
else:
# ...by the maximum first element of each channel.
max_start_value = np.amax(energy_decay_curve[..., 0])
energy_decay_curve /= max_start_value
energy_decay_curve[..., intersection_time_idx:] = np.nan
if plot:
plt.figure(figsize=(15, 3))
plt.subplot(131)
plt.plot(time_vector, 10*np.log10(energy_data.T))
plt.xlabel('Time [s]')
plt.ylabel('Squared IR [dB]')
plt.subplot(132)
plt.plot(time_vector, 10*np.log10(subtraction.T))
plt.xlabel('Time [s]')
plt.ylabel('Noise subtracted IR [dB]')
plt.subplot(133)
plt.plot(time_vector[0:energy_decay_curve.shape[-1]], 10*np.log10(
energy_decay_curve.T))
plt.xlabel('Time [s]')
plt.ylabel('Tr. EDC with corr. & subt. [dB]')
plt.tight_layout()
# Recover original data shape:
energy_decay_curve = np.reshape(energy_decay_curve, data_shape)
energy_decay_curve = np.squeeze(energy_decay_curve)
return energy_decay_curve
| 5,339,440
|
def sinkhorn(
p, q, metric="euclidean",
):
"""
Returns the entropy-regularized (Sinkhorn) approximation of the
earth mover's distance between two point clouds.
Parameters
----------
p : 2-D array
First point cloud
q : 2-D array
Second point cloud
metric : str
Distance metric passed to ``sklearn.metrics.pairwise_distances``
Returns
-------
distance : float
The approximate distance between the two point clouds
"""
p_weights = np.ones(len(p)) / len(p)
q_weights = np.ones(len(q)) / len(q)
pairwise_dist = np.ascontiguousarray(
pairwise_distances(p, Y=q, metric=metric, n_jobs=-1)
)
result = pot.sinkhorn2(
p_weights,
q_weights,
pairwise_dist,
reg=0.05,
numItermax=100,
return_matrix=False,
)
return np.sqrt(result)
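# Hypothetical usage sketch; it assumes the same imports the snippet relies on
# (numpy as np, sklearn's pairwise_distances, and POT imported as pot).
rng = np.random.default_rng(0)
cloud_a = rng.normal(size=(50, 2))
cloud_b = rng.normal(loc=3.0, size=(50, 2))
far = sinkhorn(cloud_a, cloud_b)    # larger value for well-separated clouds
near = sinkhorn(cloud_a, cloud_a)   # small, but not exactly 0 because of the entropic bias
print(far, near)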
| 5,339,441
|
def generate_s3_events(cluster_name, cluster_dict, config):
"""Add the S3 Events module to the Terraform cluster dict.
Args:
cluster_name (str): The name of the currently generating cluster
cluster_dict (defaultdict): The dict containing all Terraform config for a given cluster.
config (dict): The loaded config from the 'conf/' directory
Returns:
bool: Result of applying the s3_events module
"""
s3_event_buckets = config['clusters'][cluster_name]['modules']['s3_events']
generate_s3_events_by_bucket(cluster_name, cluster_dict, config, s3_event_buckets)
return True
| 5,339,442
|
def _check_predictor_matrix(
predictor_matrix, allow_nan=False, min_num_dimensions=3,
max_num_dimensions=5):
"""Checks predictor matrix for errors.
:param predictor_matrix: numpy array of predictor images. Dimensions may be
E x M x N, E x M x N x C, or E x M x N x T x C.
:param allow_nan: Boolean flag. If allow_nan = False and this method finds
a NaN, it will error out.
:param min_num_dimensions: Minimum number of dimensions expected in
`predictor_matrix`.
:param max_num_dimensions: Max number of dimensions expected in
`predictor_matrix`.
"""
if allow_nan:
error_checking.assert_is_real_numpy_array(predictor_matrix)
else:
error_checking.assert_is_numpy_array_without_nan(predictor_matrix)
num_dimensions = len(predictor_matrix.shape)
error_checking.assert_is_geq(num_dimensions, min_num_dimensions)
error_checking.assert_is_leq(num_dimensions, max_num_dimensions)
| 5,339,443
|
def cluster_seg(bt, seg_list, radius):
"""
Fetch segments which align themself for a given tolerance.
"""
cluster, seen_ix = [], set()
for i, seg in enumerate(seg_list):
if i not in seen_ix:
sim_seg_ix = list(bt.query_radius([seg], radius)[0])
seen_ix |= set(sim_seg_ix)
cluster.append(sim_seg_ix)
return _find_connected_components(cluster)
| 5,339,444
|
def stage_grid(
Dstg,
A,
dx_c,
tte,
min_Rins=None,
recamber=None,
stag=None,
resolution=1.
):
"""Generate an H-mesh for a turbine stage."""
# Change scaling factor on grid points
# Distribute the spacings between stator and rotor
dx_c = np.array([[dx_c[0], dx_c[1] / 2.0], [dx_c[1] / 2.0, dx_c[2]]])
# Streamwise grids for stator and rotor
x_c, ilte = streamwise_grid(dx_c, resolution=resolution)
x = [x_ci * Dstg.cx[0] for x_ci in x_c]
# Generate radial grid
Dr = np.array([Dstg.Dr[:2], Dstg.Dr[1:]])
r = merid_grid(x_c, Dstg.rm, Dr, resolution=resolution)
# Evaluate radial blade angles
r1 = r[0][ilte[0][0], :]
spf = (r1 - r1.min()) / r1.ptp()
chi = np.stack((Dstg.free_vortex_vane(spf), Dstg.free_vortex_blade(spf)))
# If recambering, then tweak the metal angles
if recamber is not None:
dev = np.reshape(recamber, (2, 2, 1))
dev[1] *= -1 # Reverse direction of rotor angles
chi += dev
# Get sections (normalised by axial chord for now)
sect = [
geometry.radially_interpolate_section(
spf, chii, spf, tte, Ai, stag=stagi
)
for chii, Ai, stagi in zip(chi, A, stag)
]
# If we have asked for a minimum inscribed circle, confirm that the
# constraint is not violated
if min_Rins:
for i, row_sect in enumerate(sect):
for rad_sect in row_sect:
current_radius = geometry.largest_inscribed_circle(rad_sect.T)
if current_radius < min_Rins:
raise geometry.GeometryConstraintError(
(
"Row %d, Thickness is too small for the constraint "
"inscribed circle: %.3f < %.3f"
% (i, current_radius, min_Rins)
)
)
# Now we can do b2b grids
rt = [b2b_grid(*args, resolution=resolution) for args in zip(x, r, Dstg.s, Dstg.cx, sect)]
# Offset the rotor so it is downstream of stator
x[1] = x[1] + x[0][-1] - x[1][0]
# fig, ax = plt.subplots()
# ax.plot(x[0],rt[0][:,0,(0,-1)])
# ax.plot(x[1],rt[1][:,0,(0,-1)])
# ax.axis('equal')
# plt.savefig('sect.pdf')
# quit()
return x, r, rt, ilte
| 5,339,445
|
def get_best_trial(trial_list, metric):
"""Retrieve the best trial."""
return max(trial_list, key=lambda trial: trial.last_result.get(metric, 0))
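# A minimal sketch with stand-ins for trial objects: get_best_trial only needs
# a ``last_result`` dict on each trial, so SimpleNamespace objects suffice here.
from types import SimpleNamespace

trials = [SimpleNamespace(last_result={"mean_accuracy": acc})
          for acc in (0.71, 0.84, 0.79)]
best = get_best_trial(trials, "mean_accuracy")
assert best.last_result["mean_accuracy"] == 0.84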
| 5,339,446
|
def connect_signals(**kwargs):
"""
Listens to the ``initializing`` signal and tells other modules to
connect their signals. This is done so as to guarantee that django
is loaded first.
"""
from reviewboard.notifications import email, webhooks
email.connect_signals()
webhooks.connect_signals()
| 5,339,447
|
def director_score():
"""
uses .agg() to apply .count() and .sum() in one pass:
counts the number of movies for each director and
sums the IMDB scores of that director's movies,
renames the columns, sorts the values in descending order,
then calculates the average score into a new column.
"""
gb_director_score = (
df.groupby('Director')
.agg({'Title': 'count', 'IMDB Score': 'sum'})
.rename(columns={'Title': 'Number of movies',
'IMDB Score': 'Total IMDB Score'})
.sort_values(by=['Number of movies', 'Total IMDB Score'],
ascending=False)
.reset_index()
)
gb_director_score['Average Score'] = (
gb_director_score["Total IMDB Score"] /
gb_director_score["Number of movies"]).round(2)
print(f"""{Fore.YELLOW + Style.BRIGHT}
The average score of the most prolific directors:
{Style.RESET_ALL}{gb_director_score.head(10).to_string(index=False)}\n""")
welcome()
| 5,339,448
|
def plot_confusion_matrix(cm, xticks, yticks, normalize=False, ignore_main_diagonal=False,
cmap=plt.cm.binary):
"""
plots a confusion matrix using matplotlib
Parameters
----------
cm : (tensor or numpy array)
confusion matrix
e.g. from tf.math.confusion_matrix(y_test, y_pred)
xticks : (list)
x tick labels
yticks : (list)
y tick labels
normalize : (bool), optional
scales cm to 1. The default is False.
ignore_main_diagonal : (bool), optional
sets the main diagonal to zero. The default is False.
cmap : matplotlib colormap, optional
Returns
-------
None.
"""
cm = np.array(cm)
if normalize: # normalize to 1.0
cm = cm / cm.max()
if ignore_main_diagonal: # set main diagonal to zero
for i in range(len(cm)):
cm[i, i] = 0
plt.imshow(cm, cmap=cmap)
plt.xticks(ticks=range(len(xticks)), labels=xticks, rotation=90)
plt.yticks(ticks=range(len(yticks)), labels=yticks)
plt.xlabel("predicted class")
plt.ylabel("actual class")
# put numbers inside the heatmap
thresh = cm.max() / 2.
for i, row in enumerate(cm):
for j, val in enumerate(row):
plt.text(j, i, format(int(val)),
horizontalalignment="center",
color = "white" if val > thresh else "black")
plt.colorbar()
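# Hypothetical usage with a small hand-made confusion matrix; assumes numpy as np
# and matplotlib.pyplot as plt are imported, as the function above already requires.
class_names = ["cat", "dog", "bird"]
cm = np.array([[50, 2, 0],
               [3, 45, 1],
               [0, 4, 48]])
plot_confusion_matrix(cm, class_names, class_names)
plt.show()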
| 5,339,449
|
def processMultiplierSegment(segment, source_dir_band, wind_prj, bear_prj, dst_band):
"""
Calculates local wind multiplier data by image segments
and writes to corresponding segment of output file
:param segment: image segment specified by [x_offset, y_offset,
width, height, segment_count, total_segments]
:param source_dir_band: 8 band array representing wind multipliers
data in 8 directions
:param wind_prj: band representing gust data
:param bear_prj: band representing bearing data
:param dst_band: band of output file
"""
band_numbers_for_indices_in_geotiff = [2, 3, 1, 6, 5, 7, 8, 4, 2]
indices = {
0: {'dir': 'n', 'min': 0., 'max': 22.5},
1: {'dir': 'ne', 'min': 22.5, 'max': 67.5},
2: {'dir': 'e', 'min': 67.5, 'max': 112.5},
3: {'dir': 'se', 'min': 112.5, 'max': 157.5},
4: {'dir': 's', 'min': 157.5, 'max': 202.5},
5: {'dir': 'sw', 'min': 202.5, 'max': 247.5},
6: {'dir': 'w', 'min': 247.5, 'max': 292.5},
7: {'dir': 'nw', 'min': 292.5, 'max': 337.5},
8: {'dir': 'n', 'min': 337.5, 'max': 360.}
}
[x_offset, y_offset, width, height, segment_id, total_segments] = segment
log.debug("Processing segment {0}/{1}: {2} {3} {4} {5}"
.format(segment_id, total_segments, x_offset, y_offset, width, height))
with threadLock_gust:
wind_data = wind_prj.ReadAsArray(x_offset, y_offset, width, height)
with threadLock_bear:
bear_data = bear_prj.ReadAsArray(x_offset, y_offset, width, height)
m4_all = loadAllBandArrayData(source_dir_band, segment_info=segment)
local = np.zeros([height, width], dtype='float32')
for i in list(indices.keys()):
m4 = m4_all[band_numbers_for_indices_in_geotiff[i] - 1]
idx = np.where((bear_data >= indices[i]['min']) &
(bear_data < indices[i]['max']))
local[idx] = wind_data[idx] * m4[idx]
with threadLock_out:
dst_band.WriteArray(local, x_offset, y_offset)
print('\rProgress: {0:.2f}'.format((segment_id * 100) / total_segments), "%", end="")
if segment_id % int(math.ceil(total_segments / 20)) == 0:
if log.getLogger(__name__).getEffectiveLevel() == log.DEBUG:
print("")
log.debug('Progress: {0} %'.format(int((segment_id * 100) / total_segments)))
| 5,339,450
|
def make_registry_metaclass(registry_store):
"""Return a new Registry metaclass."""
if not isinstance(registry_store, dict):
raise TypeError("'registry_store' argument must be a dict")
class Registry(type):
"""A metaclass that stores a reference to all registered classes."""
def __new__(mcs, class_name, base_classes, class_dict):
"""Create and returns a new instance of Registry.
The registry is a class named 'class_name' derived from 'base_classes'
that defines 'class_dict' as additional attributes.
The returned class is added to 'registry_store' using
class_dict["REGISTERED_NAME"] as the name, or 'class_name'
if the "REGISTERED_NAME" attribute isn't defined. If the
sentinel value 'LEAVE_UNREGISTERED' is specified as the
name, then the returned class isn't added to
'registry_store'.
The returned class will have the "REGISTERED_NAME" attribute
defined either as its associated key in 'registry_store' or
the 'LEAVE_UNREGISTERED' sentinel value.
"""
registered_name = class_dict.setdefault("REGISTERED_NAME", class_name)
cls = type.__new__(mcs, class_name, base_classes, class_dict)
if registered_name is not LEAVE_UNREGISTERED:
if registered_name in registry_store:
raise ValueError("The name %s is already registered; a different value for the"
" 'REGISTERED_NAME' attribute must be chosen" %
(registered_name))
registry_store[registered_name] = cls
return cls
return Registry
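# Hypothetical usage sketch. LEAVE_UNREGISTERED is the sentinel defined alongside
# this helper in its module; the class names below are illustrative only.
_parsers = {}
ParserRegistry = make_registry_metaclass(_parsers)

class BaseParser(metaclass=ParserRegistry):
    REGISTERED_NAME = LEAVE_UNREGISTERED   # keep the abstract base out of the registry

class JsonParser(BaseParser):
    pass                                   # registered under its class name

class YamlParser(BaseParser):
    REGISTERED_NAME = "yaml"               # registered under an explicit name

assert _parsers == {"JsonParser": JsonParser, "yaml": YamlParser}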
| 5,339,451
|
def test_chairman_info(
session: Session, id: int, true_title: str, true_firstname: str, true_lastname: str
) -> None:
"""Test that chairman info is correctly parsed."""
[element] = session.xml_transcript.xpath(f"(. //*[local-name() = 'Toimenpide'])[{id}]")
title, firstname, lastname = session.get_chairman_info(element)
assert title == true_title
assert firstname == true_firstname
assert lastname == true_lastname
| 5,339,452
|
def bind11(reactant, max_helix = True):
"""
Returns a list of reaction pathways which can be produced by 1-1 binding
reactions of the argument complex. The 1-1 binding reaction is the
hybridization of two complementary unpaired domains within a single complex
to produce a single unpseudoknotted product complex.
"""
reactions = set()
structure = list(reactant.pair_table)
for (strand_index, strand) in enumerate(structure):
for (domain_index, domain) in enumerate(strand):
# The domain must be unpaired (free) before it can bind
if structure[strand_index][domain_index] is not None :
continue
start_loc = (strand_index, domain_index)
# search (one direction) around the loop for an open domain that can be bound.
results = find_on_loop(reactant, start_loc, filter_bind11)
assert len(results) == len(find_on_loop(reactant, start_loc, filter_bind11, direction = -1))
for e, (invader, before, target, after) in enumerate(results):
if max_helix:
invader, before, target, after = zipper(
reactant, invader[0], before, target[0], after, filter_bind11)
results[e] = list(map(Loop, [invader, before, target, after]))
# build products
for (loc1s, before, loc2s, after) in results:
# Should be reversed loc2s right?
                assert all(x == ~y for x, y in zip(loc1s.domains, loc2s.domains))
product = do_bind11(reactant, loc1s.domain_locs, loc2s.domain_locs)
reaction = PepperReaction([reactant], [product], 'bind11')
if reaction.rate_constant[0] is None:
reaction.rate_constant = (unimolecular_binding_rate(loc1s.dlength, before, after), '/s')
reactions.add(reaction)
return sorted(reactions)
| 5,339,453
|
def test_three_code_wars():
"""Test function that emulates test.assert_equals(nth_even(3), 4)."""
from get_nth_even_number import nth_even
assert nth_even(3) == 4
| 5,339,454
|
def benchmark():
"""
Some basic performance benchmarks. You are encouraged to implement
your own 'next' function!
"""
sg = SudokuGames(end=24)
iter_, recu_ = Sudoku.solve_iterative, Sudoku.solve_recursive
# tuple of pairs that define the solver function. First element picks the
# flavour (iterative or recursive), second element picks the 'next'
# function (see documentation for Sudoku.most_constrained_zero)
solve_funs = (
(iter_, Sudoku.most_constrained_zero),
(iter_, Sudoku.most_constrained_zero_alt1),
(iter_, Sudoku.most_constrained_zero_alt2)
)
seed(0)
    table = sample(list(sg.sudoku17.items()), 1)[0][1]  # list() keeps random.sample valid on Python 3.9+
# table = SudokuGames.samples[2]
print(f'\nSudoku to solve:\n{table}')
sud = None
num_runs = 20
for meth, next_fun in solve_funs:
tt = 0
for n in range(num_runs):
sud = Sudoku(table)
meth(sud, next_fun)
tt += sud.solve_time
print(f'\nSolving {num_runs} times with {next_fun.__name__}() '
f'({meth.__name__.split("_")[1]}): \n'
f'{tt/num_runs*1000: 0.1f} ms/sudoku\n{sud.solution}')
| 5,339,455
|
def main(url, owner, repo, outdir=None, suffix=None):
"""Route request."""
token, api_url = parse_url(url)
dl_assets = get_latest_assets(api_url, owner, repo, token, suffix)
if outdir is not None and not os.path.isdir(outdir):
os.makedirs(outdir)
for pkg_name, pkg_url in dl_assets:
local_path = pkg_name if outdir is None else os.path.join(outdir, pkg_name)
handle_redirects(pkg_url, local_path, token)
| 5,339,456
|
def main():
"""
Creates a knowledge resource from triplets file: first step,
receives the entire triplets file and saves the following files:
'_path_to_id.db', '_id_to_path.db', '_term_to_id.db', '_id_to_term.db'
"""
# Get the arguments
args = docopt("""Creates a knowledge resource from triplets file: first step,
receives the entire triplets file and saves the following files:
'_path_to_id.db', '_id_to_path.db', '_term_to_id.db', '_id_to_term.db'
Usage:
create_resource_from_corpus_1.py <frequent_paths_file> <terms_file> <resource_prefix>
<frequent_paths_file> = the file containing the frequent paths, that should be included in the resource.
Similarly to Snow et al. (2004), we considered only paths that occurred with 5 different term-pairs in the
corpus.
<terms_file> = the file containing all the terms (=vocabulary).
<resource_prefix> = the file names' prefix for the resource files
""")
frequent_paths_file = args['<frequent_paths_file>']
terms_file = args['<terms_file>']
resource_prefix = args['<resource_prefix>']
# Load the frequent paths
print('Saving the paths...')
with codecs.open(frequent_paths_file, 'r', 'utf-8') as f_in:
frequent_paths = set([line.strip() for line in f_in])
# Save the paths
path_to_id = {path: i for i, path in enumerate(list(frequent_paths))}
path_to_id_db = bsddb3.btopen(resource_prefix + '_path_to_id.db', 'c')
id_to_path_db = bsddb3.btopen(resource_prefix + '_id_to_path.db', 'c')
    for path, id_ in path_to_id.items():
        id_, path = str(id_), str(path)
        path_to_id_db[path] = id_
        id_to_path_db[id_] = path
path_to_id_db.sync()
id_to_path_db.sync()
# frequent_paths = None # TODO delete?
# Load the terms
print('Saving the terms...')
with codecs.open(terms_file, 'r', 'utf-8') as f_in:
terms = [line.strip() for line in f_in]
# Save the terms
term_to_id = {term: i for i, term in enumerate(terms)}
term_to_id_db = bsddb3.btopen(resource_prefix + '_term_to_id.db', 'c')
id_to_term_db = bsddb3.btopen(resource_prefix + '_id_to_term.db', 'c')
for term, id_ in term_to_id.items():
id_, term = str(id_), str(term)
term_to_id_db[term] = id_
id_to_term_db[id_] = term
term_to_id_db.sync()
id_to_term_db.sync()
| 5,339,457
|
def get_all_movie_props(movies_set: pd.DataFrame, flag: int, file_path: str):
"""
Function that returns the data frame of all movie properties from dbpedia
:param movies_set: data set of movies with columns movie id and movie dbpedia uri
:param flag: 1 to generate the data frame from scratch and 0 to read from file
    :param file_path: file path to read from when flag is 0 (the data frame is written to this path when flag is 1)
:return: the data frame of all movie properties from dbpedia
"""
cols = ['movie_id', 'prop', 'obj']
if flag == 1:
all_movie_props = obtain_all_movie_props(movies_set, cols)
all_movie_props.to_csv(file_path, mode='w', header=False, index=False)
else:
all_movie_props = pd.read_csv(file_path, header=None)
all_movie_props.columns = cols
all_movie_props = all_movie_props.set_index(cols[0])
return all_movie_props
| 5,339,458
|
def convert_to_clocks(duration, f_sampling=200e6, rounding_period=None):
"""
convert a duration in seconds to an integer number of clocks
f_sampling: 200e6 is the CBox sampling frequency
"""
if rounding_period is not None:
duration = max(duration//rounding_period, 1)*rounding_period
clock_duration = int(duration*f_sampling)
return clock_duration
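
# Minimal usage sketch (values are illustrative): a 1 microsecond pulse at the
# default 200 MHz sampling rate corresponds to 200 clock cycles.
print(convert_to_clocks(1e-6))                  # 200
print(convert_to_clocks(1e-6, f_sampling=1e9))  # 1000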
| 5,339,459
|
def _add_supplemental_plot_info(infos_copy, item, common_data):
"""Add supplemental info to plot description"""
suppl_info = []
if item.get('dpSupplementalMessage'):
# Short information about future release of tv show season or other
suppl_info.append(item['dpSupplementalMessage'])
# The 'sequiturEvidence' dict can be of type 'hook' or 'watched'
if (item.get('sequiturEvidence') and
item['sequiturEvidence'].get('type') == 'hook' and
item['sequiturEvidence'].get('value')):
# Short information about the actors career/awards and similarities/connections with others films or tv shows
suppl_info.append(item['sequiturEvidence']['value']['text'])
suppl_text = '[CR][CR]'.join(suppl_info)
plot = infos_copy.get('Plot', '')
plotoutline = infos_copy.get('PlotOutline', '')
if suppl_text:
suppl_text = _colorize_text(common_data['supplemental_info_color'], suppl_text)
if plot:
plot += '[CR][CR]'
if plotoutline:
plotoutline += '[CR][CR]'
infos_copy.update({'Plot': plot + suppl_text})
infos_copy.update({'PlotOutline': plotoutline + suppl_text})
| 5,339,460
|
def account_approved(f):
"""Checks whether user account has been approved, raises a 401 error
otherwise .
"""
def decorator(*args, **kwargs):
if not current_user:
abort(401, {'message': 'Invalid user account.'})
elif not current_user.is_approved:
abort(401, {'message': 'Account has not yet been approved.'})
return f(*args, **kwargs)
return decorator
| 5,339,461
|
def multiply(x):
"""Multiply operator.
>>> multiply(2)(1)
2
"""
def multiply(y):
return y * x
return multiply
| 5,339,462
|
def tally_transactions(address, txs):
"""Calculate the net value of all deposits, withdrawals and fees
:param address: Address of the account
:param txs: Transactions JSON for the address
:returns: The total net value of all deposits, withdrawals and fees
"""
send_total = 0
for item in txs['result']:
if item['success']:
# Check for deposits/withdrawals
if "MsgSend" in item['messageTypes']:
if item['messages'][0]['content']['toAddress'] != address:
# Remove withdrawals
send_total -= translate_basecro_to_cro(Decimal(item['messages'][0]['content']['amount'][0]['amount']))
else:
# Add deposits
send_total += translate_basecro_to_cro(Decimal(item['messages'][0]['content']['amount'][0]['amount']))
# Remove fees
send_total -= translate_basecro_to_cro(Decimal(item['fee'][0]['amount']))
return send_total
| 5,339,463
|
def test_fft3d():
"""Test 3d fft core function."""
arr3d_f = fourier.fft3d(arr3d, mode="forward")
assert arr3d_f.shape == (n_pixels, n_pixels, n_pixels)
arr3d_r = fourier.fft3d(arr3d_f, mode="inverse")
assert arr3d_r.shape == (n_pixels, n_pixels, n_pixels)
| 5,339,464
|
def expose(policy):
"""
Annotate a method to permit access to contexts matching an authorization
policy. The annotation may be specified multiple times. Methods lacking any
authorization policy are not accessible.
::
@mitogen.service.expose(policy=mitogen.service.AllowParents())
def unsafe_operation(self):
...
:param mitogen.service.Policy policy:
The policy to require.
"""
def wrapper(func):
func.mitogen_service__policies = [policy] + getattr(
func, "mitogen_service__policies", []
)
return func
return wrapper
| 5,339,465
|
def _bytes_feature(value):
"""Creates a bytes feature from the passed value.
Args:
      value: A numpy array.
Returns:
A TensorFlow feature.
"""
return tf.train.Feature(
bytes_list=tf.train.BytesList(
            value=[value.astype(np.float32).tobytes()]))
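
# Usage sketch (assumes a TensorFlow environment with `tf` and `np` imported, as the
# function above already requires): wrap a small array in a tf.train.Example record.
example = tf.train.Example(features=tf.train.Features(
    feature={"data": _bytes_feature(np.arange(4, dtype=np.float64))}))
print(len(example.SerializeToString()) > 0)  # True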
| 5,339,466
|
def get_cell_phase(
adata: anndata.AnnData,
layer: str = None,
gene_list: Union[OrderedDict, None] = None,
refine: bool = True,
threshold: Union[float, None] = 0.3,
) -> pd.DataFrame:
"""Compute cell cycle phase scores for cells in the population
Arguments
---------
adata: :class:`~anndata.AnnData`
layer: `str` or None (default: `None`)
The layer of data to use for calculating correlation. If None, use adata.X.
gene_list: `OrderedDict` or None (default: `None`)
OrderedDict of marker genes to use for cell cycle phases. If None, the default
list will be used.
refine: `bool` (default: `True`)
whether to refine the gene lists based on how consistent the expression is among
the groups
threshold: `float` or None (default: `0.3`)
threshold on correlation coefficient used to discard genes (expression of each
gene is compared to the bulk expression of the group and any gene with a correlation
coefficient less than this is discarded)
Returns
-------
Cell cycle scores indicating the likelihood a given cell is in a given cell cycle phase
"""
# get list of genes if one is not provided
if gene_list is None:
cell_phase_genes = get_cell_phase_genes(adata, layer, refine=refine, threshold=threshold)
else:
cell_phase_genes = gene_list
adata.uns["cell_phase_genes"] = cell_phase_genes
# score each cell cycle phase and Z-normalize
phase_scores = pd.DataFrame(batch_group_score(adata, layer, cell_phase_genes))
normalized_phase_scores = phase_scores.sub(phase_scores.mean(axis=1), axis=0).div(phase_scores.std(axis=1), axis=0)
normalized_phase_scores_corr = normalized_phase_scores.transpose()
normalized_phase_scores_corr["G1-S"] = [1, 0, 0, 0, 0]
normalized_phase_scores_corr["S"] = [0, 1, 0, 0, 0]
normalized_phase_scores_corr["G2-M"] = [0, 0, 1, 0, 0]
normalized_phase_scores_corr["M"] = [0, 0, 0, 1, 0]
normalized_phase_scores_corr["M-G1"] = [0, 0, 0, 0, 1]
phase_list = ["G1-S", "S", "G2-M", "M", "M-G1"]
    # final scores for each phase are the correlation of the expression profile with the vectors defined above
cell_cycle_scores = normalized_phase_scores_corr.corr()
tmp = -len(phase_list)
cell_cycle_scores = cell_cycle_scores[tmp:].transpose()[: -len(phase_list)]
# pick maximal score as the phase for that cell
cell_cycle_scores["cell_cycle_phase"] = cell_cycle_scores.idxmax(axis=1)
cell_cycle_scores["cell_cycle_phase"] = cell_cycle_scores["cell_cycle_phase"].astype("category")
cell_cycle_scores["cell_cycle_phase"].cat.set_categories(phase_list, inplace=True)
def progress_ratio(x, phase_list):
ind = phase_list.index(x["cell_cycle_phase"])
return x[phase_list[(ind - 1) % len(phase_list)]] - x[phase_list[(ind + 1) % len(phase_list)]]
# interpolate position within given cell cycle phase
cell_cycle_scores["cell_cycle_progress"] = cell_cycle_scores.apply(
lambda x: progress_ratio(x, list(phase_list)), axis=1
)
cell_cycle_scores.sort_values(
["cell_cycle_phase", "cell_cycle_progress"],
ascending=[True, False],
inplace=True,
)
# order of cell within cell cycle phase
cell_cycle_scores["cell_cycle_order"] = cell_cycle_scores.groupby("cell_cycle_phase").cumcount()
cell_cycle_scores["cell_cycle_order"] = cell_cycle_scores.groupby("cell_cycle_phase")["cell_cycle_order"].apply(
lambda x: x / (len(x) - 1)
)
return cell_cycle_scores
| 5,339,467
|
def pm_callback(sender, **kwargs):
"""
    Post-migrate callback: re/load SQL files and move models to schemas.
"""
load_sql_files(sender)
move_models_to_schemas(sender)
| 5,339,468
|
def variational_lower_bound(prediction):
"""
This is the variational lower bound derived in
Auto-Encoding Variational Bayes, Kingma & Welling, 2014
    :param prediction: list of
        [posterior_means, posterior_logvar, data_means, data_logvar, originals]
        posterior_means: predicted means for the posterior
        posterior_logvar: predicted log variances for the posterior
        data_means: predicted mean parameter
            for the voxels modelled as Gaussians
        data_logvar: predicted log variance parameter
            for the voxels modelled as Gaussians
        originals: the original inputs
    :return: the batch-averaged negative variational lower bound,
        i.e. the KL divergence minus the Gaussian log-likelihood
"""
# log_2pi = np.log(2*np.pi)
log_2pi = 1.837877
assert len(prediction) >= 5, \
"please see the returns of network/vae.py" \
"for the prediction list format"
posterior_means, posterior_logvar = prediction[:2]
data_means, data_logvar = prediction[2:4]
originals = prediction[4]
squared_diff = tf.square(data_means - originals)
log_likelihood = \
data_logvar + log_2pi + tf.exp(-data_logvar) * squared_diff
# batch_size = tf.shape(log_likelihood)[0]
batch_size = log_likelihood.get_shape().as_list()[0]
log_likelihood = tf.reshape(log_likelihood, shape=[batch_size, -1])
log_likelihood = -0.5 * tf.reduce_sum(log_likelihood, axis=[1])
KL_divergence = 1 + posterior_logvar \
- tf.square(posterior_means) \
- tf.exp(posterior_logvar)
KL_divergence = -0.5 * tf.reduce_sum(KL_divergence, axis=[1])
return tf.reduce_mean(KL_divergence - log_likelihood)
| 5,339,469
|
def didGen(vk, method="dad"):
"""
didGen accepts an EdDSA (Ed25519) key in the form of a byte string and returns a DID.
:param vk: 32 byte verifier/public key from EdDSA (Ed25519) key
:param method: W3C did method string. Defaults to "dad".
:return: W3C DID string
"""
if vk is None:
return None
# convert verkey to jsonable unicode string of base64 url-file safe
vk64u = base64.urlsafe_b64encode(vk).decode("utf-8")
return "did:{0}:{1}".format(method, vk64u)
| 5,339,470
|
def part_1( pwd_data ):
"""
For each line in pwd_data:
* element 0 contains the min/max counts. It is stored as <str>-<str> it will
need to be split and each of the 2 new elements cast to ints.
* element 1 contains the character to count in the provided password string.
It will have a trailing : character that will need to be stripped.
* element 2 is the password string
The character in element 1 must exist in the password a number of times
between the min and max values provided.
Args:
pwd_data ([list]):
"""
good_passwords = 0
for x in pwd_data:
        min_count, max_count = x[ 0 ].split( '-' )
        if int( min_count ) <= x[ 2 ].count( x[ 1 ].rstrip( ':' ) ) <= int( max_count ):
good_passwords += 1
print( good_passwords )
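
# Usage sketch (data is illustrative): each record carries the "<min>-<max>" range,
# the "<char>:" field and the password string described in the docstring above.
_sample_pwd_data = [
    ["1-3", "a:", "abcde"],      # 'a' occurs once -> valid
    ["1-3", "b:", "cdefg"],      # 'b' occurs zero times -> invalid
    ["2-9", "c:", "ccccccccc"],  # 'c' occurs nine times -> valid
]
part_1(_sample_pwd_data)         # prints 2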
| 5,339,471
|
def load_dataset(datapath):
"""Extract class label info """
with open(datapath + "/experiment_dataset.dat", "rb") as f:
data_dict = pickle.load(f)
return data_dict
| 5,339,472
|
def deleteupload():
"""Deletes an upload.
An uploads_id is given and that entry is then removed from the uploads table
in the database.
"""
uploads_id = request.args.get('uploads_id')
if not uploads.exists(uploads_id=uploads_id):
return bad_json_response(
'BIG OOPS: Something went wrong deleting the file.'
)
uploads.delete(uploads_id=uploads_id)
return good_json_response('success')
| 5,339,473
|
def read(fn, offset, length, hdfs=None):
""" Read a block of bytes from a particular file """
with hdfs.open(fn, 'r') as f:
f.seek(offset)
bytes = f.read(length)
logger.debug("Read %d bytes from %s:%d", len(bytes), fn, offset)
return bytes
| 5,339,474
|
def load_json(filename: str) -> Dict:
"""Read JSON file from metadata folder
Args:
filename: Name of metadata file
Returns:
dict: Dictionary of data
"""
filepath = (
Path(__file__).resolve().parent.parent.joinpath("metadata").joinpath(filename)
)
metadata: Dict = json.loads(filepath.read_text())
return metadata
| 5,339,475
|
def assert_db_got_replaced(rotkehlchen_instance: Rotkehlchen, username: str):
"""For environment setup with setup_starting_environment make sure DB is replaced"""
# At this point pulling data from rotkehlchen server should have worked
# and our database should have been replaced. The new data have different
# main currency
assert rotkehlchen_instance.data.db.get_main_currency() == A_GBP
# Also check a copy of our old DB is kept around.
directory = os.path.join(rotkehlchen_instance.data.data_directory, username)
files = list(os.path.join(directory, f) for f in os.listdir(directory))
assert len(files) == 2
# The order of the files is not guaranteed
assert 'rotkehlchen.db' in files[0] or 'rotkehlchen.db' in files[1]
assert 'backup' in files[0] or 'backup' in files[1]
| 5,339,476
|
def main():
"""Given a dashboard title, get the ids of all dashboards with matching titles
and move them to trash.
$ python soft_delete_dashboard.py "An Unused Dashboard"
"""
dashboard_title = sys.argv[1] if len(sys.argv) > 1 else ""
if not dashboard_title:
raise sdk_exceptions.ArgumentError("Please provide: <dashboardTitle>")
dashboards = get_dashboards(dashboard_title)
delete_dashboards(dashboards)
| 5,339,477
|
def get_riemann_sum(x, delta_x):
"""
    Return the Riemann sum of the module-level function `f` evaluated at the
    sample points `x` with subinterval width `delta_x`
    Parameters
    ----------
    x : list
        List of numbers returned by `np.linspace` given a lower
        and upper bound, and the number of intervals
    delta_x : float
        The width of each subinterval
    Returns
    -------
    float
        The Riemann sum approximating the integral of `f`
"""
return sum(f(x)*delta_x)
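
# Usage sketch (illustrative): get_riemann_sum relies on a module-level function
# `f`; a simple quadratic stands in for it here.
import numpy as np

def f(x):  # stand-in integrand; the original module defines its own f
    return x ** 2

x_demo, dx_demo = np.linspace(0, 1, 1000, retstep=True)
print(get_riemann_sum(x_demo, dx_demo))  # ~0.334, approximating the integral of x^2 on [0, 1]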
| 5,339,478
|
def initialize_library(verbose):
""" Sets the native library verbosity
Args:
verbose: Set to 1 for redirecting native library logs to stderr. For full debug
information, compile the RQRMI library with DEBUG flag.
"""
global _library_initialzied
if _library_initialzied: return
if verbose:
print('*** For maximum debug information on the RQRMI library, compile library with debug flag ***')
rqrmilib.initialize(1)
else:
rqrmilib.initialize(0)
_library_initialzied=True
| 5,339,479
|
def MPO_rand(n, bond_dim, phys_dim=2, normalize=True, cyclic=False,
herm=False, dtype=float, **mpo_opts):
"""Generate a random matrix product state.
Parameters
----------
n : int
The number of sites.
bond_dim : int
The bond dimension.
phys_dim : int, optional
The physical (site) dimensions, defaults to 2.
normalize : bool, optional
Whether to normalize the operator such that ``trace(A.H @ A) == 1``.
cyclic : bool, optional
Generate a MPO with periodic boundary conditions or not, default is
open boundary conditions.
dtype : {float, complex} or numpy dtype, optional
Data type of the tensor network.
herm : bool, optional
Whether to make the matrix hermitian (or symmetric if real) or not.
mpo_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
"""
cyc_shp = (bond_dim,) if cyclic else ()
shapes = [(*cyc_shp, bond_dim, phys_dim, phys_dim),
*((bond_dim, bond_dim, phys_dim, phys_dim),) * (n - 2),
(bond_dim, *cyc_shp, phys_dim, phys_dim)]
def gen_data(shape):
data = randn(shape, dtype=dtype)
if not herm:
return data
trans = (0, 2, 1) if len(shape) == 3 else (0, 1, 3, 2)
return data + data.transpose(*trans).conj()
arrays = map(lambda x: x / norm_fro_dense(x)**(1 / (x.ndim - 1)),
map(gen_data, shapes))
rmpo = MatrixProductOperator(arrays, **mpo_opts)
if normalize:
rmpo /= (rmpo.H @ rmpo)**0.5
return rmpo
| 5,339,480
|
def test_api_exception(symbol='invalid'):
"""Test API response Exception"""
with pytest.raises(CryptowatchAPIException):
client.get_assets(symbol)
| 5,339,481
|
def get_short_size(size_bytes):
"""
Get a file size string in short format.
This function returns:
"B" size (e.g. 2) when size_bytes < 1KiB
"KiB" size (e.g. 345.6K) when size_bytes >= 1KiB and size_bytes < 1MiB
"MiB" size (e.g. 7.8M) when size_bytes >= 1MiB
size_bytes: File size in bytes
"""
if size_bytes < 1024:
return str(size_bytes)
if size_bytes < 1048576:
return f"{size_bytes / 1024:.1f}K"
return f"{size_bytes / 1048576:.1f}M"
| 5,339,482
|
def stop_evennia():
"""
This instructs the Portal to stop the Server and then itself.
"""
def _portal_stopped(*args):
print("... Portal stopped.\nEvennia shut down.")
_reactor_stop()
def _server_stopped(*args):
print("... Server stopped.\nStopping Portal ...")
send_instruction(PSHUTD, {})
wait_for_status(False, None, _portal_stopped)
def _portal_running(response):
prun, srun, ppid, spid, _, _ = _parse_status(response)
if srun:
print("Server stopping ...")
send_instruction(SSHUTD, {})
wait_for_status_reply(_server_stopped)
else:
print("Server already stopped.\nStopping Portal ...")
send_instruction(PSHUTD, {})
wait_for_status(False, None, _portal_stopped)
def _portal_not_running(fail):
print("Evennia not running.")
_reactor_stop()
send_instruction(PSTATUS, None, _portal_running, _portal_not_running)
| 5,339,483
|
def pair_setup(
auth_type: AuthenticationType, connection: HttpConnection
) -> PairSetupProcedure:
"""Return procedure object used for Pair-Setup."""
_LOGGER.debug("Setting up new AirPlay Pair-Setup procedure with type %s", auth_type)
if auth_type == AuthenticationType.Legacy:
srp = LegacySRPAuthHandler(new_credentials())
srp.initialize()
return AirPlayLegacyPairSetupProcedure(connection, srp)
if auth_type == AuthenticationType.HAP:
srp = SRPAuthHandler()
srp.initialize()
return AirPlayHapPairSetupProcedure(connection, srp)
raise exceptions.NotSupportedError(
f"authentication type {auth_type} does not support Pair-Setup"
)
| 5,339,484
|
def score_from_srl(srl_path, truth_path, freq, verbose=False):
"""
Given source list output by PyBDSF and training truth catalogue,
calculate the official score for the sources identified in the srl.
Args:
srl_path (`str`): Path to source list (.srl file)
truth_path (`str`): Path to training truth catalogue
freq (`int`): Image frequency band (560, 1400 or 9200 MHz)
verbose (`bool`): True to print out size ratio info
"""
truth_df = load_truth_df(truth_path)
# Predict size ID and correct the Maj and Min values:
cat_df = cat_df_from_srl(srl_path)
scorer = Sdc1Scorer(cat_df, truth_df, freq)
score = scorer.run(train=True, detail=True, mode=1)
return score
| 5,339,485
|
def get_feature_read(key, max_num_bbs=None):
"""Choose the right feature function for the given key to parse TFRecords
Args:
key: the feature name
max_num_bbs: Max number of bounding boxes (used for `bounding_boxes` and `classes`)
"""
if key in ['im_id', 'num_boxes']:
return tf.FixedLenFeature((), tf.int64)
elif key in ['bounding_boxes']:
assert max_num_bbs is not None
return tf.FixedLenFeature((max_num_bbs, 4), tf.float32)
elif key in ['classes']:
assert max_num_bbs is not None
return tf.FixedLenFeature((max_num_bbs,), tf.int64)
else:
raise SystemExit("Unknown feature", key)
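
# Usage sketch (assumes the same TF1-style `tf.FixedLenFeature` API used above):
# build the feature spec for records holding at most 8 bounding boxes.
_feature_spec = {key: get_feature_read(key, max_num_bbs=8)
                 for key in ["im_id", "num_boxes", "bounding_boxes", "classes"]}
print(_feature_spec["bounding_boxes"].shape)  # (8, 4)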
| 5,339,486
|
def kernel(cc, eris, t1=None, t2=None, max_cycle=50, tol=1e-8, tolnormt=1e-6,
verbose=logger.INFO):
"""Exactly the same as pyscf.cc.ccsd.kernel, which calls a
*local* energy() function."""
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(cc.stdout, verbose)
if t1 is None and t2 is None:
t1, t2 = cc.init_amps(eris)[1:]
elif t1 is None:
nocc = cc.nocc
nvir = cc.nmo - nocc
t1 = numpy.zeros((nocc,nvir), eris.dtype)
elif t2 is None:
t2 = cc.init_amps(eris)[2]
    cput1 = cput0 = (time.process_time(), time.time())  # time.clock() was removed in Python 3.8
nocc, nvir = t1.shape
eold = 0
eccsd = 0
if cc.diis:
adiis = lib.diis.DIIS(cc, cc.diis_file)
adiis.space = cc.diis_space
else:
adiis = lambda t1,t2,*args: (t1,t2)
conv = False
for istep in range(max_cycle):
t1new, t2new = cc.update_amps(t1, t2, eris)
normt = numpy.linalg.norm(t1new-t1) + numpy.linalg.norm(t2new-t2)
t1, t2 = t1new, t2new
t1new = t2new = None
if cc.diis:
t1, t2 = cc.diis(t1, t2, istep, normt, eccsd-eold, adiis)
eold, eccsd = eccsd, energy(cc, t1, t2, eris)
log.info('istep = %d E(CCSD) = %.15g dE = %.9g norm(t1,t2) = %.6g',
istep, eccsd, eccsd - eold, normt)
cput1 = log.timer('CCSD iter', *cput1)
if abs(eccsd-eold) < tol and normt < tolnormt:
conv = True
break
log.timer('CCSD', *cput0)
return conv, eccsd, t1, t2
| 5,339,487
|
def test_generate_import_code_2():
"""Assert that generate_import_code() returns the correct set of dependancies and dependancies are importable."""
pipeline_string = (
'KNeighborsClassifier(CombineDFs('
'DecisionTreeClassifier(input_matrix, DecisionTreeClassifier__criterion=gini, '
'DecisionTreeClassifier__max_depth=8,DecisionTreeClassifier__min_samples_leaf=5,'
'DecisionTreeClassifier__min_samples_split=5), ZeroCount(input_matrix))'
'KNeighborsClassifier__n_neighbors=10, '
'KNeighborsClassifier__p=1,KNeighborsClassifier__weights=uniform'
)
pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
import_code = generate_import_code(pipeline, tpot_obj.operators)
expected_code = """import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline, make_union
from sklearn.tree import DecisionTreeClassifier
from tpot.builtins import StackingEstimator, ZeroCount
"""
exec(import_code) # should not raise error
assert expected_code == import_code
| 5,339,488
|
def on_session_started(session_started_request, session):
""" Called when the session starts
Can be used to initialize values if used across intents.
"""
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
| 5,339,489
|
def get_arguments():
"""parse provided command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--server",
help="Where to send the output - use https URL to POST "
"to the dognews server API, or a file name to save locally as json",
required=True)
parser.add_argument(
"--imagefolder",
help="Where to save the thumbnails",
required=True)
parser.add_argument(
"--token",
help="Authentication token associated with the submit-bot user, generated in the dognews server app",
required=True)
return parser.parse_args()
| 5,339,490
|
async def getAllDestinyIDs():
"""Returns a list with all discord members destiny ids"""
select_sql = """
SELECT
destinyID
FROM
"discordGuardiansToken";"""
async with (await get_connection_pool()).acquire(timeout=timeout) as connection:
result = await connection.fetch(select_sql)
return [x[0] for x in result]
| 5,339,491
|
def login():
"""Handles login for Gello."""
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
| 5,339,492
|
def create_loss_functions(interconnector_coefficients, demand_coefficients, demand):
"""Creates a loss function for each interconnector.
    Transforms the dynamic demand-dependent interconnector loss functions into functions that only depend on
    interconnector flow, i.e. takes the function f and creates g by pre-calculating the demand-dependent terms.
f(inter_flow, flow_coefficient, nsw_demand, nsw_coefficient, qld_demand, qld_coefficient) = inter_losses
becomes
g(inter_flow) = inter_losses
The mathematics of the demand dependent loss functions is described in the
:download:`Marginal Loss Factors documentation section 3 to 5 <../../docs/pdfs/Marginal Loss Factors for the 2020-21 Financial year.pdf>`.
Examples
--------
>>> import pandas as pd
Some arbitrary regional demands.
>>> demand = pd.DataFrame({
... 'region': ['VIC1', 'NSW1', 'QLD1', 'SA1'],
... 'loss_function_demand': [6000.0 , 7000.0, 5000.0, 3000.0]})
Loss model details from 2020 Jan NEM web LOSSFACTORMODEL file
>>> demand_coefficients = pd.DataFrame({
... 'interconnector': ['NSW1-QLD1', 'NSW1-QLD1', 'VIC1-NSW1', 'VIC1-NSW1', 'VIC1-NSW1'],
... 'region': ['NSW1', 'QLD1', 'NSW1', 'VIC1', 'SA1'],
... 'demand_coefficient': [-0.00000035146, 0.000010044, 0.000021734, -0.000031523, -0.000065967]})
Loss model details from 2020 Jan NEM web INTERCONNECTORCONSTRAINT file
>>> interconnector_coefficients = pd.DataFrame({
... 'interconnector': ['NSW1-QLD1', 'VIC1-NSW1'],
... 'loss_constant': [0.9529, 1.0657],
... 'flow_coefficient': [0.00019617, 0.00017027],
... 'from_region_loss_share': [0.5, 0.5]})
Create the loss functions
>>> loss_functions = create_loss_functions(interconnector_coefficients, demand_coefficients, demand)
Lets use one of the loss functions, first get the loss function of VIC1-NSW1 and call it g
>>> g = loss_functions[loss_functions['interconnector'] == 'VIC1-NSW1']['loss_function'].iloc[0]
Calculate the losses at 600 MW flow
>>> print(g(600.0))
-70.87199999999996
Now for NSW1-QLD1
>>> h = loss_functions[loss_functions['interconnector'] == 'NSW1-QLD1']['loss_function'].iloc[0]
>>> print(h(600.0))
35.70646799999993
Parameters
----------
interconnector_coefficients : pd.DataFrame
====================== ========================================================================================
Columns: Description:
interconnector unique identifier of a interconnector (as `str`)
loss_constant the constant term in the interconnector loss factor equation (as np.float64)
flow_coefficient the coefficient of the interconnector flow variable in the loss factor equation
(as np.float64)
        from_region_loss_share the proportion of loss attributed to the from region; the remainder is attributed to the to
region (as np.float64)
====================== ========================================================================================
demand_coefficients : pd.DataFrame
================== =========================================================================================
Columns: Description:
interconnector unique identifier of a interconnector (as `str`)
        region             the market region whose demand the coefficient applies to, required (as `str`)
demand_coefficient the coefficient of regional demand variable in the loss factor equation (as `np.float64`)
================== =========================================================================================
demand : pd.DataFrame
==================== =====================================================================================
Columns: Description:
region unique identifier of a region (as `str`)
loss_function_demand the estimated regional demand, as calculated by initial supply + demand forecast,
in MW (as `np.float64`)
==================== =====================================================================================
Returns
-------
pd.DataFrame
loss_functions
================ ============================================================================================
Columns: Description:
interconnector unique identifier of a interconnector (as `str`)
loss_function a `function` object that takes interconnector flow (as `float`) an input and returns
interconnector losses (as `float`).
================ ============================================================================================
"""
demand_loss_factor_offset = pd.merge(demand_coefficients, demand, 'inner', on=['region'])
demand_loss_factor_offset['offset'] = demand_loss_factor_offset['loss_function_demand'] * \
demand_loss_factor_offset['demand_coefficient']
demand_loss_factor_offset = demand_loss_factor_offset.groupby('interconnector', as_index=False)['offset'].sum()
loss_functions = pd.merge(interconnector_coefficients, demand_loss_factor_offset, 'left', on=['interconnector'])
loss_functions['loss_constant'] = loss_functions['loss_constant'] + loss_functions['offset'].fillna(0)
loss_functions['loss_function'] = \
loss_functions.apply(lambda x: create_function(x['loss_constant'], x['flow_coefficient']), axis=1)
return loss_functions.loc[:, ['interconnector', 'loss_function', 'from_region_loss_share']]
| 5,339,493
|
def num2proto(pnum):
"""Protocol number to name"""
# Look for the common ones first
if pnum == 6:
return "tcp"
elif pnum == 17:
return "udp"
elif pnum == 1:
return "icmp"
elif pnum == 58:
# Use the short form of icmp-ipv6 when appropriate
return "icmpv6"
# Get cached proto table, else create new one
global proto_table
if not bool(proto_table):
proto_table = ProtocolTable()
pname = proto_table[pnum]
# If not found, return the number as a string
if pname == "Unassigned":
return str(pnum)
return pname
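
# Quick check of the fast-path protocol numbers (these return before any
# ProtocolTable lookup, so no table needs to be built):
print(num2proto(6), num2proto(17), num2proto(1), num2proto(58))  # tcp udp icmp icmpv6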
| 5,339,494
|
def get_problem_size(problem_size, params):
"""compute current problem size"""
if callable(problem_size):
problem_size = problem_size(params)
if isinstance(problem_size, (str, int, np.integer)):
problem_size = (problem_size, )
current_problem_size = [1, 1, 1]
for i, s in enumerate(problem_size):
if isinstance(s, str):
current_problem_size[i] = int(
eval(replace_param_occurrences(s, params)))
elif isinstance(s, (int, np.integer)):
current_problem_size[i] = s
else:
raise TypeError(
"Error: problem_size should only contain strings or integers")
return current_problem_size
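
# Usage sketch (parameter names are illustrative): problem_size may be an int, a
# tuple, or a callable of the tuning parameters; string entries additionally go
# through replace_param_occurrences, so they are avoided here.
_params = {"block_size_x": 128}
print(get_problem_size(1024, _params))                                # [1024, 1, 1]
print(get_problem_size((1024, 512), _params))                         # [1024, 512, 1]
print(get_problem_size(lambda p: (p["block_size_x"] * 8,), _params))  # [1024, 1, 1]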
| 5,339,495
|
def zip_dir(path):
"""
Create a zip archive containing all files and dirs rooted in path.
The archive is created in memory and a file handler is returned by the function.
Args:
path: directory containing the resources to archive.
Return:
file_out: file handler pointing to the compressed archive.
"""
file_out = BytesIO()
with zipfile.ZipFile(file_out, "w", zipfile.ZIP_DEFLATED) as ziph:
for root, _, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), start=path))
file_out.seek(0)
return file_out
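
# Usage sketch (paths are illustrative): archive a throwaway directory in memory
# and list the archive members. os/zipfile are module-level imports that zip_dir
# itself already assumes.
import os, tempfile, zipfile
with tempfile.TemporaryDirectory() as tmp_dir:
    with open(os.path.join(tmp_dir, "hello.txt"), "w") as fh:
        fh.write("hi")
    with zipfile.ZipFile(zip_dir(tmp_dir)) as zf:
        print(zf.namelist())  # ['hello.txt']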
| 5,339,496
|
def interpolate(x,
size=None,
scale_factor=None,
mode='nearest',
align_corners=False,
align_mode=0,
data_format='NCHW',
name=None):
"""
This op resizes a batch of images.
The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
Where in_w is width of the input tensor, in_h is the height of the input tensor,
    in_d is the depth of the input tensor,
    and the resizing only applies to the three dimensions (depth, height and width).
Supporting resample methods:
'linear' : Linear interpolation
'bilinear' : Bilinear interpolation
'trilinear' : Trilinear interpolation
'nearest' : Nearest neighbor interpolation
'bicubic' : Bicubic interpolation
'area': Area interpolation
Linear interpolation is the method of using a line connecting two known quantities
to determine the value of an unknown quantity between the two known quantities.
Nearest neighbor interpolation is to perform nearest neighbor interpolation
in both the 3rd dimension(in height direction) and the 4th dimension(in width
direction) on input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
align_corners and align_mode are optional parameters,the calculation method
of interpolation can be selected by them.
Bicubic interpolation is an extension of cubic interpolation for interpolating
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
Area interpolation is to perform area interpolation
in both the 3rd dimension(in height direction) , the 4th dimension(in width
direction) and the 5th dimension(in depth direction) on input tensor. Set to
area will directly call `paddle.nn.functional.adaptive_avg_pool1d` or
`paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.
Example:
.. code-block:: text
For scale_factor:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Linear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = W_{in} * scale_{factor}
Nearest neighbor interpolation:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Bicubic interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
For details of linear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Linear_interpolation.
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
For details of bicubic interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bicubic_interpolation
Parameters:
x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
size (list|tuple|Tensor|None): Output shape of image resize
layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w)
when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
Default: None. If a list/tuple, each element can be an integer or a Tensor of shape: [1].
If a Tensor, its dimensions size should be a 1.
scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
least one of :attr:`size` or :attr:`scale_factor` must be set.
And :attr:`size` has a higher priority than :attr:`scale_factor`.Has to match input size if it is either a list or a tuple or a Tensor.
Default: None.
mode (str): The resample method. It supports 'linear', 'area', 'nearest', 'bilinear',
'bicubic' and 'trilinear' currently. Default: 'nearest'
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.This only has an effect when 'linear', 'bilinear', 'bicubic' or 'trilinear'.
Default: False
align_mode(int) : An optional for linear/bilinear/trilinear interpolation. Refer to the formula in the example above,
it can be \'0\' for src_idx = scale_factor*(dst_indx+0.5)-0.5 , can be \'1\' for
src_idx = scale_factor*dst_index.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: size should be a list or tuple or Tensor.
ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
'trilinear', 'bicubic', 'area' or 'nearest' currently.
ValueError: 'linear' only support 3-D tensor.
ValueError: 'bilinear' and 'bicubic' only support 4-D tensor.
ValueError: 'nearest' only support 4-D or 5-D tensor.
ValueError: 'trilinear' only support 5-D tensor.
ValueError: One of size and scale_factor must not be None.
ValueError: size length should be 1 for input 3-D tensor.
ValueError: size length should be 2 for input 4-D tensor.
ValueError: size length should be 3 for input 5-D tensor.
ValueError: scale_factor should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
import paddle
import numpy as np
import paddle.nn.functional as F
# given out size
input_data = np.random.rand(2,3,6,10).astype("float32")
x = paddle.to_tensor(input_data)
output_1 = F.interpolate(x=x, size=[12,12])
print(output_1.shape)
# [2L, 3L, 12L, 12L]
# given scale
output_2 = F.interpolate(x=x, scale_factor=[2,1])
print(output_2.shape)
# [2L, 3L, 12L, 10L]
# bilinear interp
output_3 = F.interpolate(x=x, scale_factor=[2,1], mode="bilinear")
            print(output_3.shape)
# [2L, 3L, 12L, 10L]
"""
data_format = data_format.upper()
resample = mode.upper()
resample_type = mode.lower()
resample_methods = [
'LINEAR',
'BILINEAR',
'TRILINEAR',
'NEAREST',
'BICUBIC',
'AREA',
]
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'area', 'linear', 'bilinear', 'trilinear', "
" 'bicubic' or 'nearest' currently.")
if resample in ['LINEAR'] and len(x.shape) != 3:
raise ValueError("'linear' only support 3-D tensor.")
if resample in ['NEAREST'] and len(x.shape) != 4 and len(x.shape) != 5:
raise ValueError("'NEAREST' only support 4-D or 5-D tensor.")
if resample in ['BILINEAR', 'BICUBIC'] and len(x.shape) != 4:
raise ValueError("'bilinear' and 'bicubic' only support 4-D tensor.")
if resample == 'TRILINEAR' and len(x.shape) != 5:
raise ValueError("'trilinear'only support 5-D tensor.")
if size is None and scale_factor is None:
raise ValueError("One of size and scale_factor must not be None.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if align_corners != 0 and resample == 'NEAREST':
raise ValueError(
"align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear"
)
if resample == 'AREA':
if isinstance(size, list) or isinstance(size, tuple) or isinstance(
size, Variable):
if len(size) == 0:
raise ValueError("output size can not be empty")
if len(x.shape) == 3:
return paddle.nn.functional.adaptive_avg_pool1d(x, size)
elif len(x.shape) == 4:
return paddle.nn.functional.adaptive_avg_pool2d(x, size)
elif len(x.shape) == 5:
return paddle.nn.functional.adaptive_avg_pool3d(x, size)
helper = LayerHelper('{}_interp_v2'.format(resample_type), **locals())
dtype = helper.input_dtype(input_param_name='x')
if len(x.shape) == 3 and data_format not in ['NCW', 'NWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCW` or `NWC` supported for 3-D input.")
elif len(x.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(x.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
def _is_list_or_turple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC':
data_layout = 'NHWC'
if resample == 'NEAREST':
align_corners = False
inputs = {"X": x}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
out_shape = size
scale = scale_factor
if out_shape is not None and scale is not None:
raise ValueError("Only one of size or scale_factor should be defined.")
if out_shape is not None:
if isinstance(out_shape, Variable) and not in_dynamic_mode():
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if in_dynamic_mode():
if isinstance(out_shape, Variable):
out_shape = list(out_shape.numpy())
for i, dim in enumerate(out_shape):
if isinstance(dim, Variable):
out_shape[i] = dim.numpy()[0]
if not (_is_list_or_turple_(out_shape)):
raise TypeError("size should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
if contain_var:
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(x.shape) == 3:
if len(out_shape) != 1:
                    raise ValueError(
                        "size length should be 1 for input 3-D tensor")
if contain_var:
attrs['out_w'] = size_list[0]
else:
out_shape = list(map(int, out_shape))
attrs['out_w'] = out_shape[0]
if len(x.shape) == 4:
if len(out_shape) != 2:
raise ValueError("size length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(x.shape) == 5:
if len(out_shape) != 3:
raise ValueError("size length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
if in_dynamic_mode() and isinstance(scale, Variable):
scale = list(scale.numpy())
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
scale_list = []
for i in range(len(x.shape) - 2):
scale_list.append(scale)
attrs['scale'] = list(map(float, scale_list))
elif isinstance(scale, list) or isinstance(scale, tuple):
if len(scale) != len(x.shape) - 2:
raise ValueError("scale_shape length should be {} for "
"input {}-D tensor.".format(
len(x.shape) - 2, len(x.shape)))
for value in scale:
if value <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = list(map(float, scale))
else:
raise TypeError(
"Attr(scale)'s type should be float, int, list, tuple, or Tensor."
)
if in_dynamic_mode():
attr_list = []
for k, v in attrs.items():
attr_list.append(k)
attr_list.append(v)
dy_attr = tuple(attr_list)
if resample_type == "linear":
out = _C_ops.linear_interp_v2(x, *dy_attr)
elif resample_type == "bilinear":
out = _C_ops.bilinear_interp_v2(x, *dy_attr)
elif resample_type == "trilinear":
out = _C_ops.trilinear_interp_v2(x, *dy_attr)
elif resample_type == "nearest":
out = _C_ops.nearest_interp_v2(x, *dy_attr)
elif resample_type == "bicubic":
out = _C_ops.bicubic_interp_v2(x, *dy_attr)
return out
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp_v2'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
| 5,339,497
|
def encode_auth_token(user_id):
"""
Generates the Auth Token
:return: string
"""
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=90),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
app.config.get('SECRET_KEY'),
algorithm='HS256'
)
except Exception as e:
return e
| 5,339,498
|
def count_weighted_pairs_3d_cuda_fix(
x1, y1, z1, w1, x2, y2, z2, w2, rbins_squared, result):
"""Naively count Npairs(<r), the total number of pairs that are separated
by a distance less than r, for each r**2 in the input rbins_squared.
"""
start = cuda.grid(1)
stride = cuda.gridsize(1)
n1 = x1.shape[0]
n2 = x2.shape[0]
nbins = rbins_squared.shape[0]
i = start
while i < n1:
px = x1[i]
py = y1[i]
pz = z1[i]
pw = w1[i]
j = 0
while j < n2:
qx = x2[j]
qy = y2[j]
qz = z2[j]
qw = w2[j]
dx = px-qx
dy = py-qy
dz = pz-qz
wprod = pw*qw
dsq = dx*dx + dy*dy + dz*dz
k = nbins-1
while dsq <= rbins_squared[k]:
cuda.atomic.add(result, k-1, wprod)
k = k-1
if k <= 0:
break
j+=1
i+=stride
| 5,339,499
|