content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def update_internal_subnets(
    self,
    ipv4_list: list = None,
    ipv6_list: list = None,
    segment_ipv4_list: list = None,
    non_default_routes: bool = False,
) -> bool:
    """Update the list of internal subnets to use to classify internet
    traffic.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - internalSubnets
          - POST
          - /gms/internalSubnets2

    Any traffic not matching the internal subnets will be classified as
    internet traffic. This list will be pushed to all appliances. User
    can configure up to 512 subnets in each ipv4 and ipv6 entry.

    .. warning::
        This will overwrite current subnets!

    :param ipv4_list: List of ipv4 networks in CIDR format for all VRFs,
        defaults to ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16",
        "169.254.0.0/16", "224.0.0.0/4"]
    :type ipv4_list: list, optional
    :param ipv6_list: List of ipv6 networks in CIDR format,
        defaults to []
    :type ipv6_list: list, optional
    :param segment_ipv4_list: List of ipv4 networks each prefaced with
        related VRF id #, e.g. For VRF 1 only ["1:192.168.0.0/16"],
        defaults to []
    :type segment_ipv4_list: list, optional
    :param non_default_routes: Treat non-default routes as internal
        subnets, defaults to False
    :type non_default_routes: bool, optional
    :return: Returns True/False based on successful call.
    :rtype: bool
    """
    # Use None sentinels instead of mutable default arguments: a shared
    # default list mutated by one caller would silently leak into every
    # subsequent call of this method.
    if ipv4_list is None:
        ipv4_list = [
            "10.0.0.0/8",
            "172.16.0.0/12",
            "192.168.0.0/16",
            "169.254.0.0/16",
            "224.0.0.0/4",
        ]
    if ipv6_list is None:
        ipv6_list = []
    if segment_ipv4_list is None:
        segment_ipv4_list = []
    data = {
        "ipv4": ipv4_list,
        "ipv6": ipv6_list,
        "segmentIpv4": segment_ipv4_list,
        "nonDefaultRoutes": non_default_routes,
    }
    return self._post(
        "/gms/internalSubnets2",
        data=data,
        expected_status=[204],
        return_type="bool",
    )
def prune_outside_window(boxlist, window, scope=None):
  """Removes boxes that are not fully contained inside a window.

  Unlike clip_to_window (which clips partially-overlapping boxes and drops
  only fully-outside ones), this drops every box that overflows the window
  even partially.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
      in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    # A box violates the window if any coordinate crosses the boundary.
    violations = tf.concat([
        tf.less(y_min, win_y_min),
        tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max),
        tf.greater(x_max, win_x_max),
    ], 1)
    fully_inside = tf.logical_not(tf.reduce_any(violations, 1))
    valid_indices = tf.reshape(tf.where(fully_inside), [-1])
    return gather(boxlist, valid_indices), valid_indices
from typing import Optional
def cross_section(adjacency_matrices: npt.NDArray[np.int_],
                  rng: Optional[np.random.Generator] = None):
    """Produce all pairwise random "crossovers" of a batch of graphs.

    For each ordered pair of input adjacency matrices, a random symmetric
    0/1 mask selects which edges come from the first graph and which from
    the second; the N*N offspring are returned stacked.

    Parameters
    ----------
    adjacency_matrices :
        integer array of shape (N, n, n).
    rng :
        optional NumPy Generator; a fresh default_rng() is used when omitted.

    Returns
    -------
    integer array of shape (N**2, n, n).
    """
    if rng is None:
        rng = np.random.default_rng()
    # Random strictly-lower-triangular mask, mirrored to make it symmetric.
    lower = np.tril(rng.choice([0, 1], size=adjacency_matrices.shape), k=-1)
    sym_masks = lower + np.transpose(lower, axes=(0, 2, 1))
    # Broadcast: edges taken from graph i where mask==1, from graph j where mask==0.
    from_first = adjacency_matrices[:, None, ...] & sym_masks
    from_second = adjacency_matrices & ~sym_masks
    combined = from_first | from_second
    batch = adjacency_matrices.shape[0]
    return combined.reshape(batch ** 2, *adjacency_matrices.shape[1:3])
def get_threshold_otsu(image: npt.ArrayLike, blur_sigma=5):
    """Perform Otsu's thresholding with Gaussian blur."""
    # Resolve the skimage implementations appropriate for this image type.
    gaussian_blur = get_image_method(image, "skimage.filters.gaussian")
    otsu_threshold = get_image_method(image, "skimage.filters.threshold_otsu")
    blurred = gaussian_blur(image, sigma=blur_sigma)
    return otsu_threshold(blurred)
def mask(batch_tokens, total_token_num, vocab_size, CLS=1, SEP=2, MASK=3):
    """
    Add mask for batch_tokens, return out, mask_label, mask_pos;
    Note: mask_pos responding the batch_tokens after padded;

    BERT-style masked-LM corruption, applied to ``batch_tokens`` in place:
    tokens drawing prob <= 0.15 are selected; of those, prob in (0.03, 0.15]
    are replaced by ``MASK``, prob in (0.015, 0.03] by a random vocab id,
    and the remainder are recorded but left unchanged.  [CLS]/[SEP] tokens
    are never corrupted.

    :param batch_tokens: list of token-id lists (one per sentence); mutated.
    :param total_token_num: total token count across the batch
        (assumed to equal sum(len(sent) for sent in batch_tokens) --
        TODO confirm at call sites).
    :param vocab_size: vocabulary size used to draw random replacement ids.
    :return: (batch_tokens, mask_label, mask_pos) -- mask_label holds the
        original ids of every selected token; mask_pos their flattened
        positions in the padded batch (sent_index * max_len + token_index).
    """
    max_len = max([len(sent) for sent in batch_tokens])
    mask_label = []
    mask_pos = []
    # One pre-drawn probability per token decides its fate; one pre-drawn
    # random id per token is used when that token is chosen for replacement.
    prob_mask = np.random.rand(total_token_num)
    # Note: the first token is [CLS], so [low=1]
    replace_ids = np.random.randint(1, high=vocab_size, size=total_token_num)
    pre_sent_len = 0
    prob_index = 0
    for sent_index, sent in enumerate(batch_tokens):
        mask_flag = False
        # Advance into the flat prob/replace arrays past the previous sentence.
        prob_index += pre_sent_len
        for token_index, token in enumerate(sent):
            prob = prob_mask[prob_index + token_index]
            if prob > 0.15:
                continue
            elif 0.03 < prob <= 0.15:
                # mask
                if token != SEP and token != CLS:
                    mask_label.append(sent[token_index])
                    sent[token_index] = MASK
                    mask_flag = True
                    mask_pos.append(sent_index * max_len + token_index)
            elif 0.015 < prob <= 0.03:
                # random replace
                if token != SEP and token != CLS:
                    mask_label.append(sent[token_index])
                    sent[token_index] = replace_ids[prob_index + token_index]
                    mask_flag = True
                    mask_pos.append(sent_index * max_len + token_index)
            else:
                # keep the original token
                if token != SEP and token != CLS:
                    mask_label.append(sent[token_index])
                    mask_pos.append(sent_index * max_len + token_index)
        pre_sent_len = len(sent)
        # ensure at least mask one word in a sentence
        # NOTE(review): this loop cannot terminate (and np.random.randint
        # raises for len(sent) <= 2) when a sentence has no maskable interior
        # position -- confirm callers guarantee longer sentences.
        while not mask_flag:
            token_index = int(np.random.randint(1, high=len(sent) - 1, size=1))
            if sent[token_index] != SEP and sent[token_index] != CLS:
                mask_label.append(sent[token_index])
                sent[token_index] = MASK
                mask_flag = True
                mask_pos.append(sent_index * max_len + token_index)
    mask_label = np.array(mask_label).astype("int64").reshape([-1, 1])
    mask_pos = np.array(mask_pos).astype("int64").reshape([-1, 1])
    return batch_tokens, mask_label, mask_pos
def get_lr_curves(
    spark, features_df, cluster_ids, kernel_bandwidth, num_pdf_points, random_seed=None
):
    """ Compute the likelihood ratio curves for clustered clients.
    Work-flow followed in this function is as follows:
    * Access the DataFrame including cluster numbers and features.
    * Load same similarity function that will be used in TAAR module.
    * Iterate through each cluster and compute in-cluster similarity.
    * Iterate through each cluster and compute out-cluster similarity.
    * Compute the kernel density estimate (KDE) per similarity score.
    * Linearly down-sample both PDFs to 1000 points.
    :param spark: the SparkSession object.
    :param features_df: the DataFrame containing the user features (e.g. the
        ones coming from |get_donors|).
    :param cluster_ids: the list of cluster ids (e.g. the one coming from |get_donors|).
    :param kernel_bandwidth: the kernel bandwidth used to estimate the kernel densities.
    :param num_pdf_points: the number of points to sample for the LR-curves.
    :param random_seed: the provided random seed (fixed in tests).
    :return: A list in the following format
        [(idx, (lr-numerator-for-idx, lr-denominator-for-idx)), (...), ...]
    """
    # Instantiate holder lists for inter- and intra-cluster scores.
    same_cluster_scores_rdd = spark.sparkContext.emptyRDD()
    different_clusters_scores_rdd = spark.sparkContext.emptyRDD()
    # Only pass a seed through to randomSplit when one was provided.
    random_split_kwargs = {"seed": random_seed} if random_seed else {}
    for cluster_number in cluster_ids:
        # Pick the features for users belonging to the current cluster.
        current_cluster_df = features_df.where(col("prediction") == cluster_number)
        # Pick the features for users belonging to all the other clusters.
        other_clusters_df = features_df.where(col("prediction") != cluster_number)
        logger.debug(
            "Computing scores for cluster", extra={"cluster_id": cluster_number}
        )
        # Compares the similarity score between pairs of clients in the same cluster.
        cluster_half_1, cluster_half_2 = current_cluster_df.rdd.randomSplit(
            [0.5, 0.5], **random_split_kwargs
        )
        pair_rdd = generate_non_cartesian_pairs(cluster_half_1, cluster_half_2)
        intra_scores_rdd = pair_rdd.map(lambda r: similarity_function(*r))
        same_cluster_scores_rdd = same_cluster_scores_rdd.union(intra_scores_rdd)
        # Compares the similarity score between pairs of clients in different clusters.
        pair_rdd = generate_non_cartesian_pairs(
            current_cluster_df.rdd, other_clusters_df.rdd
        )
        inter_scores_rdd = pair_rdd.map(lambda r: similarity_function(*r))
        different_clusters_scores_rdd = different_clusters_scores_rdd.union(
            inter_scores_rdd
        )
    # Determine a range of observed similarity values linearly spaced.
    # StatCounter aggregates min/max over the combined score RDD in one pass.
    all_scores_rdd = same_cluster_scores_rdd.union(different_clusters_scores_rdd)
    stats = all_scores_rdd.aggregate(
        StatCounter(), StatCounter.merge, StatCounter.mergeStats
    )
    min_similarity = stats.minValue
    max_similarity = stats.maxValue
    # num_pdf_points evenly spaced sample points spanning the observed range.
    lr_index = np.arange(
        min_similarity,
        max_similarity,
        float(abs(min_similarity - max_similarity)) / num_pdf_points,
    )
    # Kernel density estimate for the inter-cluster comparison scores.
    kd_dc = KernelDensity()
    kd_dc.setSample(different_clusters_scores_rdd)
    kd_dc.setBandwidth(kernel_bandwidth)
    denominator_density = kd_dc.estimate(lr_index)
    # Kernel density estimate for the intra-cluster comparison scores.
    kd_sc = KernelDensity()
    kd_sc.setSample(same_cluster_scores_rdd)
    kd_sc.setBandwidth(kernel_bandwidth)
    numerator_density = kd_sc.estimate(lr_index)
    # Structure this in the correct output format.
    return list(zip(lr_index, list(zip(numerator_density, denominator_density))))
def orred_filter_list(prefix, subfilters):
    """Build a Q-object from a list of filter dicts.

    Each dict's (key: value) pairs are ANDed together (rows must satisfy all
    of them); the dicts themselves are ORed (matching any one is enough).
    """
    combined = Q()
    for conditions in subfilters:
        clause = Q()
        for key, values in conditions.items():
            clause &= filter_on(prefix, key, values)
        combined |= clause
    return combined
import unittest
def get_unittests():
    """Discover and return the basic and advanced unit-test suites."""
    tests_root = get_testdirectory()
    # A fresh loader per discovery pass, one pass per naming convention.
    suites = []
    for pattern in ('basic_tests_*.py', 'advanced_tests_*.py'):
        suites.append(unittest.TestLoader().discover(tests_root, pattern=pattern))
    return suites
def become_mapper_(mapper_idx, map_fun, input_files, n_mappers, n_reducers,
                   map_begin, map_end, unit_fun, reduce_factory,
                   load_balancing_scheme, input_files_delimiter, record_terminator,
                   mapper_output_template, mapper_idx_template):
    """
    become mapper

    Run one mapper of a map-reduce job: load-balance the input files across
    mappers, stream records through ``map_fun`` and emit results through the
    reducer set.  Returns 0 on success, -1 when a mandatory argument is
    missing.

    (mandatory. must not be None)
    mapper_idx :  index of this mapper in [0, n_mappers)
    map_fun :     callable(record, reducers, input_file, pos) applied per record
    input_files : input files to process
    n_mappers :   total number of mappers
    n_reducers :  total number of reducers
    (optional. may be None)
    map_begin :   callable(reducers) invoked once before mapping
    map_end :     callable(reducers) invoked once after mapping
    unit_fun :
    reduce_factory :
    load_balancing_scheme :
    input_files_delimiter :
    record_terminator :
    mapper_output_template : printf-style template for the output file name
    mapper_idx_template :    printf-style template for the index file name
    """
    # Validate the mandatory arguments; propagate the -1 error code upward.
    if need_define(mapper_idx, "mapper_idx") == -1: return -1
    if need_define(map_fun, "map_fun") == -1: return -1
    if need_define(input_files, "input_files") == -1: return -1
    if need_define(n_mappers, "n_mappers") == -1: return -1
    if need_define(n_reducers, "n_reducers") == -1: return -1
    # input_files = string.split(input_files, input_files_delimiter)
    # Decide which (file, begin, end) blocks this mapper is responsible for.
    lb = load_balancer(input_files, load_balancing_scheme, mapper_idx, n_mappers)
    output_file = mapper_output_template % mapper_idx
    idx_file = mapper_idx_template % mapper_idx
    R = reducers(mapper_idx, n_reducers, output_file, idx_file, unit_fun, reduce_factory)
    R.ensure_open()
    if map_begin: map_begin(R)
    # FIXME: more flexible load balancing
    for input_file,begin_pos,end_pos in lb.get_blocks():
        # Stream the assigned byte range of the file record by record.
        reader = record_stream(input_file, begin_pos, end_pos, record_terminator)
        reader.open()
        while 1:
            line,_,pos = reader.read_next_record()
            if line == "": break  # empty record string marks end of block
            if 0: Ws("LINE: [%s]\n" % line)
            map_fun(line, R, input_file, pos)
        reader.close()
    if map_end: map_end(R)
    R.close()
    return 0
from typing import Dict
from typing import List
def execute_action(arena: ActiveArena, unit: ActiveUnit, act: Dict) -> List[Dict]:
    """
    Based on the type of `act`, executes the given action.
    Returns a summary of the action's effects, conforming to the action_output_schema.
    Mutates the given arena and/or unit and/or their respective derived objects, in the
    course of executing the action.
    :param arena: an ActiveArena representing the entire scope of the current fight
    :param unit: the particular ActiveUnit performing the action
    :param act: the action being performed, conforming to any one of the allowed actions
        defined in the action_input_schema (see api.arena.schemas)
    :return: a list of each sub-action's effects, conforming to the format defined in the
        action_output_schema (See api.arena.schemas)
    """
    name = act['action']
    # validate against the unit's currently-restricted actions
    if name in unit.restricted_actions.split(','):
        raise ValueError(f"Unit is not allowed to perform action '{name}' right now")
    # Dispatch table: each entry is a thunk so arguments are only read for
    # the action actually being performed.
    dispatch = {
        'equip_weapon': lambda: equip_weapon(arena.game, unit, act['weapon']),
        'equip_item': lambda: equip_item(unit, act['item']),
        'attack': lambda: attack(arena, unit, act['target'], act['with_weapon'], act['range']),
        'use_weapon': lambda: use_weapon(arena, unit, act['weapon'], act.get('target'), act.get('extra_data')),
        'use_item': lambda: use_item(arena, unit, act['item'], act.get('target'), act.get('extra_data')),
        'use_skill': lambda: use_skill(arena, unit, act['skill'], act.get('target'), act.get('extra_data')),
        'discard_weapon': lambda: discard_weapon(arena.game, unit, act['weapon']),
        'discard_item': lambda: discard_item(unit, act['item']),
        'wait': lambda: wait(arena),
    }
    if name not in dispatch:
        raise ValueError(f"'{name}' is not a valid action")
    return dispatch[name]()
import copy
def has_mutation(gm, example_inputs):
    """Check if the graph module has any form of mutation"""
    # TODO - moco gives bad accuracy with Aliasing. gm is getting mutated in a bad way.
    # Run the analysis on a deep copy so the caller's graph is untouched.
    gm_copy = copy.deepcopy(gm)
    ShapeAliasingAndMutationProp(gm_copy).run(*example_inputs)
    return any(
        node.meta["is_mutation"] or node.meta["is_input_mutation"]
        for node in gm_copy.graph.nodes
    )
def comment_remove(obj_id, analyst, date):
    """
    Remove an existing comment.
    :param obj_id: The top-level ObjectId to find the comment to remove.
    :type obj_id: str
    :param analyst: The user removing the comment.
    :type analyst: str
    :param date: The date of the comment to remove.
    :type date: datetime.datetime
    :returns: dict with keys "success" (boolean) and "message" (str).
    """
    comment = Comment.objects(obj_id=obj_id, created=date).first()
    # Guard clauses: missing comment, then ownership check, then delete.
    if not comment:
        return {'success': False,
                'message': "Could not find comment to remove!"}
    if comment.analyst != analyst:
        return {'success': False,
                'message': "You cannot delete comments from other analysts!"}
    comment.delete()
    return {'success': True, 'message': "Comment removed successfully!"}
def eul2m_vector(in11, in12, in13, k1, k2, k3):
    """eul2m_vector(ConstSpiceDouble * in11, ConstSpiceDouble * in12, ConstSpiceDouble * in13, SpiceInt k1, SpiceInt k2, SpiceInt k3)"""
    # Thin SWIG-style wrapper: all work happens in the compiled _cspyce0
    # extension (vectorized form of the CSPICE eul2m routine).
    return _cspyce0.eul2m_vector(in11, in12, in13, k1, k2, k3)
def helicsCreateMessageFederateFromConfig(config_file: str) -> HelicsMessageFederate:
    """
    Create `helics.HelicsMessageFederate` from a JSON file or JSON string or TOML file.
    `helics.HelicsMessageFederate` objects can be used in all functions that take a `helics.HelicsFederate` object as an argument.
    **Parameters**
    - **`config_file`** - A config (JSON,TOML) file or a JSON string that contains setup and configuration information.
    **Returns**: `helics.HelicsMessageFederate`.
    """
    fn = loadSym("helicsCreateMessageFederateFromConfig")
    err = helicsErrorInitialize()
    handle = fn(cstring(config_file), err)
    # A zero error code from the C layer means the federate was created.
    if err.error_code == 0:
        return HelicsMessageFederate(handle)
    raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def chain_template_as_nx_graph(chain: ChainTemplate):
    """ Convert FEDOT chain template into networkx graph object """
    graph = nx.DiGraph()
    node_labels = {}
    # First pass: one node per operation template, labelled by its type.
    for template in chain.operation_templates:
        node_labels[template.operation_id] = template.operation_type
        graph.add_node(template.operation_id)
    # Second pass: edges from every parent operation to its consumer.
    for template in chain.operation_templates:
        if template.nodes_from is not None:
            for parent in template.nodes_from:
                graph.add_edge(parent, template.operation_id)
    return graph, node_labels
import os
def AbsoluteCanonicalPath(*path):
    """Return the most canonical path Python can provide."""
    # join -> expand "~" -> absolutize -> resolve symlinks.
    joined = os.path.join(*path)
    expanded = os.path.expanduser(joined)
    return os.path.realpath(os.path.abspath(expanded))
def skip_if(expr, msg=""):
"""Skip the current substep and set _output to empty. Output
will be removed if already generated."""
if expr:
raise StopInputGroup(msg=msg, keep_output=False)
return 0 | 383b0edf96f2c088b5191a952970d6cccd481d06 | 3,629,216 |
import operator
def mergeGuideInfo(seq, startDict, pamPat, otMatches, inputPos, effScores, sortBy=None, org=None):
    """
    merges guide information from the sequence, the efficiency scores and the off-targets.
    creates rows with too many fields. Probably needs refactoring.
    for each pam in startDict, retrieve the guide sequence next to it and score it
    sortBy can be "effScore", "mhScore", "oofScore" or "pos"

    :return: (guideData, guideScores, hasNotFound, pamIdToSeq) where
        guideData is a sorted list of per-guide rows, guideScores maps
        pamId -> specificity score, hasNotFound flags any guide without a
        genome match, and pamIdToSeq maps pamId -> guide sequence.
    """
    allEnzymes = readEnzymes()
    guideData = []
    guideScores = {}
    hasNotFound = False
    pamIdToSeq = {}
    # Every candidate guide flanking a PAM site in the input sequence.
    pamSeqs = list(flankSeqIter(seq.upper(), startDict, len(pamPat), True))
    for pamId, pamStart, guideStart, strand, guideSeq, pamSeq, pamPlusSeq in pamSeqs:
        # matches in genome
        # one desc in last column per OT seq
        if pamId in otMatches:
            pamMatches = otMatches[pamId]
            guideSeqFull = concatGuideAndPam(guideSeq, pamSeq)
            mutEnzymes = matchRestrEnz(allEnzymes, guideSeq, pamSeq, pamPlusSeq)
            posList, otDesc, guideScore, guideCfdScore, last12Desc, ontargetDesc, \
                subOptMatchCount = \
                makePosList(org, pamMatches, guideSeqFull, pamPat, inputPos)
        # no off-targets found?
        else:
            posList, otDesc, guideScore = None, "Not found", None
            guideCfdScore = None
            last12Desc = ""
            hasNotFound = True
            mutEnzymes = []
            ontargetDesc = ""
            subOptMatchCount = False
            # NOTE(review): seq34Mer is assigned only on this branch and never
            # read afterwards -- looks like dead code; confirm before removing.
            seq34Mer = None
        guideRow = [guideScore, guideCfdScore, effScores.get(pamId, {}), pamStart, guideStart, strand, pamId, guideSeq, pamSeq, posList, otDesc, last12Desc, mutEnzymes, ontargetDesc, subOptMatchCount]
        guideData.append( guideRow )
        guideScores[pamId] = guideScore
        pamIdToSeq[pamId] = guideSeq
    # Pick the sort key: position, off-target count, a named efficiency
    # score, or (default / "spec") the specificity score in column 0.
    if sortBy == "pos":
        sortFunc = (lambda row: row[3])
        reverse = False
    elif sortBy == "offCount":
        sortFunc = (lambda row: len(row[9]))
        reverse = False
    elif sortBy is not None and sortBy!="spec":
        sortFunc = (lambda row: row[2].get(sortBy, 0))
        reverse = True
    else:
        sortFunc = operator.itemgetter(0)
        reverse = True
    guideData.sort(reverse=reverse, key=sortFunc)
    return guideData, guideScores, hasNotFound, pamIdToSeq
def get_distribution_centers_shipments(dc_id):
    """
    Retrieve all shipments originating from the specified distribution center.
    :param dc_id: The distribution center's id
    :return: JSON response: a list of shipment objects, e.g. [{
        "id": "123",
        "status": "SHIPPED",
        "createdAt": "2015-11-05T22:00:51.692765",
        "updatedAt": "2015-11-08T22:00:51.692765",
        "deliveredAt": "2015-11-08T22:00:51.692765",
        "estimatedTimeOfArrival": "2015-11-07T22:00:51.692765",
        "currentLocation": {Address},
        "fromId": "123",
        "toId:": "123"
    }, {...}]
    """
    check_null_input((dc_id, 'distribution center whose shipments you want to retrieve'))
    # Optional ?status= query parameter narrows the result set.
    status_filter = request.args.get('status')
    shipments = shipment_service.get_shipments(
        token=g.auth['loopback_token'],
        dc_id=dc_id,
        status=status_filter,
    )
    return Response(shipments, status=200, mimetype='application/json')
from typing import Union
from datetime import datetime
def historical_summary(date: Union[None, str, datetime] = None, filter: str = ''):
    """
    https://iextrading.com/developer/docs/#historical-summary
    Args:
        date: month to query, as a "YYYYMM" string or a datetime; omit for
            the default summary
        filter: https://iextrading.com/developer/docs/#filter-results
    Returns:
        dict: result
    """
    # Falsy date (None, "") -> un-dated endpoint, matching the original
    # truthiness check.
    if not date:
        return get_json('stats/historical', filter=filter)
    if isinstance(date, str):
        return get_json('stats/historical?date=' + date, filter=filter)
    if isinstance(date, datetime):
        return get_json('stats/historical?date=' + date.strftime('%Y%m'), filter=filter)
    raise TypeError(f"Can't handle type : {str(type(date))}. Filter: {filter}")
def runIW(before, after, aoi, scl, tScl, ag):
    """
    Run the complete iteratively weighted change analysis

    Parameters:
        before (ee.ImageCollection): images representing the reference landscape
        after (ee.ImageCollection): images representing the after condition
        aoi (ee.Geometry): area of interest
        scl (int): scale parameter for image statistics calculations
        tScl (int): tileScale, even integer [2,12]
        ag ('yes'/'no'): mask agricultural areas using Cultivated Lands Dataset?

    Returns:
        ee.Image: z-score image output of iw()
    """
    # Pixel masks: keep elevation <= 3500 m (SRTM) and, when ag == 'yes',
    # only cultivated pixels (USDA Cropland Data Layer 2019).
    CDL = ee.Image("USDA/NASS/CDL/2019")
    DEM = ee.Image("USGS/SRTMGL1_003")
    demMask = DEM.select(['elevation']).lte(3500)
    agMask = CDL.select(['cultivated']).eq(1)
    # Sentinel-2 reflectance bands used for the change-vector metrics.
    rgbn = ['B2', 'B3', 'B4', 'B8', 'B11', 'B12']
    # Median composites of each period, clipped to the area of interest.
    recent = after.median().clip(aoi)
    past = before.median().clip(aoi)
    recent = ee.Image(ee.Algorithms.If(ag == 'yes', recent.updateMask(agMask.And(demMask)), recent.updateMask(demMask)))
    past = ee.Image(ee.Algorithms.If(ag == 'yes', past.updateMask(agMask.And(demMask)), past.updateMask(demMask)))
    # Append normalized-difference indices (ndvi/ndsi/ndwi/nbr) to each composite.
    now = ND(recent, 'B8', 'B4', 'B3', 'B11', 'B12')
    old = ND(past, 'B8', 'B4', 'B3', 'B11', 'B12')
    # CREATE IMAGE WITH BANDS FOR CHANGE METRICS CV, RCV, NDVI, NBR, NDSI
    # Change vector magnitude between the two composites.
    cv = CV(old, now, rgbn, aoi, scl, tScl)
    # Relative change vector (max-normalized) between the two composites.
    rcv = rcvmax(old, now, rgbn, aoi, scl, tScl)
    # Per-index differences between the two composites.
    diff = d(old, now, ['ndvi', 'ndsi', 'ndwi', 'nbr'])
    # Combine cv, difference and rcv bands into a single image and compute
    # the iteratively re-weighted z-scores over the AOI.
    change = cv.addBands(diff).addBands(rcv)
    return iw(change, aoi, 10, scl, tScl)
def normalize_text(text, norm_type="stemming", pos=False, pipeline=None):
    """
    Preprocess text data
    :param str text: raw (possibly HTML) text to normalize
    :param str norm_type: "stemming" or "lemmatization" or None
    :param bool pos: Only for lemmatization. If True add tags to tokens like "_NOUN"
    :param Pipeline pipeline: for lemmatization
    :return str text: lower-cased, tokenized, stop-word-free text, optionally
        stemmed or lemmatized, re-joined with single spaces
    """
    text = clean_html(text)
    text = text.lower()
    lang = detect_language(text)
    tokens = nltk.word_tokenize(text)
    # Build the stop-word set once: the original called stopwords.words(lang)
    # inside the comprehension, re-reading the corpus for every token.
    stop_words = set(stopwords.words(lang))
    # Excluding Stop-words (and non-alphabetic tokens)
    tokens = [word for word in tokens if word not in stop_words and word.isalpha()]
    # Fall back to English for languages the stemmer does not support.
    if lang not in SnowballStemmer.languages:
        lang = "english"
    if norm_type is None:
        pass
    elif norm_type.lower() == "lemmatization":
        tokens = lemmatization(tokens, lang, pos, pipeline)
    elif norm_type.lower() == "stemming":
        tokens = stemming(tokens, lang)
    return " ".join(tokens)
def make_field(
    name: str,
    dimensions: FieldDimensions,
    is_temporary: bool = False
) -> Field:
    """Construct and populate a Field message.

    :param name: Name of the field
    :param dimensions: dimensions of the field (use make_field_dimensions_*)
    :param is_temporary: Is it a temporary field?
    """
    result = Field()
    result.is_temporary = is_temporary
    result.name = name
    # Protobuf sub-messages must be copied in, not assigned.
    result.field_dimensions.CopyFrom(dimensions)
    return result
from operator import add
def expand_to_point(b1, p1):
    """
    Expand bbox b1 to contain p1: [(x,y),(x,y)]

    :param b1: bounding box as (min_x, min_y, max_x, max_y)
    :param p1: iterable of (x, y) points
    :return: the smallest (min_x, min_y, max_x, max_y) containing b1 and
        every point in p1
    """
    # The original applied operator.add(b1, (x, y, x, y)), which for tuples
    # is concatenation -- it grew the sequence instead of expanding the box.
    min_x, min_y, max_x, max_y = b1
    for p in p1:
        min_x = min(min_x, p[0])
        min_y = min(min_y, p[1])
        max_x = max(max_x, p[0])
        max_y = max(max_y, p[1])
    return (min_x, min_y, max_x, max_y)
import collections
def get_closing_bracket(string, indice_inicio):
    """Return the index of the '}' matching the '{' at ``indice_inicio``.

    Backslash-escaped braces (preceded by ``\\``) are ignored.

    :raises ValueError: if the character at ``indice_inicio`` is not an
        unescaped '{', or if no matching '}' exists.
    """
    if string[indice_inicio] != '{':
        raise ValueError("String invalida")
    # A plain counter replaces the original deque (only its emptiness was
    # ever used).
    depth = 0
    for atual in range(indice_inicio, len(string)):
        # Guard atual > 0: the original read string[atual - 1] at index 0,
        # wrapping to the *last* character and misclassifying the brace as
        # escaped whenever the string ended with a backslash.
        if atual > 0 and string[atual - 1] == '\\':
            continue
        if string[atual] == '{':
            depth += 1
        elif string[atual] == '}':
            depth -= 1
            if depth == 0:
                return atual  # matching '}' found
    # Also covers an escaped opening brace, which the original silently
    # "matched" by returning indice_inicio itself.
    raise ValueError("String invalida")
def read_matrix():
    """Read an integer matrix from stdin.

    The first line gives the row count n; each of the next n lines holds
    one row of integers separated by ', '.
    """
    row_count = int(input())
    return [
        [int(cell) for cell in input().split(', ')]
        for _ in range(row_count)
    ]
def format_time_trigger_string(timer_instance):
    """
    :param timer_instance: either instance of RepeatTimer or EventClock
    :return: human-readable and editable string in one of two formats:
        - 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM'
        - 'every NNN'
    """
    if isinstance(timer_instance, RepeatTimer):
        return TRIGGER_PREAMBLE_EVERY + str(timer_instance.interval_new)
    if isinstance(timer_instance, EventClock):
        stamps = ','.join(repr(stamp) for stamp in timer_instance.timestamps)
        return TRIGGER_PREAMBLE_AT + stamps
    raise ValueError('Unknown timer instance type %s' % timer_instance.__class__.__name__)
import importlib
def get_class_from_string(class_string: str):
    """Get class or function instance from a string, interpreted as Python module.

    :param class_string: fully qualified dotted path,
        e.g. ``"package.module.ClassName"``
    :return: the attribute named by the final path component
    """
    # rpartition splits on the *last* dot only. The original used
    # str.replace, which strips every occurrence of ".<name>" and corrupts
    # module paths whose final component repeats earlier, e.g.
    # "xml.etree.ElementTree.ElementTree" -> "xml.etree".
    module_path, _, class_name = class_string.rpartition(".")
    lib = importlib.import_module(module_path)
    return getattr(lib, class_name)
def safe_power(a, b):  # pylint: disable=invalid-name
    """a limited exponent/to-the-power-of function, for safety reasons

    Raises NumberTooHigh when either operand exceeds MAX_POWER in magnitude.
    """
    if max(abs(a), abs(b)) > MAX_POWER:
        raise NumberTooHigh("Sorry! I don't want to evaluate {0} ** {1}".format(a, b))
    return a ** b
def ConcateMatching(vec1, vec2):
    """
    ConcateMatching

    Concatenate two feature tensors along axis 1 using PaddlePaddle's
    fluid API; both inputs must agree on every other dimension.
    """
    #TODO: assert shape
    return fluid.layers.concat(input=[vec1, vec2], axis=1)
import copy
import random
def defaults( d= cli(
    cohen = .3
    ,data = "data/weather.csv"
    ,far = .9
    ,k = 1
    ,m = 2
    ,mostrest = 3
    ,p = 2
    ,seed = 1
    ,train = .66
    ,tiny = .6
    )):
    """Calling `default` will return a fresh copy of the defaults
    (optionally updated via command-line flags), and will reset
    the random number seed to the `seed` value shown above."""
    # NOTE: the cli(...) default is evaluated once, at definition time;
    # the deepcopy below keeps each call's returned options independent.
    d = copy.deepcopy(obj(**d))
    # Reset the module-level RNG so runs seeded from these options reproduce.
    random.seed(d.seed)
    return d
import itertools
def _racemization(compound, max_centers=3, carbon_only=True):
    """Enumerates all possible stereoisomers for unassigned chiral centers.
    :param compound: A compound
    :type compound: rdMol object
    :param max_centers: The maximum number of unspecified stereocenters to
        enumerate. Sterioisomers grow 2^n_centers so this cutoff prevents lag
    :type max_centers: int
    :param carbon_only: Only enumerate unspecified carbon centers. (other
        centers are often not tautomeric artifacts)
    :type carbon_only: bool
    :return: list of stereoisomers
    :rtype: list of rdMol objects
    """
    new_comps = []
    # FindMolChiralCenters (rdkit) finds all chiral centers. We get all
    # unassigned centers (represented by "?" in the second element
    # of the function's return parameters).
    unassigned_centers = [c[0] for c in AllChem.FindMolChiralCenters(
        compound, includeUnassigned=True) if c[1] == "?"]
    # Get only unassigned centers that are carbon (atomic number of 6) if
    # indicated
    if carbon_only:
        unassigned_centers = list(
            filter(lambda x: compound.GetAtomWithIdx(x).GetAtomicNum() == 6,
                   unassigned_centers))
    # Return original compound if no unassigned centers exist (or if above
    # max specified (to prevent lag))
    if not unassigned_centers or len(unassigned_centers) > max_centers:
        return [compound]
    # Each product element is one CW/CCW assignment for every center.
    for seq in itertools.product([1, 0], repeat=len(unassigned_centers)):
        for atomId, cw in zip(unassigned_centers, seq):
            # cw - Clockwise; ccw - Counterclockwise
            # Get both cw and ccw chiral centers for each center. Used
            # itertools.product to get all combinations.
            if cw:
                compound.GetAtomWithIdx(atomId).SetChiralTag(
                    AllChem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW)
            else:
                compound.GetAtomWithIdx(atomId).SetChiralTag(
                    AllChem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW)
        # Duplicate C++ object so that we don't get multiple pointers to
        # same object
        new_comps.append(deepcopy(compound))
    return new_comps
def stop_gradient(variables):
    """Returns `variables` but with zero gradient with respect to every other
    variables.

    :param variables: a KerasSymbol wrapping an MXNet symbol.
    :return: a new KerasSymbol whose underlying symbol blocks gradient flow.
    """
    # mx.sym.BlockGrad is MXNet's gradient-stopping op.
    return KerasSymbol(mx.sym.BlockGrad(variables.symbol))
def get_repartition_emission_pies(data):
    """
    This function will create a figure with 3 pies describing the repartition
    per building of the emission for electricity / gas / total.

    :param data: DataFrame with a "Nom du bien" (building name) column plus
        the consumption and emission columns named in ``oh.column_names``.
    :return: a plotly Figure with three linked ("scalegroup") pie subplots.
    """
    fig = make_subplots(
        rows=1,
        cols=3,
        specs=[[{"type": "domain"}, {"type": "domain"}, {"type": "domain"}]],
        subplot_titles=["Électricité", "Gaz", "Emission totale"],
    )
    # Grouping by the building name
    data = (
        data.groupby("Nom du bien")[
            [
                oh.column_names["consumption"]["gas"],
                oh.column_names["consumption"]["electricity"],
                oh.column_names["consumption"]["total"],
                oh.column_names["emission"]["gas"],
                oh.column_names["emission"]["electricity"],
                oh.column_names["emission"]["total"],
            ]
        ]
        .sum()
        .reset_index()
    )
    # Pie for electricity (customdata carries consumption for the hover text)
    fig.add_trace(
        go.Pie(
            values=data[oh.column_names["emission"]["electricity"]].values,
            customdata=data[oh.column_names["consumption"]["electricity"]],
            labels=data["Nom du bien"],
            scalegroup="one",
            name="Électricité",
        ),
        1,
        1,
    )
    # Pie for gaz
    fig.add_trace(
        go.Pie(
            labels=data["Nom du bien"],
            values=data[oh.column_names["emission"]["gas"]].values,
            customdata=data[oh.column_names["consumption"]["gas"]],
            scalegroup="one",
            name="Gaz",
        ),
        1,
        2,
    )
    # Pie for total
    fig.add_trace(
        go.Pie(
            labels=data["Nom du bien"],
            values=data[oh.column_names["emission"]["total"]].values,
            customdata=data[oh.column_names["consumption"]["total"]],
            scalegroup="one",
            name="Emission totale",
        ),
        1,
        3,
    )
    # Figure settings: donut style, shared hover template showing kgCO2e,
    # kWh and the slice percentage.
    fig.update_traces(
        hole=0.4,
        textposition="inside",
        hovertemplate="%{label}<br>%{value:.0f} kgCO2e<br>%{customdata[0]:.0f} kWh<br>(%{percent})",
    )
    fig.update_layout(uniformtext_minsize=12, uniformtext_mode="hide")
    fig.update_layout(plot_bgcolor="white", template="plotly_white")
    return fig
def get_next_video_id(database_session: Session):
    """Return the id the next inserted video will receive.

    :param database_session: SQLAlchemy session to query.
    :return: MAX(id) + 1, or 1 when the videos table is empty.
    """
    # MAX over zero rows is NULL, so scalar() returns None on an empty
    # table; the original then crashed with ``TypeError: NoneType + int``.
    max_id = database_session.query(func.max(models.Video.id)).scalar()
    return (max_id or 0) + 1
import os
def system_supports_plotting():
    """
    Check whether plotting is possible on this system.

    Returns
    -------
    bool
        True when ALLOW_PLOTTING=true is set in the environment, or when an
        X server answers ``xset -q``; False otherwise.
    """
    # Explicit override, e.g. for headless CI runners.
    if os.environ.get('ALLOW_PLOTTING', '').lower() == 'true':
        return True
    # Bug fix: Popen/PIPE were never imported, so the probe below always
    # raised NameError, which the old bare `except` silently turned into
    # False. Import locally and catch only the expected failure.
    from subprocess import Popen, PIPE
    try:
        p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE)
        p.communicate()
        return p.returncode == 0
    except OSError:
        # xset is not installed / not executable
        return False
def parse_args():
    """Command-line argument parser for testing."""
    arg_parser = ArgumentParser(description='Noise2Noise adapted to X-ray microtomography')

    # Data and checkpoint locations
    arg_parser.add_argument('-d', '--data', help='dataset root path', default='../data')
    arg_parser.add_argument('--load-ckpt', help='load model checkpoint')

    # Slicing / cropping configuration
    arg_parser.add_argument('-x', '--axis', help='Axis along which slices will be taken', type=int)
    arg_parser.add_argument('-cs', '--crop-size', help='Size of the cropped image', type=int)
    arg_parser.add_argument('-nc', '--n-crops', help='Number of random crops from a single image', type=int)

    # Training configuration
    arg_parser.add_argument('-b', '--batch-size', help='minibatch size', default=4, type=int)
    arg_parser.add_argument('-s', '--seed', help='fix random seed', type=int)
    arg_parser.add_argument('--cuda', help='use cuda', action='store_true')
    arg_parser.add_argument('--show-output', help='pop up window to display outputs', default=0, type=int)

    return arg_parser.parse_args()
def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0):
    """Convert a sparse representation into a dense tensor.

    Example::
        - sparse_to_dense([[0, 0], [1, 1]], [2, 2], [3, 3], 0) = [[3, 0], [0, 3]]

    Parameters
    ----------
    sparse_indices : tvm.te.Tensor
        0-D, 1-D, or 2-D integer tensor with the locations of sparse values.
    output_shape : list of int
        Shape of the dense output tensor.
    sparse_values : tvm.te.Tensor
        0-D or 1-D tensor holding the values at ``sparse_indices``.
    default_value : tvm.te.Tensor
        0-D tensor used for every remaining location; defaults to 0.

    Returns
    -------
    tvm.te.Tensor
        Dense tensor of shape ``output_shape`` with the dtype of ``sparse_values``.
    """
    dense = cpp.sparse_to_dense(
        sparse_indices, output_shape, sparse_values, default_value
    )
    return dense
def os_supported():
    """Report whether the current operating system is supported.

    Returns:
        bool
    """
    supported_platform = 'Windows'
    return is_os(supported_platform)
def put_path_to_db(req_path: ReqPathPutTransact):
    """Put learning path to DynamoDB.

    :param req_path: transaction payload describing the path update.
    :return: the raw ``transact_write_items`` response on success.
    :raises HTTPException: 404 with the DynamoDB error message on ClientError,
        or with the stringified exception on any other failure.
    """
    try:
        transact_items = path_input.transact_update_path(path=req_path)
        res = db.client.transact_write_items(
            ReturnConsumedCapacity="INDEXES", TransactItems=transact_items
        )
        return res
    except ClientError as err:
        err_message = err.response["Error"]["Message"]
        raise HTTPException(status_code=404, detail=err_message)
    except Exception as err:
        # Was `except BaseException`, which also converted KeyboardInterrupt
        # and SystemExit into HTTP 404 responses; let those propagate.
        raise HTTPException(status_code=404, detail=str(err))
import zlib
def get_hash(value, max_hash):
    """Map *value* deterministically onto a split bucket in 1..max_hash."""
    digest = zlib.adler32(str(value).encode())
    return digest % max_hash + 1
import shutil
def copy_proto_go_source(target, source, env):
    """Copy go source file generated by protobuf into go standard directory."""
    src_path = str(source[0])
    dest_path = str(target[0])
    shutil.copy2(src_path, dest_path)
    # SCons builder convention: None signals success.
    return None
def add_credentials(request):
    """
    Create credentials for SolarWinds integration.

    GET renders an empty credentials form; POST validates and saves it,
    then redirects back to the referring page.
    """
    action_url = reverse('add_credentials')
    if request.method == 'POST':
        form = SolarWindsConectionForm(request.POST)
        if form.is_valid():
            form.save()
            msg = "The SolarWinds credentials have been saved."
            messages.success(request, msg)
            # The Referer header is optional; fall back to this view's URL
            # instead of raising KeyError (HTTP 500) when it is absent.
            return HttpResponseRedirect(request.META.get('HTTP_REFERER', action_url))
        # Invalid POST falls through and re-renders the bound form with errors.
    else:
        form = SolarWindsConectionForm()
    return {
        'title': 'Add SolarWinds Connection Info Rest',
        'form': form,
        'use_ajax': True,
        'action_url': action_url,
        'top_content': "SolarWinds credentials",
        'submit': 'Save',
    }
def parsePifKey(pif, key):
    """Parse a single pif key for single scalar values; return nan if no scalar found.

    :param pif: PIF to access
    :type pif: pif
    :param key: key to access data
    :type key: string
    :returns: scalar value or np.nan
    """
    # Build the ReadView once; the previous version constructed it up to
    # three times per call.
    view = ReadView(pif)
    if key not in view.keys():
        return np.nan
    entry = view[key]
    if 'scalars' not in dir(entry):
        return np.nan
    try:
        return entry.scalars[0].value
    except IndexError:
        # scalars list exists but is empty
        return np.nan
def check_multimetric_scoring(estimator, scoring):
    """Check the scoring parameter in cases when multiple metrics are allowed.
    Parameters
    ----------
    estimator : sklearn estimator instance
        The estimator for which the scoring will be applied.
    scoring : list, tuple or dict
        A single string (see :ref:`scoring_parameter`) or a callable
        (see :ref:`scoring`) to evaluate the predictions on the test set.
        For evaluating multiple metrics, either give a list of (unique) strings
        or a dict with names as keys and callables as values.
        See :ref:`multimetric_grid_search` for an example.
    Returns
    -------
    scorers_dict : dict
        A dict mapping each scorer name to its validated scorer.
    """
    err_msg_generic = (
        f"scoring is invalid (got {scoring!r}). Refer to the "
        "scoring glossary for details: "
        "https://scikit-learn.org/stable/glossary.html#term-scoring"
    )
    if isinstance(scoring, (list, tuple, set)):
        err_msg = (
            "The list/tuple elements must be unique " "strings of predefined scorers. "
        )
        # set() raises TypeError on unhashable elements; the flag lets us
        # raise outside the except block so the TypeError isn't chained.
        invalid = False
        try:
            keys = set(scoring)
        except TypeError:
            invalid = True
        if invalid:
            raise ValueError(err_msg)
        # A length mismatch with the deduplicated set means duplicates.
        if len(keys) != len(scoring):
            raise ValueError(
                f"{err_msg} Duplicate elements were found in"
                f" the given list. {scoring!r}"
            )
        elif len(keys) > 0:
            if not all(isinstance(k, str) for k in keys):
                # Distinguish "callable in a list" (user should use a dict)
                # from other non-string junk for a clearer message.
                if any(callable(k) for k in keys):
                    raise ValueError(
                        f"{err_msg} One or more of the elements "
                        "were callables. Use a dict of score "
                        "name mapped to the scorer callable. "
                        f"Got {scoring!r}"
                    )
                else:
                    raise ValueError(
                        f"{err_msg} Non-string types were found "
                        f"in the given list. Got {scoring!r}"
                    )
            scorers = {
                scorer: check_scoring(estimator, scoring=scorer) for scorer in scoring
            }
        else:
            raise ValueError(f"{err_msg} Empty list was given. {scoring!r}")
    elif isinstance(scoring, dict):
        keys = set(scoring)
        if not all(isinstance(k, str) for k in keys):
            raise ValueError(
                "Non-string types were found in the keys of "
                f"the given dict. scoring={scoring!r}"
            )
        if len(keys) == 0:
            raise ValueError(f"An empty dict was passed. {scoring!r}")
        # Dict values may be strings or callables; check_scoring validates each.
        scorers = {
            key: check_scoring(estimator, scoring=scorer)
            for key, scorer in scoring.items()
        }
    else:
        raise ValueError(err_msg_generic)
    return scorers
def dummy_workflow():
    """Provide a stand-in Snakemake workflow object for tests."""
    return MagicMock()
from distributed import MultiLock
import contextlib
def get_multi_lock_or_null_context(multi_lock_context, *args, **kwargs):
    """Return either a MultiLock or a no-op null context.

    Parameters
    ----------
    multi_lock_context: bool
        When True a ``MultiLock(*args, **kwargs)`` is returned; otherwise a
        ``contextlib.nullcontext()`` that does nothing.
    *args, **kwargs:
        Forwarded verbatim to the MultiLock constructor.

    Returns
    -------
    context: context manager
    """
    if not multi_lock_context:
        return contextlib.nullcontext()
    return MultiLock(*args, **kwargs)
import os
def load_cme_scenarios():
    """
    Load in the CME scenarios from their HDF5 file and return them in a dictionary.

    :return: dict mapping scenario name to {'speed': Quantity, 'width': Quantity},
        with units taken from each dataset's 'unit' attribute.
    """
    project_dirs = get_project_dirs()
    datafile_path = os.path.join(project_dirs['out_data'], 'CME_scenarios.hdf5')
    cme_scenarios = {}
    datafile = h5py.File(datafile_path, 'r')
    try:
        for key in datafile.keys():
            cme = datafile[key]
            speed = cme['speed'][()] * u.Unit(cme['speed'].attrs['unit'])
            width = cme['width'][()] * u.Unit(cme['width'].attrs['unit'])
            cme_scenarios[key] = {'speed': speed, 'width': width}
    finally:
        # Previously the handle leaked if a key/attr lookup raised;
        # always release the HDF5 file.
        datafile.close()
    return cme_scenarios
def argument_decorator(f):
    """Decorates a function to create an annotation for adding parameters
    to qualify another.

    .. literalinclude:: /../examples/argdeco.py
       :lines: 5-25
    """
    mixin_kwargs = {'decorator': f}
    return parser.use_mixin(DecoratedArgumentParameter, kwargs=mixin_kwargs)
async def get_open_api_endpoint(api_key: APIKey = Depends(get_api_key)):
    """Return a confirmation string once API-key authorisation succeeds."""
    confirmation = "Certification is accepted"
    return confirmation
def ReadMaleResp2015():
    """Reads respondent data from NSFG Cycle 9.

    returns: DataFrame
    """
    wanted_cols = ['caseid', 'mardat01', 'cmdivw', 'cmbirth', 'cmintvw',
                   'evrmarry', 'wgt2013_2015',
                   'marend01', 'rmarital', 'fmarno', 'mar1diss']
    resp = ReadResp('2013_2015_MaleSetup.dct',
                    '2013_2015_MaleData.dat.gz',
                    usecols=wanted_cols)
    # Derive the standard flags used downstream from the raw NSFG codes.
    resp['cmmarrhx'] = resp.mardat01
    resp['evrmarry'] = (resp.evrmarry == 1)
    resp['divorced'] = (resp.marend01 == 1)
    resp['separated'] = (resp.marend01 == 2)
    resp['widowed'] = (resp.marend01 == 3)
    resp['stillma'] = (resp.fmarno == 1) & (resp.rmarital == 1)
    resp['finalwgt'] = resp.wgt2013_2015
    resp['cycle'] = 9
    CleanResp(resp)
    return resp
def keyword_list(request):
    """
    Render the list of all keywords, optionally filtered by the ?q= term.

    This view should also show the number of datasets for each keyword;
    that part is handled by the template.
    """
    keywords = Keyword.objects.all()
    if "q" in request.GET:
        search_term = request.GET["q"]
        keywords = keywords.filter(keyword__icontains=search_term)
    matched = keywords.order_by("keyword")
    context = {
        "keyword_list": matched,
        "total_keywords": len(matched),
    }
    return render(request, "keywords/keyword_list.html", context)
import dateutil
def parse_date(datestr):
    """Parse an ISO 8601 formatted date string from Gophish."""
    parsed = dateutil.parser.parse(datestr)
    return parsed
def block_resnet152(pretrained=False, progress=True, device='cpu', **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # ResNet-152 stage depths per the original architecture.
    layer_config = [3, 8, 36, 3]
    return _block_resnet('block_resnet152', BlockBottleneck, layer_config,
                         pretrained, progress, device, 4, **kwargs)
def encode_categorical_features(dataframe, categorical_features):
    """Encode categorical features and add the encoded columns to the dataframe.

    Each feature is first ordinal-encoded (StringIndexer) and the result is
    then one-hot encoded.  Returns the augmented dataframe, the names of the
    one-hot output columns, and the list of transformer stages used.
    NOTE(review): `transformations` holds the estimator objects (the indexer
    and encoder as configured, not their fitted models) — confirm that is
    what downstream consumers expect.
    """
    transformations = []
    # Ordinal encoding: map each category string to an index; handleInvalid
    # "keep" reserves an extra index for unseen categories.
    ordinal_encoded_output_cols = ["ordinal_indexed_"+categorical_feature for categorical_feature in categorical_features]
    indexer = StringIndexer(inputCols=categorical_features, outputCols=ordinal_encoded_output_cols, handleInvalid="keep")
    dataframe = indexer.fit(dataframe).transform(dataframe)
    transformations.append(indexer)
    # One-Hot-Encoding of the ordinal indices produced above.
    one_hot_encoded_output_cols = ["one_hot_encoded_"+categorical_feature for categorical_feature in categorical_features]
    encoder = OneHotEncoder(inputCols=ordinal_encoded_output_cols,
                            outputCols=one_hot_encoded_output_cols)
    model = encoder.fit(dataframe)
    dataframe = model.transform(dataframe)
    transformations.append(encoder)
    return dataframe, one_hot_encoded_output_cols, transformations
def gen_nondeferred_mock(return_value=_sentinel, func_dict=None, spec=None, name='NDMock',
                         side_effect=_sentinel):
    """
    Get a mock which cannot be mistaken for a Deferred.

    @param return_value : A return value, passed directly to the Mock constructor if set
    @param func_dict: A dict of function-name : return_value to attach to the mock
    @param spec: Optional spec forwarded to MagicMock
    @param name: Name forwarded to MagicMock
    @param side_effect: Optional side_effect forwarded to MagicMock
    """
    kwargs = {'name': name}
    # Sentinel checks must use identity: `==` can be intercepted by the
    # value's own __eq__ (e.g. mocks, numpy arrays), which previously could
    # misclassify a provided value as "not set" or raise.
    if return_value is not _sentinel:
        kwargs['return_value'] = return_value
    if side_effect is not _sentinel:
        kwargs['side_effect'] = side_effect
    if spec is not None:
        kwargs['spec'] = spec
    m = MagicMock(**kwargs)

    # pylint: disable=unused-argument
    def notimpl(*args, **kwargs):
        raise NotImplementedError('You treated a Mock like a Deferred!')
    # Poison the Deferred API so accidental callback chaining fails loudly.
    m.addCallback = notimpl
    m.addErrback = notimpl
    m.addBoth = notimpl
    m.addCallbacks = notimpl
    if func_dict is not None:
        for fn, res in func_dict.items():
            setattr(m, fn, gen_nondeferred_mock(return_value=res))
    return m
import os
def npm_localfile(package, version):
    """Get the local filename of a npm package"""
    filename = npm_filename(package, version)
    return os.path.join("npm2", filename)
import re
def url2domain(url):
    """Extract the domain (host) part of a URL."""
    netloc = urlparse.urlparse(url).netloc
    # strip any leading userinfo ("user:pass@")
    netloc = re.sub("^.+@", "", netloc)
    # strip any trailing port (":8080")
    return re.sub(":.+$", "", netloc)
def rhythm_track(file_path: PathType) -> dict:
    """Perform rhythm track analysis of given audio file.

    Args:
        file_path: Path to audio file.

    Returns:
        Rhythm track parameters and data: a dict with 'meta' (source path and
        time stamp), 'params' (onset detector and spectrum settings) and
        'features' (onset peaks/indices/times and the spectrum).
    """
    snd = load_audio(file_path)
    # Detect onsets, then analyse the spectrum of the 2**11-sample segments
    # that start at each detected onset.
    onsets = FluxOnsetDetector(snd.data, snd.fps)
    segs = segment.by_onsets(snd.data, 2**11, onsets.index())
    spctr = Spectrum(segs, snd.fps, window='hamming')
    onsets_features = {
        'peaks': onsets.peaks,
        'index': onsets.index(),
        'times': onsets.times(snd.fps)
    }
    track_data = {
        'meta': {'source': file_path, 'time_stamp': time_stamp()},
        'params': {'onsets': onsets.params(), 'spectrum': spctr.params()},
        'features': {'onsets': onsets_features,
                     'spectrum': spctr.extract().as_dict()}
    }
    return track_data
import os
def get_package_dbpath():
    """Return the default database path (DBNAME inside this package's directory)."""
    package_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(package_dir, DBNAME)
def repo(request, repo_id):
    """Show repo page and handle POST request to decrypt repo.

    GET renders the repo page; POST validates the decryption password form
    and redirects to ``next`` (or back to this repo) on success, otherwise
    re-renders the password form with errors.
    """
    repo = get_repo(repo_id)
    if not repo:
        raise Http404
    if request.method == 'GET':
        return render_repo(request, repo)
    elif request.method == 'POST':
        form = RepoPassowrdForm(request.POST)
        # Fall back to this repo's page when no explicit next URL was given.
        next = get_next_url_from_request(request) or reverse('repo',
                                                             args=[repo_id])
        if form.is_valid():
            return HttpResponseRedirect(next)
        else:
            return render_to_response('decrypt_repo_form.html', {
                'repo': repo,
                'form': form,
                'next': next,
                'force_server_crypto': FORCE_SERVER_CRYPTO,
            }, context_instance=RequestContext(request))
def plot_predicted_treatment_effect(cf, figsize, npoints, num_workers):
    """Plot the predicted treatment effect from a Causal Forest.

    Args:
        cf (CausalForest): Fitted Causal Forest.
        figsize (tuple): The figure size.
        npoints (int): Number of points for meshgrid.
        num_workers (int): Number of workers for parallelization.

    Returns:
        ax (matplotlib.axis): The finished plot.
    """
    grid_x, grid_y = _construct_meshgrid(npoints=npoints)
    effects = _predicted_treatment_effect_on_meshgrid(grid_x, grid_y, cf, num_workers)
    return plot_3d_func(grid_x, grid_y, effects, "Predicted Treatment Effect", figsize)
import sys
def create_progress_bar(total, desc, **kwargs):
    """Build a ProgressBar configured for this application.

    The caller is responsible for closing the returned bar via close().
    """
    return ProgressBar(
        total=total,
        desc=desc,
        # XXX: click.get_text_stream or click.get_binary_stream don't
        # work well with tqdm on Windows and Python 3
        file=sys.stdout,
        # redraw correctly when the terminal is resized
        dynamic_ncols=True,
        # miniters improves progress on erratic updates caused by network
        miniters=1,
        **kwargs
    )
def central_angle_names(zma):
    """ central angle coordinate names

    (docstring previously said "distance coordinate names", which described
    a different accessor; this delegates to vmat.central_angle_names on the
    v-matrix of the Z-matrix.)
    """
    return vmat.central_angle_names(vmatrix(zma))
def cylinder_circles(nodeA, nodeB, radius, element_number=10):
    """
    Return the two end circles of a cylinder as point lists.
    """
    # Axis direction from B to A, shared by both end circles.
    axis = (np.array(nodeA) - np.array(nodeB)).tolist()
    points_a = circle(nodeA, axis, radius, element_number)
    points_b = circle(nodeB, axis, radius, element_number)
    return points_a, points_b
def get_at_index(obj, index):
    """Return the element of ``obj`` at the given 1-based index.

    List indexing is 1...n.  Returns None for any out-of-range index,
    including index < 1 (previously index 0 wrapped around via Python's
    negative indexing and silently returned the last element).
    """
    if index < 1:
        return None
    try:
        return obj[index - 1]
    except IndexError:
        return None
def mass_distance_matrix(ts, query, w):
    """
    Compute the MASS distance matrix used by the mpdist_vector algorithm.

    Parameters
    ----------
    ts : array_like
        The time series to compute the matrix for.
    query : array_like
        The time series to compare against.
    w : int
        The window size.

    Returns
    -------
    array_like
        The MASS distance matrix, one row per query subsequence.
    """
    num_subseqs = len(query) - w + 1
    rows = [np.real(mass2(ts, query[i:i + w])) for i in range(num_subseqs)]
    return np.array(rows)
def create_tube(inner_radius=0.5, outer_radius=1.0, height=1.0, slices=64, stacks=64):
    """generates the vertices, normals, and indices for a tube mesh

    The tube is built from two open cylinders (inner and outer walls) plus
    two annular caps stitched between them.

    :param inner_radius: tube inner radius
    :type inner_radius: float
    :param outer_radius: tube outer radius
    :type outer_radius: float
    :param height: tube height
    :type height: float
    :param slices: number of radial segments to use
    :type slices: int
    :param stacks: number of height segments to use
    :type stacks: int
    :return: The vertices, normals and index array of the mesh
    :rtype: Mesh
    """
    inner_cylinder = create_cylinder(inner_radius, height, slices, stacks, closed=False)
    outer_cylinder = create_cylinder(outer_radius, height, slices, stacks, closed=False)
    v_1 = outer_cylinder.vertices
    v_2 = inner_cylinder.vertices
    n_1 = outer_cylinder.normals
    n_2 = inner_cylinder.normals
    i_1 = outer_cylinder.indices
    # fix face windings for inner cylinder (its wall faces inward, so the
    # triangle orientation must be reversed)
    temp = inner_cylinder.indices.reshape(-1, 3)
    i_2 = temp[:, ::-1].flatten()
    vertex_count = slices * (stacks + 1)
    # Duplicate the rim vertices so the caps can carry their own normals.
    vertices = np.vstack((v_1, v_2,
                          v_1[:slices, :], v_2[:slices, :],  # vertices for top face
                          v_1[-slices:, :], v_2[-slices:, :]))  # vertices for bottom face
    # inner-wall normals are flipped to point toward the tube axis
    normals = np.vstack((n_1, -1 * n_2,
                         np.tile([0.0, 1.0, 0.0], (slices * 2, 1)),  # normals for top face
                         np.tile([0.0, -1.0, 0.0], (slices * 2, 1))))  # normals for bottom face
    indices = np.concatenate((i_1, vertex_count + i_2))
    vertex_count *= 2
    # Add caps to the pipe: x == 0 builds the top annulus, x == 1 the bottom,
    # with opposite windings so both face outward.
    for x in range(2):
        a = vertex_count + np.arange(slices)
        b = slices + a
        d = vertex_count + (np.arange(1, slices + 1) % slices)
        c = slices + d
        order = [d, b, a, d, c, b] if x == 0 else [a, b, d, b, c, d]
        temp = np.column_stack(order).flatten()
        indices = np.concatenate((indices, temp))
        vertex_count += slices * 2
    return Mesh(vertices.astype(np.float32), indices, normals.astype(np.float32))
import requests
import json
def get_room_id(room_name):
    """
    This function will find the Spark room id based on the {room_name}
    Call to Spark - /rooms
    :param room_name: The Spark room name
    :return: the Spark room Id, or None when no room matches
    """
    # NOTE(review): sending a JSON body with a GET request is unusual, and
    # the name match below is done client-side anyway — confirm whether the
    # payload has any effect on the API response.
    payload = {'title': room_name}
    room_number = None
    url = SPARK_URL + '/rooms'
    header = {'content-type': 'application/json', 'authorization': SPARK_AUTH}
    # NOTE(review): verify=False disables TLS certificate validation —
    # confirm this is intentional and not a leftover workaround.
    room_response = requests.get(url, data=json.dumps(payload), headers=header, verify=False)
    room_list_json = room_response.json()
    room_list = room_list_json['items']
    # Exact title match; first hit wins (the loop keeps the last match,
    # but titles are expected to be unique).
    for rooms in room_list:
        if rooms['title'] == room_name:
            room_number = rooms['id']
    return room_number
def openstack_ceilometer(today, **kwargs):
    """
    Pricing plugin for openstack ceilometer.

    Clears existing ceilometer stats for ``today``, then pulls usages from
    every configured site and saves them against the site's warehouse.
    Returns a (success, summary-message) tuple.
    """
    clear_ceilometer_stats(today)
    new = total = 0
    for site in settings.OPENSTACK_CEILOMETER:
        logger.info(
            "Processing OpenStack ceilometer {}".format(site['WAREHOUSE'])
        )
        try:
            warehouse = Warehouse.objects.get(name=site['WAREHOUSE'])
        except Warehouse.DoesNotExist:
            # Misconfigured site: log and keep processing the others.
            logger.error('Invalid warehouse: {}'.format(
                site['WAREHOUSE']
            ))
            continue
        usages = get_ceilometer_usages(today, site['CEILOMETER_CONNECTION'])
        site_new, site_total = save_ceilometer_usages(usages, today, warehouse)
        logger.info(
            '{} new, {} total ceilometer usages saved for {}'.format(
                site_new,
                site_total,
                site['WAREHOUSE'],
            )
        )
        new += site_new
        total += site_total
    return True, 'Ceilometer usages: {} new, {} total'.format(new, total)
# Thin wrapper delegating to the compiled _cspyce0 extension; the style
# matches auto-generated (SWIG-like) bindings — edit the generator, not this.
def spkezp_vector(k1, in11, str1, str2, k2):
    """spkezp_vector(SpiceInt k1, ConstSpiceDouble * in11, ConstSpiceChar * str1, ConstSpiceChar * str2, SpiceInt k2)"""
    return _cspyce0.spkezp_vector(k1, in11, str1, str2, k2)
import math
def rowSpacing(beta, sazm, lat, lng, tz, hour, minute):
    """
    This method determines the horizontal distance D between rows of PV panels
    (in PV module/panel slope lengths) for no shading on December 21 (north
    hemisphere) June 21 (south hemisphere) for a module tilt angle beta and
    surface azimuth sazm, and a given latitude, longitude, and time zone and
    for the time passed to the method (typically 9 am).
    (Ref: the row-to-row spacing is then ``D + cos(beta)``)
    8/21/2015
    Parameters
    ----------
    beta : double
        Tilt from horizontal of the PV modules/panels (deg)
    sazm : double
        Surface azimuth of the PV modules/panels (deg)
    lat : double
        Site latitude (deg)
    lng : double
        Site longitude (deg)
    tz : double
        Time zone (hrs)
    hour : int
        hour for no shading criteria
    minute: double
        minute for no shading
    Returns
    -------
    D : numeric
        Horizontal distance between rows of PV panels (in PV panel slope
        lengths)
    """
    beta = beta * DTOR  # Tilt from horizontal of the PV modules/panels, in radians
    sazm = sazm * DTOR  # Surface azimuth of PV module/pamels, in radians
    # First pass: solar position on the worst-case day (winter solstice for
    # the relevant hemisphere) at the nominal clock time.
    if lat >= 0:
        [azm, zen, elv, dec, sunrise, sunset, Eo, tst] = solarPos (2014, 12, 21, hour, minute, lat, lng, tz)
    else:
        [azm, zen, elv, dec, sunrise, sunset, Eo, tst] = solarPos (2014, 6, 21, hour, minute, lat, lng, tz)
    # HACK: tst is overridden with a fixed value to match the reference DLL
    # implementation — NOTE(review): confirm whether this should stay.
    tst = 8.877  ##DLL Forced value
    minute -= 60.0 * (tst - hour);  # Adjust minute so sun position is calculated for a tst equal to the
    # time passed to the function
    # Second pass with the adjusted minute so the computed true solar time
    # matches the requested time.
    if lat >= 0:
        [azm, zen, elv, dec, sunrise, sunset, Eo, tst] = solarPos(2014, 12, 21, hour, minute, lat, lng, tz)
    else:
        [azm, zen, elv, dec, sunrise, sunset, Eo, tst] = solarPos(2014, 6, 21, hour, minute, lat, lng, tz)
    # Console.WriteLine("tst = {0} azm = {1} elv = {2}", tst, azm * 180.0 / Math.PI, elv * 180.0 / Math.PI);
    # Shadow length projected onto the row-to-row direction.
    D = math.cos(sazm - azm) * math.sin(beta) / math.tan(elv)
    return D
def _check_buildopts_arches(mmd, arches):
    """
    Returns buildopts arches if valid, or otherwise the arches provided.

    :param mmd: Module MetaData
    :param arches: list of architectures
    :return: list of architectures
    :raises ValidationError: when buildopts lists arches outside ``arches``
    """
    buildopts = mmd.get_buildopts()
    if not buildopts:
        return arches
    try:
        buildopts_arches = buildopts.get_arches()
    except AttributeError:
        # libmodulemd version < 2.8.3
        return arches
    # Buildopts arches must be a subset of the input module arches.
    unsupported_arches = set(buildopts_arches) - set(arches)
    if unsupported_arches:
        raise ValidationError("The following buildopts arches are not supported with these "
                              "buildrequires: %r" % unsupported_arches)
    if not buildopts_arches:
        return arches
    log.info("Setting build arches of %s to %r based on the buildopts arches." % (
        mmd.get_nsvc(), buildopts_arches))
    return buildopts_arches
def hopwise_qry_encoder(qry_seq_emb,
                        qry_input_ids,
                        qry_input_mask,
                        is_training,
                        bert_config,
                        qa_config,
                        suffix="",
                        project=True,
                        project_dim=None):
    """Embed query into vectors for dense retrieval for a hop.

    Runs one extra transformer layer (scoped per hop via ``suffix``) over the
    query sequence embeddings, takes the [CLS] position, and optionally
    projects it to ``project_dim`` with a tanh dense layer.
    """
    # Dropout only during training.
    dropout = qa_config.dropout if is_training else 0.0
    if project and not project_dim:
        project_dim = qa_config.projection_dim
    attention_mask = modeling.create_attention_mask_from_input_mask(
        qry_input_ids, qry_input_mask)
    # hop-wise question encoder: a single transformer layer whose variables
    # are specific to this hop (scope "qry/hop<suffix>").
    with tf.variable_scope("qry/hop" + suffix, reuse=tf.AUTO_REUSE):
        hopwise_qry_seq_emb = modeling.transformer_model(
            input_tensor=qry_seq_emb,
            attention_mask=attention_mask,
            hidden_size=bert_config.hidden_size,
            num_hidden_layers=1,
            num_attention_heads=bert_config.num_attention_heads,
            intermediate_size=bert_config.intermediate_size,
            intermediate_act_fn=modeling.get_activation(bert_config.hidden_act),
            hidden_dropout_prob=dropout,
            attention_probs_dropout_prob=dropout,
            initializer_range=bert_config.initializer_range,
            do_return_all_layers=False)
    # Pool by taking the first ([CLS]) position.
    hopwise_qry_seq_emb = tf.squeeze(hopwise_qry_seq_emb[:, 0:1, :], axis=1)
    if project and project_dim is not None:
        with tf.variable_scope("projection"):
            hopwise_qry_seq_emb = contrib_layers.fully_connected(
                hopwise_qry_seq_emb,
                project_dim,
                activation_fn=tf.nn.tanh,
                reuse=tf.AUTO_REUSE,
                scope="qry_projection")
    return hopwise_qry_seq_emb
def construct_acyclic_matching_along_gradients(morse_complex,
                                               delta=np.inf):
    """ Finds an acyclic matching in filtration order along gradients
    Inspired by [MN13] p. 344 MorseReduce
    :param morse_complex: A morse complex
    :param delta: Only construct matches with a filtration difference smaller than delta
    :return: Matching on morse_complex with approximate filtration.
             The approximate filtration will be less or equal than the old filtration.
    """
    # Work on a copy whose filtration tracks both approximate and exact values,
    # sorted so gradient paths are grown in filtration order.
    new_filtration = ApproxFiltration(morse_complex.filtration.copy(),
                                      exact=morse_complex.filtration)
    morse_complex = morse_complex.copy(filtration=new_filtration).sort_by_filtration()
    matching = Matching(morse_complex)
    reduced = np.full(morse_complex.size, False)  # Array to mark cells as reduced
    # Repeatedly grow gradient paths until every cell has been reduced.
    while not np.all(reduced):
        grow_gradient_path(matching, reduced, delta)
    return matching
def r2tth(x, dist):
    """Convert a numpy array of azimuthal radii to 2 theta (degrees)."""
    angle_rad = np.arctan(x / dist)
    return angle_rad * 180 / np.pi
def is_notebook():
    """Return True when running inside a Jupyter notebook or qtconsole.

    code from https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
    """
    try:
        shell = get_ipython().__class__.__name__
    except NameError:
        # get_ipython is only injected by IPython; a bare `except` here
        # previously hid every other error as well.
        return False
    if shell == 'ZMQInteractiveShell':
        return True  # Jupyter notebook or qtconsole
    if shell == 'TerminalInteractiveShell':
        return False  # Terminal running IPython
    return False  # Other shell type (?)
def crypto_lettre(dico: dict, lettre: str) -> str:
    """
    Return the encrypted letter corresponding to ``lettre`` in the
    substitution table ``dico``.

    :param dico: mapping from plaintext UPPERCASE letters to cipher letters
    :param lettre: UPPERCASE letter to encrypt
    :return: the encrypted UPPERCASE letter
    """
    encrypted = dico[lettre]
    return encrypted
def read_tusc(path):
    """Helper function to read in the Tuscany shapefile"""
    (path_shapefiles, regions, provinces, territories,
     municipalities, crs) = read_files.read_shapefile_data(path, 'shape_files_path.json')
    return read_files.read_shapefiles_in(True, path_shapefiles, regions, crs)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
             correct_label, keep_prob, learning_rate):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    :return: list of per-batch loss values across all epochs
    """
    losses = []
    for epoch in range(epochs):
        for images, labels in get_batches_fn(batch_size):
            # KEEP_PROB and LEARNING_RATE are module-level constants —
            # presumably defined alongside this function; TODO confirm.
            _, loss = sess.run([train_op, cross_entropy_loss],
                               feed_dict={input_image: images,
                                          correct_label: labels,
                                          keep_prob: KEEP_PROB,
                                          learning_rate: LEARNING_RATE})
            losses.append(loss)
        # Prints the loss of the last batch in this epoch.
        print("Epoch {0}/{1}: Loss {2:f}".format(epoch+1, epochs, loss))
    return losses
import warnings
def preprocess_config(config, path, all_cols=True, model_cols=None, modify_config=None):
    """Preprocess the config
    Args:
        * config: original config
        * path: path to load column names
        * all_cols: when True, read design_col_names from the CSV header at ``path``
        * model_cols: optional whitelist; design_col_names is restricted to it
        * modify_config: optional callable applied to config at the end
    Returns:
        * config: preprocess config.
    If design_all_cols is true, then read col_names from train_load
    If model_cols is not None, pick all design_col_names that belongs to model_cols
    Add design_col_names to the csv_col_names in order and remove duplicates
    """
    if config.csv_col_names is None:
        config.csv_col_names = []
    if all_cols:
        # Only the header row is needed to discover the column names.
        config.design_col_names = list(pd.read_csv(path, nrows=1).columns)
        for col in config.csv_col_names:
            if col in config.design_col_names:
                config.design_col_names.remove(col)
    if model_cols is not None:
        # Keep only whitelisted columns; warn about the ones dropped.
        tmp_cols = model_cols.copy()
        for col in model_cols:
            if col not in config.design_col_names:
                tmp_cols.remove(col)
        ext_set = set(config.design_col_names) - set(tmp_cols)
        if len(ext_set) > 0:
            warnings.warn("[%s] are not in the model properties. "
                          "Ignored." % ','.join(ext_set))
        config.design_col_names = tmp_cols
    if config.design_col_names is not None:
        # Deduplicate: design columns are appended after the remaining csv columns.
        for col in config.design_col_names:
            if col in config.csv_col_names:
                config.csv_col_names.remove(col)
        config.csv_col_names += config.design_col_names.copy()
    # mgw looks like a horovod-style multi-GPU wrapper (rank 0 = master) —
    # TODO confirm against its definition.
    config.master_worker = (mgw.rank() == 0) if config.enable_multi_gpu else True
    if modify_config is not None:
        modify_config(config)
    return config
def monoscale(array, color=MONOSCALE_SHADOWLEVEL, as_type=None, slice_size=SLICE_SIZE):
    """
    Converts a grayscale array into a monoscale array.
    This deletes shadow level 1 and sets the other shadow values to a uniform value.
    The Poisson spot markings remain unchanged.
    Note: After conversion, the optical-array may not contain any shaded pixels.
    Also, empty image slices can occur in the image.
    If the optical-array does not contain shaded pixels, the function returns None.
    :param array: optical-array (particle image)
    :type array: numpy-array (1 or 2 dimensional) or list or string
    --- optional params ---
    :param color: value of monoscale color
    :type color: char or integer
    :param as_type: type of returned optical-array (1d array, 2d array, list or string)
    :type as_type: string (values: "STRING", "LIST", "ARRAY", "ARRAY2D")
    :param slice_size: width of the optical-array (number of diodes)
    :type slice_size: integer
    :return: new monoscale optical-array, or None when no shaded pixel remains
    """
    array, data_type, slice_size = check_array_type(array, slice_size)
    new_array = np.zeros(len(array), dtype=int)
    for y in range(int(len(array)/slice_size)):
        for x in range(slice_size):
            # Levels 0/1 (in int or str form) are dropped; 2/3 become `color`;
            # anything else (e.g. Poisson spot markings) is copied through.
            if array[y*slice_size+x] not in [0, 1, '0', '1']:
                if array[y*slice_size+x] in [2, 3]:
                    new_array[y*slice_size+x] = color
                elif array[y * slice_size + x] in ['2', '3']:
                    # NOTE(review): assigning str(color) into an int-dtype
                    # numpy array relies on numpy's string-to-int coercion —
                    # confirm this works across supported numpy versions.
                    new_array[y * slice_size + x] = str(color)
                else:
                    new_array[y*slice_size+x] = array[y*slice_size+x]
    # max(new_array) truthiness: all-zero output means no shaded pixel -> None.
    return convert_array_to_type(new_array, as_type=as_type if as_type else data_type,
                                 slice_size=slice_size) if max(new_array) else None
def _mergeChannels(st):
    """
    function to find longest continuous data chunck and discard the rest

    Merges the stream's traces (gaps filled with 0.0), multiplies the
    channels together to locate samples where every channel has data, and
    trims the input stream to the longest such continuous window.
    """
    st1 = st.copy()
    st1.merge(fill_value=0.0)
    # Common time window covered by all channels.
    start = max([x.stats.starttime for x in st1])
    end = min([x.stats.endtime for x in st1])
    try:
        st1.trim(starttime=start, endtime=end)
    except ValueError:  # if stream too factured end is larger than start
        return obspy.Stream()
    ar_len = min([len(x.data) for x in st1])
    ar = np.ones(ar_len)
    # Product over channels: zero wherever any channel was gap-filled.
    for tr in st1:
        ar *= tr.data
    trace = obspy.Trace(data=np.ma.masked_where(ar == 0.0, ar))
    trace.stats.starttime = start
    trace.stats.sampling_rate = st1[0].stats.sampling_rate
    if (ar == 0.0).any():
        # Split on the masked gaps and keep the longest continuous segment.
        try:
            st2 = trace.split()
        except Exception:
            return obspy.Stream()
        times = np.array([[x.stats.starttime, x.stats.endtime] for x in st2])
        df = pd.DataFrame(times, columns=['start', 'stop'])
        df['duration'] = df['stop'] - df['start']
        max_dur = df[df.duration == df['duration'].max()].iloc[0]
        st.trim(starttime=max_dur.start, endtime=max_dur.stop)
    else:
        st = st1
    return st
from typing import List
import re
def create_pattern(templates: List[str], input_str: str, pretty: bool = False):
    """
    Create all patterns based on a list of input templates applied to the
    input string.

    Args:
        templates: list of regex templates/stencils
        input_str: string to apply templates on to create patterns
        pretty: when True the matched span is replaced with "@" for pretty
            printing; otherwise it is replaced with the template text itself

    Returns:
        defaultdict mapping each generated pattern to its occurrence count
        (templates that do not match contribute nothing).
    """
    # Local import: `defaultdict` is not imported at module level, which
    # previously raised NameError on first call.
    from collections import defaultdict

    counts = defaultdict(int)
    for template in templates:
        # Skip templates that match nowhere in the input.
        if re.search(template, input_str) is None:
            continue
        if pretty:
            counts[re.sub(template, "@", input_str)] += 1
        else:
            # NOTE(review): the template is used verbatim as the replacement
            # string, so backslash escapes in it (e.g. r"\d") would raise
            # re.error -- presumably templates are escape-free; confirm.
            counts[re.sub(template, template, input_str)] += 1
    return counts
import numpy
def dehaze(
    image: xpArray,
    size: int = 21,
    downscale: int = 4,
    minimal_zero_level: float = 0,
    correct_max_level: bool = True,
    in_place: bool = True,
    internal_dtype=None,
):
    """
    Dehazes an image by means of a non-linear low-pass rejection filter:
    a smoothed local zero-level (the haze) is estimated and subtracted,
    optionally followed by a correction that restores local maxima to
    their pre-subtraction level.

    Parameters
    ----------
    image : image to filter
    size : filter size (pixel footprint of the min/max/gaussian filters)
    downscale : downscale factor for speeding up computation of the haze map.
    minimal_zero_level : minimal zero level to subtract
    correct_max_level : if True, rescale the result so local maxima retain
        their pre-subtraction intensity
    in_place : True if the input image may be modified in-place.
    internal_dtype : internal dtype for computation
    Returns
    -------
    Dehazed image (cast back to the input's dtype)
    """
    sp = Backend.get_sp_module()
    xp = Backend.get_xp_module()
    if internal_dtype is None:
        internal_dtype = image.dtype
    # The NumPy backend always computes in float32.
    if type(Backend.current()) is NumpyBackend:
        internal_dtype = numpy.float32
    original_dtype = image.dtype
    image = Backend.to_backend(image, dtype=internal_dtype, force_copy=not in_place)
    # original_image = image.copy()
    minimal_zero_level = xp.asarray(minimal_zero_level, dtype=internal_dtype)
    # get rid of low values due to noise:
    image_zero_level = sp.ndimage.maximum_filter(image, size=3)
    # downscale to speed up the rest of the computation:
    downscaled_image = sp.ndimage.zoom(image_zero_level, zoom=1 / downscale, order=0)
    # find min values:
    image_zero_level = sp.ndimage.minimum_filter(downscaled_image, size=max(1, size // downscale))
    # expand reach of these min values:
    image_zero_level = sp.ndimage.maximum_filter(image_zero_level, size=max(1, size // downscale))
    # smooth out:
    image_zero_level = sp.ndimage.gaussian_filter(image_zero_level, sigma=max(1, size // (2 * downscale)))
    # scale up again:
    image_zero_level = sp.ndimage.zoom(image_zero_level, zoom=downscale, order=1)
    # Padding to recover original image size:
    image_zero_level = fit_to_shape(image_zero_level, shape=image.shape)
    # Ensure that we remove at least the minimum zero level:
    if minimal_zero_level > 0:
        image_zero_level = xp.maximum(image_zero_level, minimal_zero_level)
    # remove zero level:
    image -= image_zero_level
    del image_zero_level
    # clip:
    image = xp.maximum(image, 0, out=image)
    if correct_max_level:
        # get image max level before:
        # twice filtering is to match the extent reached for the zero_level image
        # (see above combination of min then max filters)
        downscaled_image = sp.ndimage.maximum_filter(downscaled_image, size=max(1, size // downscale))
        image_max_level_before = sp.ndimage.gaussian_filter(downscaled_image, sigma=max(1, size // downscale))
        # get image max level after:
        downscaled_image_after = sp.ndimage.maximum_filter(image, size=3)
        downscaled_image_after = sp.ndimage.zoom(downscaled_image_after, zoom=1 / downscale, order=0)
        image_max_level_after = sp.ndimage.maximum_filter(downscaled_image_after, size=max(1, size // downscale))
        image_max_level_after = sp.ndimage.gaussian_filter(image_max_level_after, sigma=max(1, size // downscale))
        # Correction ratio: before/after, with epsilon guarding divide-by-zero
        # and NaNs (from 0/0) forced to zero.
        epsilon = xp.asarray(1e-6, dtype=internal_dtype)
        correction_ratio = image_max_level_before
        correction_ratio /= image_max_level_after + epsilon
        correction_ratio = nan_to_zero(correction_ratio, copy=False)
        del image_max_level_after
        correction_ratio = sp.ndimage.zoom(correction_ratio, zoom=downscale, order=1)
        correction_ratio = fit_to_shape(correction_ratio, shape=image.shape)
        image *= correction_ratio
        del correction_ratio
    # convert back to original dtype
    image = image.astype(dtype=original_dtype, copy=False)
    # from napari import gui_qt, Viewer
    # with gui_qt():
    #     def _c(array):
    #         return backend.to_numpy(array)
    #     viewer = Viewer()
    #     viewer.add_image(_c(image), name='original_image')
    #     viewer.add_image(_c(image_zero_level), name='image_zero_level')
    #     viewer.add_image(_c(image), name='dehazed')
    return image
from datetime import datetime
import requests
def token_price_chart_arken(token, symbol):
    """Look up prices via the Arken API and return a price chart image.

    Tries each configured network in NETWORKS until one answers with 200,
    then renders a candlestick chart for the last ~50 candles.

    :param token: token identifier understood by the Arken chart API
    :param symbol: ticker symbol passed through to the chart renderer
    :return: result of plot_candlestick_chart, or None on any failure
    """
    # Local import: module level only imports `datetime`, not `timedelta`,
    # which previously raised NameError on first call.
    from datetime import timedelta

    timeframe = get_timeframe(INTERVAL) * 50  # number of candlesticks
    end_time = datetime.now()
    # Convert datetimes to Unix timestamps for the API query window.
    start_time = int(datetime.timestamp(end_time - timedelta(minutes=timeframe)))
    end_time = int(datetime.timestamp(end_time))
    # Contact API: try each supported network until one succeeds.
    response = None
    try:
        for network in NETWORKS:
            url = (
                f"https://api.arken.finance/chart/{network}/{token}"
                f"?from={start_time}&interval=T{INTERVAL}&to={end_time}"
            )
            response = requests.get(url)
            if response.status_code == 200:  # successful response
                break
    except requests.RequestException:
        return None
    # No network configured, or none of them answered successfully.
    if response is None or response.status_code != 200:
        return None
    # Parse response
    try:
        prices = response.json()["chartBars"]  # OHLC price, timestamp and volume
        # Re-shape list-of-dicts into dict-of-lists for DataFrame construction.
        prices_dict = {key: [bar[key] for bar in prices] for key in prices[0]}
        # Create and format DataFrame indexed by candle datetime.
        prices_df = pd.DataFrame(prices_dict, dtype=float)
        prices_df = prices_df.rename(columns={"timestamp": "datetime"})
        prices_df["datetime"] = prices_df["datetime"].apply(
            lambda x: datetime.fromtimestamp(int(x))
        )
        prices_df.set_index("datetime", inplace=True)
        return plot_candlestick_chart(prices_df, symbol)
    # IndexError added: an empty "chartBars" list previously crashed on prices[0].
    except (KeyError, IndexError, TypeError, ValueError):
        return None
def code_info(x, version, is_pypy=False):
    """Formatted details of methods, functions, or code."""
    code_object = get_code_object(x)
    return format_code_info(code_object, version, is_pypy=is_pypy)
def substrings(a, b, n):
    """Return the substrings of length n that appear in both a and b.

    The result is de-duplicated and ordered by first occurrence among the
    substrings of `a` (same output as the original nested-scan version).

    :param a: first string
    :param b: second string
    :param n: substring length
    :return: list of common substrings
    """
    # Set membership makes each lookup O(1) instead of scanning all of b's
    # substrings (and the result list) for every candidate.
    b_substrings = set(get_substrings(b, n))
    seen = set()
    common = []
    for candidate in get_substrings(a, n):
        if candidate in b_substrings and candidate not in seen:
            seen.add(candidate)
            common.append(candidate)
    return common
import sys
def all_ids(conn, protein=False, verbose=False):
    """
    Load every taxonomy node and all of its names from the database into the
    module-level ``data`` cache (``data['node']`` / ``data['name']``).

    :param conn: the database connection
    :param protein: Whether the object refers to protein (True) or DNA (False).
        Default=DNA. NOTE(review): currently unused in this function --
        confirm whether it should affect the query.
    :param verbose: More output. NOTE(review): also currently unused here.
    :return: the TaxonNode and TaxonName of the *last* row processed; the
        complete results are stored in the global ``data`` dict
    """
    global data
    cur = conn.cursor()
    cur.execute("select * from nodes")
    sys.stderr.write(f"{bcolors.YELLOW}Collecting all the data. Please stand by.\n{bcolors.ENDC}")
    sys.stderr.write(f"{bcolors.RED}Warning, this will take a long time!!.\n{bcolors.ENDC}")
    # fetchall() materializes the node rows, so reusing `cur` for the inner
    # per-node name query below is safe.
    for p in cur.fetchall():
        t = TaxonNode(*p)
        data['node'][p[0]] = t
        # Collect every name registered for this tax_id.
        cur.execute("select * from names where tax_id = ?", [p[0]])
        n = TaxonName(p[0])
        for r in cur.fetchall():
            if r[2]:
                n.unique = r[2]
            n.set_name(r[3], r[1])
        data['name'][p[0]] = n
    sys.stderr.write(f"{bcolors.GREEN}Done.\n{bcolors.ENDC}")
    # NOTE(review): raises NameError if the nodes table is empty (t, n unbound).
    return t, n
import torch
def mc_control_epsilon_greedy(env, gamma, n_episode, epsilon):
    """
    Obtain the optimal policy with on-policy MC control with epsilon_greedy
    @param env: OpenAI Gym environment
    @param gamma: discount factor
    @param n_episode: number of episodes
    @param epsilon: the trade-off between exploration and exploitation
    @return: the optimal Q-function, and the optimal policy
    """
    n_action = env.action_space.n
    G_sum = defaultdict(float)  # cumulative return per (state, action)
    N = defaultdict(int)  # visit count per (state, action)
    Q = defaultdict(lambda: torch.empty(n_action))
    for episode in range(n_episode):
        states_t, actions_t, rewards_t = run_episode(env, Q, epsilon, n_action)
        return_t = 0
        G = {}
        # Walk the episode backwards accumulating discounted returns; later
        # (backward) writes overwrite earlier ones, so G ends up holding the
        # FIRST-visit return of each (state, action) pair.
        for state_t, action_t, reward_t in zip(states_t[::-1], actions_t[::-1], rewards_t[::-1]):
            return_t = gamma * return_t + reward_t
            G[(state_t, action_t)] = return_t
        for state_action, return_t in G.items():
            state, action = state_action
            # NOTE(review): the <= 21 filter looks Blackjack-specific (player
            # hand value in state[0]) -- confirm before reuse elsewhere.
            if state[0] <= 21:
                G_sum[state_action] += return_t
                N[state_action] += 1
                # Running-mean estimate of the action value.
                Q[state][action] = G_sum[state_action] / N[state_action]
    # Greedy policy extracted from the final Q estimates.
    policy = {}
    for state, actions in Q.items():
        policy[state] = torch.argmax(actions).item()
    return Q, policy
import logging
import os
def v_slide(params):
    """Scan one vertical strip of the .scn slide and queue tissue tiles.

    Reads 299x299 tiles every 150 px down the strip starting at
    params["start_point"]. Tiles whose mean green value is below the
    threshold (likely tissue rather than bright background) are pushed to
    the queue and optionally saved to disk as PNG files.

    :param params: dict with keys "start_point" ((x, y) tuple), "bound_y"
        (exclusive lower y bound), "tile_path", "save_tiles" (bool) and
        "queue" (multiprocessing queue)
    :return: the worker's pid, or None when the slide cannot be opened
    """
    paths = Paths()
    # Initialize before the try block so the finally clause never hits an
    # unbound name when OpenSlide() itself raises.
    scn_file = None
    try:
        try:
            scn_file = OpenSlide(paths.slice_80)
        except OpenSlideUnsupportedFormatError:
            logging.error("OpenSlideUnsupportedFormatError!")
            return
        except OpenSlideError:
            logging.error("OpenSlideError!")
            return
        start_point = params["start_point"]
        x0 = start_point[0]
        y0 = start_point[1]
        bound_y = params["bound_y"]
        tile_path = params["tile_path"]
        save_tiles = params["save_tiles"]
        q = params["queue"]
        # Tiles whose mean green channel exceeds this are treated as background.
        AVG_THRESHOLD = 170
        pid = os.getpid()
        data = {}
        while y0 < bound_y:
            img = scn_file.read_region((x0, y0), 0, (299, 299))
            green_c_avg = np.average(np.array(img)[:, :, 1])
            if green_c_avg < AVG_THRESHOLD:
                file_name = "scn80" + "_" + str(x0) + "_" + str(y0) + ".png"
                # Keep the PIL image and the numpy view under separate names:
                # the original rebound `img` to the array and then called
                # `img.save(...)`, which raised AttributeError.
                arr = np.array(img)[:, :, 0:3]  # drop the alpha channel
                data['pred'] = arr
                data['xlabel'] = np.array([x0])
                data['ylabel'] = np.array([y0])
                q.put(dict(data))
                if save_tiles:
                    img.save(os.path.join(tile_path, file_name))
            y0 += 150
        return pid
    finally:
        if scn_file is not None:
            scn_file.close()
def svn_wc_maybe_set_repos_root(*args):
    """
    Thin SWIG-generated wrapper delegating to the native libsvn_wc binding.

    svn_wc_maybe_set_repos_root(svn_wc_adm_access_t adm_access, char path, char repos,
        apr_pool_t pool) -> svn_error_t
    """
    return _wc.svn_wc_maybe_set_repos_root(*args)
def graph_semantics(g):
    """Translate a networkx.DiGraph into compound and reaction dicts for grid_land.

    Nodes flagged with a truthy "reaction" attribute become reactions (their
    in-edges supply reactants, out-edges supply products); all other nodes
    become compounds.
    """
    compounds = {}
    reactions = {}
    for node, attributes in g.nodes.items():
        if attributes.get("reaction"):
            incoming = [edge[0] for edge in g.in_edges(node)]
            outgoing = [edge[1] for edge in g.out_edges(node)]
            reactions[node] = create_reaction(incoming, outgoing, attributes)
        else:
            compounds[node] = create_compound(attributes)
    return compounds, reactions
def fetching_latest_quiz_statistics(request_ctx, course_id, quiz_id, all_versions, **request_kwargs):
    """
    Fetch statistics for all quiz versions, or for a specific quiz version,
    in which case the output is guaranteed to represent the latest and most
    current version of the quiz. A 200 OK response code indicates success.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param course_id: (required) ID
    :type course_id: string
    :param quiz_id: (required) ID
    :type quiz_id: string
    :param all_versions: (required) Whether the statistics report should include all submissions attempts.
    :type all_versions: boolean
    :return: Fetching the latest quiz statistics
    :rtype: requests.Response (with void data)
    """
    endpoint = '/v1/courses/{course_id}/quizzes/{quiz_id}/statistics'.format(
        course_id=course_id, quiz_id=quiz_id)
    request_url = request_ctx.base_api_url + endpoint
    return client.get(
        request_ctx,
        request_url,
        payload={'all_versions': all_versions},
        **request_kwargs)
import os
def make_api_key(size=32):
    """Generate a random API key, as unpredictable as possible.

    :param size: number of random bytes to generate; the base58-encoded
        result will be longer than this
    :return: base58-encoded random key
    """
    # TODO: os.urandom collects entropy from OS devices and may block on
    # Linux when entropy is scarce, which an attacker could exploit for a
    # DoS; good enough for now, but worth revisiting.
    raw_bytes = os.urandom(size)
    return b58encode(raw_bytes)
def relocate_estimates_data(ws):
    """Rearrange the country/income/region estimate tables on the worksheet.

    For each group the same sequence of cell moves and number formats is
    applied, differing only in source rows and destination rows, so the
    repetitive call list is driven by a parameter table.
    """
    # (src_rows, dst_row, value_fmt_rows, pct_dst_row, pct_fmt_rows)
    groups = [
        ((7, 11), 34, (34, 40), 42, (42, 48)),   # country group
        ((16, 19), 50, (51, 53), 57, (58, 60)),  # income group
        ((23, 30), 64, (65, 72), 75, (76, 83)),  # region group
    ]
    for (src_start, src_end), dst_row, value_rows, pct_row, pct_rows in groups:
        ws = relocate(ws, 'B', src_start, src_end, 'B', dst_row)
        ws = relocate(ws, 'D', src_start, src_end, 'C', dst_row)
        ws = format_numbers(ws, ['C'], value_rows, 'Comma [0]', 3)
        ws = relocate(ws, 'B', src_start, src_end, 'E', dst_row)
        ws = relocate(ws, 'E', src_start, src_end, 'F', dst_row)
        ws = format_numbers(ws, ['F'], value_rows, 'Comma [0]', 0)
        ws = relocate(ws, 'B', src_start, src_end, 'H', dst_row)
        ws = relocate(ws, 'G', src_start, src_end, 'I', dst_row)
        ws = format_numbers(ws, ['I'], value_rows, 'Comma [0]', 3)
        ws = relocate(ws, 'B', src_start, src_end, 'B', pct_row)
        ws = relocate(ws, 'I', src_start, src_end, 'C', pct_row)
        ws = format_numbers(ws, ['C'], pct_rows, 'Percent', 3)
    return ws
def replace_channel_in_key(meta, new_band_id):
    """
    Replace the band id embedded in a CX header key.

    Parameters
    ----------
    meta : str
        CX line header.
    new_band_id : int
        id for the new (zoom) band.

    Returns
    -------
    new_meta : str
        new CX line header with the channel field replaced.

    Notes
    -----
    |
    | **Configuration:**
    |
    | INDEX_KEY_CHANNEL: const_mapred.py (position of the channel id in the
      SF_SEP-separated key; it is replaced by the new channel id [zoom])
    |
    |
    | **TO DO:**
    |
    | Create general functions to create and read the key.
    """
    fields = meta.split(SF_SEP)
    prefix = SF_SEP.join(fields[:INDEX_KEY_CHANNEL])
    # Keep whatever followed the first FIELD_SEP of the old channel field.
    channel_suffix = fields[INDEX_KEY_CHANNEL].split(FIELD_SEP)[1]
    return prefix + SF_SEP + str(new_band_id) + FIELD_SEP + channel_suffix
def check_cells_fit(cell_no, min_cell_distance, space_range=((0, 10), (0, 10), None)):
    """Return True if `cell_no` cells with the given minimal spacing fit
    into the space described by `space_range`.

    :param cell_no: number of cells to place
    :param min_cell_distance: minimal distance between cells
    :param space_range: (x, y, z) extents as (lo, hi) pairs; use None for a
        dimension that does not exist
    :return: True if the cells fit, False otherwise
    """
    # Product of the extents of all existing dimensions.
    full_dim = 1.0
    for dim_range in space_range:
        if dim_range is not None:
            full_dim *= dim_range[1] - dim_range[0]
    # NOTE(review): capacity is modelled as total extent / distance
    # regardless of dimensionality -- confirm this is the intended model.
    return full_dim / min_cell_distance >= cell_no
import argparse
import os
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for the garminbackup tool.

    :return: Namespace object holding parsed arguments as attributes.
        This object may be directly used by garminexport/garminbackup.py.
    """
    parser = argparse.ArgumentParser(
        prog="garminbackup",
        description=(
            "Performs incremental backups of activities for a "
            "given Garmin Connect account. Only activities that "
            "aren't already stored in the backup directory will "
            "be downloaded."))
    # positional args
    parser.add_argument(
        "username", metavar="<username>", type=str, help="Account user name.")
    # optional args
    parser.add_argument(
        "--password", type=str, help="Account password.")
    parser.add_argument(
        "--backup-dir", metavar="DIR", type=str,
        help="Destination directory for downloaded activities. Default: ./activities/",
        default=os.path.join(".", "activities"))
    parser.add_argument(
        "--log-level", metavar="LEVEL", type=str,
        help="Desired log output level (DEBUG, INFO, WARNING, ERROR). Default: INFO.",
        default="INFO")
    # action='append' with default=None: leaving the option out means
    # "all formats" (resolved by the caller); each -f flag adds one format.
    parser.add_argument(
        "-f", "--format", choices=supported_export_formats,
        default=None, action='append',
        help="Desired output formats ({}). Default: ALL.".format(', '.join(supported_export_formats)))
    parser.add_argument(
        "-E", "--ignore-errors", action='store_true',
        help="Ignore errors and keep going. Default: FALSE")
    # supported_export_formats / DEFAULT_MAX_RETRIES are module-level constants.
    parser.add_argument(
        "--max-retries", metavar="NUM", default=DEFAULT_MAX_RETRIES,
        type=int,
        help=("The maximum number of retries to make on failed attempts to fetch an activity. "
              "Exponential backoff will be used, meaning that the delay between successive attempts "
              "will double with every retry, starting at one second. DEFAULT: {}").format(DEFAULT_MAX_RETRIES))
    return parser.parse_args()
import io
def _read_sub_atoms(atom: tuple) -> list:
    """Parse the child atoms contained in an already-read atom's payload.

    Wraps the payload bytes in an in-memory stream so the regular atom
    parser can be reused on data that is not the original file.

    :param atom: tuple whose element 0 is the full atom size and element 2
        the raw payload bytes
    :return: list of sub atoms parsed from the payload
    """
    payload_stream = io.BytesIO(atom[2])
    # atom[0] is the full atom size, so subtract the 8-byte header.
    return _read_atoms(payload_stream, atom[0] - 8)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.