| content | sha1 | id |
|---|---|---|
from typing import List

import numpy as np
import pycountry as pyc  # assumed: `pyc` refers to the pycountry package, given the countries.get(...) usage below
def convert_country_codes(source_codes: List[str], source_format: str, target_format: str,
throw_error: bool = False) -> List[str]:
"""
Convert country codes, e.g., from ISO_2 to full name.
Parameters
----------
source_codes: List[str]
List of codes to convert.
source_format: str
Format of the source codes (alpha_2, alpha_3, name, ...)
target_format: str
Format to which code must be converted (alpha_2, alpha_3, name, ...)
throw_error: bool (default: False)
Whether to throw an error if an attribute does not exist.
Returns
-------
target_codes: List[str]
List of converted codes.
"""
target_codes = []
for code in source_codes:
try:
country_codes = pyc.countries.get(**{source_format: code})
if country_codes is None:
raise KeyError(f"Data is not available for code {code} of type {source_format}.")
target_code = getattr(country_codes, target_format)
except (KeyError, AttributeError) as e:
if throw_error:
raise e
target_code = np.nan
target_codes += [target_code]
return target_codes
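
# Hedged usage sketch (not part of the original snippet): assumes pycountry is
# installed and imported as `pyc` above. Unknown codes fall back to np.nan
# unless throw_error=True.
if __name__ == "__main__":
    print(convert_country_codes(["US", "FR"], "alpha_2", "name"))   # e.g. ['United States', 'France']
    print(convert_country_codes(["XX"], "alpha_2", "name"))         # [nan]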
|
7589dec9ccec5edc7bf5ea356b40fac3898c7c77
| 3,643,200
|
def get_simple_lca_length(std_tree, test_gold_dict, node1, node2):
"""
get the corresponding node of node1 and node2 on std tree.
calculate the lca distance between them
Exception:
Exception("[Error: ] std has not been lca initialized yet")
std tree need to be initialized before running this function
example:
std_tree.get_lca_preprocess()
"""
if std_tree.depth_array is None:
raise Exception("[Error: ] std has not been lca initialized yet")
std_id_node_dict = std_tree.get_id_node_dict()
tmp_node1 = node1
tmp_node2 = test_gold_dict[node2]
if tmp_node2 is None:
raise Exception("[Error: ]Can not find the corresponding node in std tree. ")
lca_id = std_tree.get_lca(tmp_node1.get_id(), tmp_node2.get_id())
if lca_id == -1:
return config_utils.DINF
lca_node = std_id_node_dict[lca_id]
return tmp_node1.root_length + tmp_node2.root_length - 2 * lca_node.root_length
|
8433259814fe656bdbdd6997ca613b30c458f8b8
| 3,643,201
|
def edit_catagory(catagory_id):
"""edit catagory"""
name = request.form.get('name')
guest_id = session['guest_id']
exists = db.session.query(Catalogs).filter_by(name=name,
guest_id=guest_id).scalar()
if exists:
return abort(404)
if name == '':
return redirect(url_for('home.home'))
catagory = db.session.query(Catalogs).filter_by(id=catagory_id).one()
oldname = catagory.name
catagory.name = name
db.session.add(catagory)
db.session.commit()
flash(f"Catagory {oldname} has been updated to {catagory.name}")
return redirect(url_for('home.home'))
|
a0d342490881968f39cf4636fb424176c6608e4a
| 3,643,202
|
def match_patterns(name, name_w_pattern, patterns):
"""March patterns to filename.
Given a SPICE kernel name, a SPICE Kernel name with patterns, and the
possible patterns, provide a dictionary with the patterns as keys and
the patterns values as value after matching it between the SPICE Kernel
name with patterns and without patterns.
For example, given the following arguments:
* name: ``insight_v01.tm``
* name_w_pattern: ``insight_v$VERSION.tm``
The function will return: ``{VERSION: '01'}``
:param name: Name of the SPICE Kernel
:type name: str
:param name_w_pattern: Name of the SPICE Kernel with patterns
:type name_w_pattern: str
:param patterns: List of the possible patterns present in the
SPICE Kernel name with patterns
:type patterns: list
:return: Dictionary providing the patterns and their value as defined
by the SPICE kernel
:rtype: dict
"""
#
# This list will help us determine the order of the patterns in the file
# name because later on the patterns need to be correlated with the
# pattern values.
#
pattern_name_order = {}
name_check = name_w_pattern
for pattern in patterns:
pattern_name_order[pattern["#text"]] = name_w_pattern.find(pattern["#text"])
name_check = name_check.replace(
"$" + pattern["#text"], "$" * int(pattern["@length"])
)
#
    # Convert the pattern_name_order dictionary into an ordered list
#
pattern_name_order = list(
{
k: v
for k, v in sorted(pattern_name_order.items(), key=lambda item: item[1])
}.keys()
)
#
# Generate a list of values extracted from the comparison of the
# original file and the file with patterns.
#
values_list = []
value = ""
value_bool = False
for i in range(len(name_check)):
if (name_check[i] == name[i]) and (not value_bool):
continue
if (name_check[i] == name[i]) and value_bool:
value_bool = False
values_list.append(value)
value = ""
elif (name_check[i] == "$") and (not value_bool):
value_bool = True
value += name[i]
elif (name_check[i] == "$") and value_bool:
value += name[i]
        else:
            raise ValueError(
                f"Kernel name {name} does not match the pattern {name_w_pattern}."
            )
#
# Correlate the values with their position in the file name with
# patterns.
#
values = {}
for i in range(len(values_list)):
values[pattern_name_order[i]] = values_list[i]
return values
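
# Hedged usage sketch (added for illustration; the pattern-dict format with
# "#text" and "@length" keys is inferred from the code above):
if __name__ == "__main__":
    print(match_patterns(
        "insight_v01.tm",
        "insight_v$VERSION.tm",
        [{"#text": "VERSION", "@length": "2"}],
    ))
    # Expected: {'VERSION': '01'}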
|
a54b7f1fcda67b5649f92a21f4711874dd226ee9
| 3,643,203
|
def _generate_good_delivery_token_email(request, good_delivery, msg=''):
"""
Send an email to user with good_delivery activation URL
and return the token
:type request: HttpRequest
:type good_delivery: GoodDelivery
:type msg: String
    :param request: current HttpRequest
    :param good_delivery: good delivery to confirm
    :param msg: message to send
:return: generated token
"""
if good_delivery.delivered_to.email:
# build good_delivery jwt
token = good_delivery.build_jwt()
# build absolute URI, attach token and send email
uri = request.build_absolute_uri(reverse('good_delivery:user_use_token'))
mail_params = {'hostname': settings.HOSTNAME,
'user': good_delivery.delivered_to,
'url': '{}?token={}'.format(uri, token),
'added_text': msg
}
m_subject = _('{} - {}').format(settings.HOSTNAME, good_delivery)
send_custom_mail(subject=m_subject,
recipients=[good_delivery.delivered_to],
body=settings.NEW_DELIVERY_WITH_TOKEN_CREATED,
params=mail_params)
return token
|
4567c3d0ad3f2d65c850ed5291e602cb552b11cb
| 3,643,204
|
import pulumi
def get_flavor(disk=None, min_disk=None, min_ram=None, name=None, ram=None, region=None, rx_tx_factor=None, swap=None, vcpus=None):
"""
Use this data source to get the ID of an available OpenStack flavor.
"""
__args__ = dict()
__args__['disk'] = disk
__args__['minDisk'] = min_disk
__args__['minRam'] = min_ram
__args__['name'] = name
__args__['ram'] = ram
__args__['region'] = region
__args__['rxTxFactor'] = rx_tx_factor
__args__['swap'] = swap
__args__['vcpus'] = vcpus
__ret__ = pulumi.runtime.invoke('openstack:compute/getFlavor:getFlavor', __args__)
return GetFlavorResult(
is_public=__ret__.get('isPublic'),
region=__ret__.get('region'),
id=__ret__.get('id'))
|
f6689712d4bde04ae43bb4c999b04df34b1db089
| 3,643,205
|
from typing import Tuple
def deal_hands(deck: Deck) -> Tuple[Deck, Deck, Deck, Deck]:
"""Deal the cards in the deck into four hands"""
return (deck[0::4], deck[1::4], deck[2::4], deck[3::4])
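
# Hedged usage sketch: `Deck` is a type alias from the original module (assumed
# to be a list-like sequence of cards); plain integers stand in for cards here.
if __name__ == "__main__":
    north, east, south, west = deal_hands(list(range(8)))
    print(north, east, south, west)  # [0, 4] [1, 5] [2, 6] [3, 7]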
|
151def07061a23df6c80f2be6d9015e3efbd515e
| 3,643,206
|
from sqlalchemy import select
def add_new_publication_group(project):
"""
Create a new publication_group
POST data MUST be in JSON format
POST data SHOULD contain the following:
name: name for the group
published: publication status for the group, 0 meaning unpublished
"""
request_data = request.get_json()
if not request_data:
return jsonify({"msg": "No data provided."}), 400
groups = get_table("publication_group")
connection = db_engine.connect()
insert = groups.insert()
new_group = {
"name": request_data.get("name", None),
"published": request_data.get("published", 0)
}
try:
result = connection.execute(insert, **new_group)
new_row = select([groups]).where(groups.c.id == result.inserted_primary_key[0])
new_row = dict(connection.execute(new_row).fetchone())
result = {
"msg": "Created new group with ID {}".format(result.inserted_primary_key[0]),
"row": new_row
}
return jsonify(result), 201
except Exception as e:
result = {
"msg": "Failed to create new group",
"reason": str(e)
}
return jsonify(result), 500
finally:
connection.close()
|
0890b71f972149bd860768dfeb5a377bd4fd28b0
| 3,643,207
|
def test_gmres_against_graph_scipy(n, tensor_type, dtype, error, preconditioner, solve_method):
"""
Feature: ALL TO ALL
Description: test cases for [N x N] X [N X 1]
Expectation: the result match scipy in graph
"""
if not _is_valid_platform(tensor_type):
return
    # CSRTensor input to gmres is not supported in MindSpore graph mode, so skip it.
if tensor_type == "CSRTensor":
return
class TestNet(nn.Cell):
def __init__(self, solve_method):
super(TestNet, self).__init__()
self.solve_method = solve_method
def construct(self, a, b, x0, tol, restart, maxiter, m, atol):
return msp.sparse.linalg.gmres(a, b, x0, tol=tol, restart=restart, maxiter=maxiter, M=m,
atol=atol, solve_method=self.solve_method)
onp.random.seed(0)
a = create_full_rank_matrix((n, n), dtype)
b = onp.random.rand(n).astype(dtype)
x0 = onp.zeros_like(b).astype(dtype)
m = _fetch_preconditioner(preconditioner, a)
tol = float(onp.finfo(dtype=dtype).eps)
atol = tol
restart = n
maxiter = None
scipy_output, _ = osp.sparse.linalg.gmres(a, b, x0, tol=tol, restart=restart, maxiter=maxiter, M=m, atol=atol)
# Graph Mode
context.set_context(mode=context.GRAPH_MODE)
a = to_tensor((a, tensor_type))
b = Tensor(b)
x0 = Tensor(x0)
m = to_tensor((m, tensor_type)) if m is not None else m
# Not in graph's construct
ms_output, _ = msp.sparse.linalg.gmres(a, b, x0, tol=tol, restart=restart, maxiter=maxiter,
M=m, atol=atol)
assert onp.allclose(scipy_output, ms_output.asnumpy(), rtol=error, atol=error)
    # Within graph's construct
ms_net_output, _ = TestNet(solve_method)(a, b, x0, tol, restart, maxiter, m, atol)
assert onp.allclose(scipy_output, ms_net_output.asnumpy(), rtol=error, atol=error)
|
47fde403d403138dd1746cab532f7c7cf9b2f5a3
| 3,643,208
|
from mpi4py import MPI
def wtime() -> float:
    """
    :return: the current MPI wall-clock time in seconds as a floating point number.
    """
    return MPI.Wtime()
|
862b18fc688fd5e34ccfc7a2f986bdb2ceb98ed4
| 3,643,209
|
def survival_df(data, t_col="t", e_col="e", label_col="Y", exclude_col=[]):
"""
Transform original DataFrame to survival dataframe that would be used in model
training or predicting.
Parameters
----------
data: DataFrame
Survival data to be transformed.
t_col: str
Column name of data indicating time.
e_col: str
Column name of data indicating events or status.
label_col: str
Name of new label in transformed survival data.
exclude_col: list
Columns to be excluded.
Returns
-------
DataFrame:
        Transformed survival data. Negative values in label are taken as right censored.
"""
x_cols = [c for c in data.columns if c not in [t_col, e_col] + exclude_col]
    # Negative values are taken as right censored
data.loc[:, label_col] = data.loc[:, t_col]
data.loc[data[e_col] == 0, label_col] = - data.loc[data[e_col] == 0, label_col]
return data[x_cols + [label_col]]
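
# Hedged usage sketch (assumes pandas is available and imported as pd elsewhere
# in the module):
if __name__ == "__main__":
    import pandas as pd
    df = pd.DataFrame({"t": [5.0, 3.0, 8.0], "e": [1, 0, 1], "x1": [0.2, 0.5, 0.1]})
    print(survival_df(df))
    # Returns the feature column(s) plus a label column "Y" holding t for events
    # and -t for right-censored rows, i.e. Y = [5.0, -3.0, 8.0] here.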
|
8d35c27a75340d5c6535727e0e419fc0548d6094
| 3,643,210
|
from datetime import datetime
def get_date_today():
"""Get date today in str format such as 20201119. """
return datetime.today().strftime('%Y%m%d')
|
d5e69607dbf4b8c829cfe30ea0335f46c7d2512a
| 3,643,211
|
def check_model_in_dict(name, model_dict):
"""
Check whether the new model, name, exists in all previously considered models,
    held in model_dict.
[previously in construct_models]
If name has not been previously considered, False is returned.
"""
    # Returning True indicates the model has already been considered.
al_name = alph(name)
n_qub = get_num_qubits(name)
if al_name in model_dict[n_qub]:
return True # todo -- make clear if in legacy or running db
else:
return False
|
9c3cf9be0c973872ab63e8de57f6b5a26ea53838
| 3,643,212
|
import json
def generate_api_key(request):
"""Handles AJAX requests for a new API key."""
new_key = ApiUser.objects.get_unique_key()
return HttpResponse(json.dumps({'token' : new_key}), content_type="application/javascript")
|
9e29a82c3a967d7a98537ec680a0a2bfe068a88c
| 3,643,213
|
def input_output_details(interpreter):
"""
input_output_details:
    Used to get the input and output tensor details from the interpreter
"""
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
return input_details, output_details
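
# Hedged usage sketch (comment-only, since it needs a real .tflite model file;
# "model.tflite" below is a hypothetical path):
#
#     interpreter = tf.lite.Interpreter(model_path="model.tflite")
#     interpreter.allocate_tensors()
#     input_details, output_details = input_output_details(interpreter)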
|
3024f2a6c91a533c3aff858ee3a1db11d360bb25
| 3,643,214
|
def charge_drone_battery(drone):
"""Handle the drone battery charging operation."""
battery_level = drone["State"]["Battery"]
if float(battery_level) < 95:
# Increase battery level
drone["State"]["Battery"] = float(battery_level) + 5
else:
# If battery >= 95 set battery level to 100%
drone["State"]["Battery"] = 100
dronelog = gen_DroneLog("Drone %s" % (
str(drone["DroneID"])), "charging complete, returning to Active state")
send_dronelog(dronelog)
drone["State"]["Status"] = "Active"
http_api_log = gen_HttpApiLog("Drone %s" % (
str(drone["DroneID"])), "PUT DroneLog", "Controller")
send_http_api_log(http_api_log)
return drone
|
2f7d955a44310215883ac5bed57fb27463a66315
| 3,643,215
|
def expirations(self, symbol, useDatetime=True, block: bool = True):
"""Gets list of available expiration dates for a symbol.
Calls the 'market/options/expirations.json' endpoint to get list of all
exp_dates available for some given equity.
Args:
symbol: Specify the stock symbol against which to query
useDatetime: Specify whether to return datetime objects, or strings
block: Specify whether to block thread if request exceeds rate limit
Returns:
List of dates (datetime obj, or string)
Raises:
RateLimitException: If block=False, rate limit problems will be raised
Example:
.. code-block:: python
a.expirations('spy')
# [ datetime.datetime(2022, 3, 18, 0, 0), ... ]
a.expirations('spy', useDatetime = False)
# [ '2022-03-18', ... ]
"""
# Create request
req = Expirations(
auth=self.auth, account_nbr=self.account_nbr, block=block, symbol=symbol
)
# Add in the extra information
req.useDatetime = useDatetime
# result
result = req.request()
return result
|
24a33bdac8da42be9433400b32c16fa4fb860766
| 3,643,216
|
import torch as T
def track(name, x, direction=None):
"""
An identity function that registers hooks to
track the value and gradient of the specified tensor.
Here is an example of how to track an intermediate output ::
input = ...
conv1 = nnt.track('op', nnt.Conv2d(shape, 4, 3), 'all')
conv2 = nnt.Conv2d(conv1.output_shape, 5, 3)
intermediate = conv1(input)
output = nnt.track('conv2_output', conv2(intermediate), 'all')
loss = T.sum(output ** 2)
loss.backward(retain_graph=True)
d_inter = T.autograd.grad(loss, intermediate, retain_graph=True)
d_out = T.autograd.grad(loss, output)
tracked = nnt.eval_tracked_variables()
testing.assert_allclose(tracked['conv2_output'], nnt.utils.to_numpy(output))
testing.assert_allclose(np.stack(tracked['grad_conv2_output']), nnt.utils.to_numpy(d_out[0]))
testing.assert_allclose(tracked['op'], nnt.utils.to_numpy(intermediate))
for d_inter_, tracked_d_inter_ in zip(d_inter, tracked['grad_op_output']):
testing.assert_allclose(tracked_d_inter_, nnt.utils.to_numpy(d_inter_))
:param name:
name of the tracked tensor.
:param x:
tensor or module to be tracked.
If module, the output of the module will be tracked.
:param direction:
there are 4 options
``None``: tracks only value.
``'forward'``: tracks only value.
``'backward'``: tracks only gradient.
``'all'``: tracks both value and gradient.
Default: ``None``.
:return: `x`.
"""
assert isinstance(name, str), 'name must be a string, got %s' % type(name)
assert isinstance(x, (T.nn.Module, T.Tensor)), 'x must be a Torch Module or Tensor, got %s' % type(x)
assert direction in (
'forward', 'backward', 'all', None), 'direction must be None, \'forward\', \'backward\', or \'all\''
if isinstance(x, T.nn.Module):
if direction in ('forward', 'all', None):
def _forward_hook(module, input, output):
_TRACKS[name] = output.detach()
hooks[name] = x.register_forward_hook(_forward_hook)
if direction in ('backward', 'all'):
def _backward_hook(module, grad_input, grad_output):
_TRACKS['grad_' + name + '_output'] = tuple([grad_out.detach() for grad_out in grad_output])
hooks['grad_' + name + '_output'] = x.register_backward_hook(_backward_hook)
else:
if direction in ('forward', 'all', None):
_TRACKS[name] = x.detach()
if direction in ('backward', 'all'):
def _hook(grad):
_TRACKS['grad_' + name] = tuple([grad_.detach() for grad_ in grad])
hooks['grad_' + name] = x.register_hook(_hook)
return x
|
81ae80bc8b77c16d493befdb209fe648e9a07c96
| 3,643,217
|
from pathlib import Path
from typing import Callable
from datetime import datetime
def expected_l1_ls8_folder(
l1_ls8_folder: Path,
offset: Callable[[Path, str], str] = relative_offset,
organisation="usgs.gov",
collection="1",
l1_collection="1",
lineage=None,
):
"""
:param collection: The collection of the current scene
:param l1_collection: The collection of the original landsat l1 scene
:return:
"""
org_code = organisation.split(".")[0]
product_name = f"{org_code}_ls8c_level1_{collection}"
if collection == "2":
processing_datetime = datetime(2020, 9, 7, 19, 30, 5)
cloud_cover = 93.28
points_model = 125
points_version = 5
rmse_model_x = 4.525
rmse_model_y = 5.917
software_version = "LPGS_15.3.1c"
uuid = "d9221c40-24c3-5356-ab22-4dcac2bf2d70"
quality_tag = "QA_PIXEL"
else:
processing_datetime = datetime(2017, 4, 5, 11, 17, 36)
cloud_cover = 93.22
points_model = 66
points_version = 4
rmse_model_x = 4.593
rmse_model_y = 5.817
software_version = "LPGS_2.7.0"
uuid = "a780754e-a884-58a7-9ac0-df518a67f59d"
quality_tag = "BQA"
processing_date = processing_datetime.strftime("%Y%m%d")
return {
"$schema": "https://schemas.opendatacube.org/dataset",
"id": uuid,
"label": f"{product_name}-0-{processing_date}_090084_2016-01-21",
"product": {
"name": product_name,
"href": f"https://collections.dea.ga.gov.au/product/{product_name}",
},
"properties": {
"datetime": datetime(2016, 1, 21, 23, 50, 23, 54435),
# The minor version comes from the processing date (as used in filenames to distinguish reprocesses).
"odc:dataset_version": f"{collection}.0.{processing_date}",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": processing_datetime,
"odc:producer": organisation,
"odc:product_family": "level1",
"odc:region_code": "090084",
"eo:cloud_cover": cloud_cover,
"eo:gsd": 15.0,
"eo:instrument": "OLI_TIRS",
"eo:platform": "landsat-8",
"eo:sun_azimuth": 74.007_443_8,
"eo:sun_elevation": 55.486_483,
"landsat:collection_category": "T1",
"landsat:collection_number": int(l1_collection),
"landsat:data_type": "L1TP",
"landsat:geometric_rmse_model_x": rmse_model_x,
"landsat:geometric_rmse_model_y": rmse_model_y,
"landsat:ground_control_points_model": points_model,
"landsat:ground_control_points_version": points_version,
"landsat:landsat_product_id": f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1",
"landsat:landsat_scene_id": "LC80900842016021LGN02",
"landsat:processing_software_version": software_version,
"landsat:station_id": "LGN",
"landsat:wrs_path": 90,
"landsat:wrs_row": 84,
},
"crs": "epsg:32655",
"geometry": {
"coordinates": [
[
[879307.5, -3776885.4340469087],
[879307.5, -3778240.713151076],
[839623.3108524992, -3938223.736900397],
[832105.7835592609, -3953107.5],
[831455.8296215904, -3953107.5],
[831453.7930575205, -3953115.0],
[819969.5411349908, -3953115.0],
[641985.0, -3906446.160824098],
[641985.0, -3889797.3351159613],
[685647.6920251067, -3717468.346156044],
[688909.3673333039, -3714585.0],
[708011.4230769231, -3714585.0],
[879315.0, -3761214.3020833335],
[879315.0, -3776857.8139976147],
[879307.5, -3776885.4340469087],
]
],
"type": "Polygon",
},
"grids": {
"default": {
"shape": (60, 60),
"transform": (
3955.5,
0.0,
641_985.0,
0.0,
-3975.500_000_000_000_5,
-3_714_585.0,
0.0,
0.0,
1.0,
),
},
"panchromatic": {
"shape": (60, 60),
"transform": (
3955.25,
0.0,
641_992.5,
0.0,
-3975.25,
-3_714_592.5,
0.0,
0.0,
1.0,
),
},
},
"measurements": {
"coastal_aerosol": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B1.TIF",
)
},
"blue": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B2.TIF",
)
},
"green": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B3.TIF",
)
},
"red": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B4.TIF",
)
},
"nir": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B5.TIF",
)
},
"swir_1": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B6.TIF",
)
},
"swir_2": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B7.TIF",
)
},
"panchromatic": {
"grid": "panchromatic",
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B8.TIF",
),
},
"cirrus": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B9.TIF",
)
},
"lwir_1": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B10.TIF",
)
},
"lwir_2": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_B11.TIF",
)
},
"quality": {
"path": offset(
l1_ls8_folder,
f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_{quality_tag}.TIF",
)
},
},
"accessories": {
"metadata:landsat_mtl": {
"path": f"LC08_L1TP_090084_20160121_{processing_date}_0{l1_collection}_T1_MTL.txt"
}
},
"lineage": lineage or {},
}
|
16b934d3ea6c4a3daee3bde2a37bd5a7a48856b9
| 3,643,218
|
def fetchPackageNames(graphJson):
"""Parses serialized graph and returns all package names it uses
:param graphJson: Serialized graph
:type graphJson: dict
    :rtype: set(str)
"""
packages = set()
def worker(graphData):
for node in graphData["nodes"]:
packages.add(node["package"])
for inpJson in node["inputs"]:
packages.add(inpJson['package'])
for outJson in node["inputs"]:
packages.add(outJson['package'])
if "graphData" in node:
worker(node["graphData"])
worker(graphJson)
return packages
|
ccac1cfa1305d5d318cf3e2e3ed85d00fff7e56b
| 3,643,219
|
import numpy as np
# `pycnal` and `BGrid_POP` are provided by the surrounding module.
def get_nc_BGrid_POP(grdfile, name='POP_NEP', \
                     xrange=(170,270), yrange=(240, 350)):
"""
grd = get_nc_BGrid_POP(grdfile)
Load Bgrid object for POP from netCDF file
"""
nc = pycnal.io.Dataset(grdfile)
lon_t = nc.variables['TLONG'][:]
lat_t = nc.variables['TLAT'][:]
lon_u = nc.variables['ULONG'][:]
lat_u = nc.variables['ULAT'][:]
angle = nc.variables['ANGLET'][:]
h_t = nc.variables['HT'][:]
h_u = nc.variables['HU'][:]
z_t = nc.variables['z_t'][:]
z_w_top = nc.variables['z_w_top'][:]
z_w_bot = nc.variables['z_w_bot'][:]
z_w = np.zeros(z_t.size + 1)
z_w[:-1] = z_w_top
z_w[-1] = z_w_bot[-1]
return BGrid_POP(lon_t, lat_t, lon_u, lat_u, angle, h_t, h_u, z_t, z_w, \
name, xrange, yrange)
|
3ce9eec34f3332d21fce1ceaca2862faa69443d1
| 3,643,220
|
def types_and_shorthands():
"""a mapping from type names in the json doc to their
one letter short hands in the output of 'attr'
"""
return {
'int': 'i',
'uint': 'u',
'bool': 'b',
'decimal': 'd',
'color': 'c',
'string': 's',
'regex': 'r',
'SplitAlign': 'n',
'LayoutAlgorithm': 'n',
'font': 'f',
'Rectangle': 'R',
'WindowID': 'w',
}
|
39f364677a8e2ee1d459599ba2574a8a4f4cd49e
| 3,643,221
|
def _make_event_from_message(message):
"""Turn a raw message from the wire into an event.Event object
"""
if 'oslo.message' in message:
# Unpack the RPC call body and discard the envelope
message = rpc_common.deserialize_msg(message)
tenant_id = _get_tenant_id_for_message(message)
crud = event.UPDATE
router_id = None
if message.get('method') == 'router_deleted':
crud = event.DELETE
router_id = message.get('args', {}).get('router_id')
else:
event_type = message.get('event_type', '')
# Router id is not always present, but look for it as though
# it is to avoid duplicating this line a few times.
router_id = message.get('payload', {}).get('router', {}).get('id')
if event_type.startswith('routerstatus.update'):
# We generate these events ourself, so ignore them.
return None
if event_type == 'router.create.end':
crud = event.CREATE
elif event_type == 'router.delete.end':
crud = event.DELETE
router_id = message.get('payload', {}).get('router_id')
elif event_type in _INTERFACE_NOTIFICATIONS:
crud = event.UPDATE
router_id = message.get(
'payload', {}
).get('router.interface', {}).get('id')
elif event_type in _INTERESTING_NOTIFICATIONS:
crud = event.UPDATE
elif event_type.endswith('.end'):
crud = event.UPDATE
elif event_type.startswith('akanda.rug.command'):
LOG.debug('received a command: %r', message.get('payload'))
# If the message does not specify a tenant, send it to everyone
pl = message.get('payload', {})
tenant_id = pl.get('tenant_id', '*')
router_id = pl.get('router_id')
crud = event.COMMAND
if pl.get('command') == commands.POLL:
return event.Event(
tenant_id='*',
router_id='*',
crud=event.POLL,
body={},
)
else:
# LOG.debug('ignoring message %r', message)
return None
return event.Event(tenant_id, router_id, crud, message)
|
daaba3567fbb4e95d311a12f58852bb0b81c6f05
| 3,643,222
|
def to_region(obj):
"""Convert `obj` to instance of Region."""
if obj is not None and not isinstance(obj, Region):
return Region(*obj)
else:
return obj
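
# Hedged usage sketch (comment-only; `Region` is the class from the surrounding
# module, so this is illustrative):
#
#     to_region((10, 20, 30, 40))   # -> Region(10, 20, 30, 40)
#     to_region(existing_region)    # -> returned unchanged
#     to_region(None)               # -> None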
|
da5412adcc182c97950465e3c4e3248be00f242b
| 3,643,223
|
import PIL.Image
import arcade
def create_textures():
""" Create a list of images for sprites based on the global colors.
!!! SHOULD be able to add custom images in here instead of the general colors."""
texture_list = []
for color in colors:
image = PIL.Image.new('RGB', (WIDTH, HEIGHT), color)
texture_list.append(arcade.Texture(str(color), image=image))
return texture_list
|
2652812d96157fc6f7d502e6ca39f4c4eee32dea
| 3,643,224
|
import psutil
def check_if_process_is_running(process_name):
""""
Check if there is any running process that contains the given name process_name.
"""
# Iterate over the all the running process
for process in psutil.process_iter():
try:
# Check if process name contains the given name string.
if process_name.lower() in process.name().lower():
return True
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return False
|
372cd2183b7250ce738157c986adb9a3abfdca84
| 3,643,225
|
import cProfile
import pstats
import io
from pstats import SortKey # type: ignore
import optparse
def exec_main_with_profiler(options: "optparse.Values") -> int:
"""Enable profiler."""
profile = cProfile.Profile()
profile.enable()
ret = exec_main(options)
profile.disable()
string_io = io.StringIO()
sortby = SortKey.TIME
print_stats = pstats.Stats(profile, stream=string_io).sort_stats(sortby)
print_stats.print_stats(40)
print(string_io.getvalue())
return ret
|
8eefc801319d94081364218fc80500b710610f31
| 3,643,226
|
import cv2
def put_text(image, text, point, scale, color, thickness):
"""Draws text in image.
# Arguments
image: Numpy array.
text: String. Text to be drawn.
point: Tuple of coordinates indicating the top corner of the text.
scale: Float. Scale of text.
color: Tuple of integers. RGB color coordinates.
thickness: Integer. Thickness of the lines used for drawing text.
# Returns
Numpy array with shape ``[H, W, 3]``. Image with text.
"""
# cv2.putText returns an image in contrast to other drawing cv2 functions.
return cv2.putText(image, text, point, FONT, scale, color, thickness, LINE)
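
# Hedged usage sketch (comment-only): FONT and LINE are module-level constants
# in the original code (e.g. cv2.FONT_HERSHEY_SIMPLEX and cv2.LINE_AA would be
# typical choices):
#
#     image = np.zeros((128, 256, 3), dtype=np.uint8)
#     image = put_text(image, "hello", (10, 64), 0.8, (255, 255, 255), 2)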
|
aeab690da16577e7eff27b515cf9b682110716e9
| 3,643,227
|
from typing import List

import numpy as np

# Note: `Dimension` and `Path` below are the scan-path classes from the
# surrounding package, not pathlib.Path.
def squash_dimensions(
dimensions: List[Dimension], check_path_changes=True
) -> Dimension:
"""Squash a list of nested Dimensions into a single one.
Args:
dimensions: The Dimensions to squash, from slowest to fastest moving
check_path_changes: If True then check that nesting the output
Dimension within other Dimensions will provide the same path
as nesting the input Dimension within other Dimensions
See Also:
`why-squash-can-change-path`
>>> dimx = Dimension({"x": np.array([1, 2])}, snake=True)
>>> dimy = Dimension({"y": np.array([3, 4])})
>>> squash_dimensions([dimy, dimx]).midpoints
{'y': array([3, 3, 4, 4]), 'x': array([1, 2, 2, 1])}
"""
path = Path(dimensions)
    # Consuming a Path of these dimensions performs the squash
# TODO: dim.tile might give better performance but is much longer
squashed = path.consume()
# Check that the squash is the same as the original
if dimensions and dimensions[0].snake:
squashed.snake = True
# The top level is snaking, so this dimension will run backwards
# This means any non-snaking axes will run backwards, which is
# surprising, so don't allow it
if check_path_changes:
non_snaking = [k for d in dimensions for k in d.axes() if not d.snake]
if non_snaking:
raise ValueError(
f"Cannot squash non-snaking Specs in a snaking Dimension "
f"otherwise {non_snaking} would run backwards"
)
elif check_path_changes:
# The top level is not snaking, so make sure there is an even
# number of iterations of any snaking axis within it so it
# doesn't jump when this dimension is iterated a second time
for i, dim in enumerate(dimensions):
# A snaking dimension within a non-snaking top level must repeat
# an even number of times
            if dim.snake and np.prod(path._lengths[:i]) % 2:
raise ValueError(
f"Cannot squash snaking Specs in a non-snaking Dimension "
f"when they do not repeat an even number of times "
f"otherwise {dim.axes()} would jump in position"
)
return squashed
|
301304cf32115d103bb53a209df85880f27fcf53
| 3,643,228
|
def CreateRootRelativePath(self, path):
"""
    Resolve a path given relative to the engine root into an absolute path
"""
result_path = self.engine_node.make_node(path)
return result_path.abspath()
|
79053bb1bcb724e8ddf9bfc4b5b13b67be9227f0
| 3,643,229
|
import os
def resolve_font(name):
"""Sloppy way to turn font names into absolute filenames
This isn't intended to be a proper font lookup tool but rather a
dirty tool to not have to specify the absolute filename every
time.
For example::
>>> path = resolve_font('IndUni-H-Bold')
>>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts')
>>> indunih_path = os.path.join(fontdir, 'IndUni-H-Bold.ttf')
>>> assert path == indunih_path
This isn't case-sensitive::
>>> assert resolve_font('induni-h') == indunih_path
Raises :exc:`FontNotFound` on failure::
>>> resolve_font('blahahaha')
Traceback (most recent call last):
...
FontNotFound: Can't find 'blahahaha' :'( Try adding it to ~/.fonts
"""
for fontdir, fontfiles in get_font_files():
for fontfile in fontfiles:
if name.lower() in fontfile.lower():
return os.path.join(fontdir, fontfile)
    raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts" % name)
|
b5716cb4390ebdf3ce8103efb33cd6b197e171a3
| 3,643,230
|
import json
from typing import Mapping
def to_shape(shape_ser):
""" Deserializes a shape into a Shapely object - can handle WKT, GeoJSON,
Python dictionaries and Shapely types.
"""
if isinstance(shape_ser, str):
try:
            # Redirecting stderr because there's a low level exception that
# prints.
with redirect_stderr("/dev/null"):
shape_obj = wkt_loads(shape_ser)
except WKTReadingError:
try:
shape_obj = shape(json.loads(shape_ser))
except Exception:
raise TypeError(
"{} is not serializable to a shape.".format(str(shape_ser))
)
elif isinstance(shape_ser, Mapping):
shape_obj = shape(shape_ser)
elif isinstance(
shape_ser,
(
Point,
MultiPoint,
LineString,
MultiLineString,
Polygon,
MultiPolygon,
),
):
shape_obj = shape_ser
else:
raise TypeError(
"{} is not serializable to a shape.".format(str(shape_ser))
)
return shape_obj
|
d9a0975696ee48d816d5b61e7cc57c0547ff5033
| 3,643,231
|
import inspect
def _from_module(module, object):
"""
Return true if the given object is defined in the given module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
        return module.__dict__ is object.__globals__
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
|
564157a4eb10b887b6b56c82d17d74557c233104
| 3,643,232
|
def _escape_pgpass(txt):
"""
Escape a fragment of a PostgreSQL .pgpass file.
"""
return txt.replace('\\', '\\\\').replace(':', '\\:')
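
# Quick illustration of the escaping (added for clarity):
if __name__ == "__main__":
    assert _escape_pgpass(r"pa:ss\word") == r"pa\:ss\\word"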
|
3926f683a2715ff1d41d8433b525793e8214f7a9
| 3,643,233
|
import re
import os
def harvest_dirs(path):
"""Return a list of versioned directories under working copy
directory PATH, inclusive."""
# 'svn status' output line matcher, taken from the Subversion test suite
    rm = re.compile(r'^([!MACDRUG_ ][MACDRUG_ ])([L ])([+ ])([S ])([KOBT ]) ' \
                    r'([* ]) [^0-9-]*(\d+|-|\?) +(\d|-|\?)+ +(\S+) +(.+)')
    dirs = []
    fp = os.popen('%s status --verbose %s' % (SVN_BINARY, path))
    while True:
line = fp.readline()
if not line:
break
line = line.rstrip()
if line.startswith('Performing'):
break
match = rm.search(line)
if match:
stpath = match.group(10)
try:
if os.path.isdir(stpath):
dirs.append(stpath)
except:
pass
return dirs
|
8c48558e29952513b521685bee013679a52902ce
| 3,643,234
|
from typing import Dict
from typing import Set
from typing import List
import math
def gen_index(doc_term_map: Dict[PT.Word, Set[PT.IndexNum]],
dependency_map: Dict[PT.IndexNum, Count],
i: PT.IndexNum,
words: List[PT.Word]
) -> PT.PkgIndex:
"""Generate package index by scoring each word / term."""
word_freq: Dict[PT.Word, Count] = utils.count_freq(words)
total_docs = len(doc_term_map)
pkg_index: PT.PkgIndex = dict()
for word in word_freq:
doc_inverse_freq = get_doc_inverse_freq(total_docs,
len(doc_term_map[word]))
dependency_freq = get_dependency_freq(i, dependency_map)
pkg_index[word] = math.log(word_freq[word] *
doc_inverse_freq *
dependency_freq)
return pkg_index
|
4d82498309019f9bdc55fec8b6917576b2b0ff22
| 3,643,235
|
from collections import OrderedDict
def arr_to_dict(arr, ref_dict):
"""
Transform an array of data into a dictionary keyed by the same keys in
ref_dict, with data divided into chunks of the same length as in ref_dict.
Requires that the length of the array is the sum of the lengths of the
arrays in each entry of ref_dict. The other dimensions of the input
array and reference dict can differ.
Arguments
---------
arr : array
Input array to be transformed into dictionary.
ref_dict : dict
Reference dictionary containing the keys used to construct the output
dictionary.
Returns
-------
out : dict
Dictionary of values from arr keyed with keys from ref_dict.
"""
out = OrderedDict()
idx = 0
assert len(arr) == sum([len(v) for v in ref_dict.values()])
for k, bd in ref_dict.items():
out[k] = arr[idx : idx + len(bd)]
idx += len(bd)
return out
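
# Hedged usage sketch (assumes numpy is available):
if __name__ == "__main__":
    import numpy as np
    ref = OrderedDict([("a", np.zeros(2)), ("b", np.zeros(3))])
    print(arr_to_dict(np.arange(5), ref))
    # OrderedDict([('a', array([0, 1])), ('b', array([2, 3, 4]))])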
|
55339447226cdd2adafe714fa12e144c6b38faa2
| 3,643,236
|
def test_makecpt_truncated_zlow_zhigh(position):
"""
Use static color palette table that is truncated to z-low and z-high.
"""
fig = Figure()
makecpt(cmap="rainbow", truncate=[0.15, 0.85], series=[0, 1000])
fig.colorbar(cmap=True, frame=True, position=position)
return fig
|
f615d425d6433a6e4bc6dc0d8d5e18f2a0aa60c7
| 3,643,237
|
import os
import numpy as np
import pandas as pd
def extract_boar_teloFISH_as_list(path):
    """
    Pull Kelly's teloFISH data for 40 boars into a list, to be made into a
    DataFrame and joined with the main DataFrame if possible.
    These excel files take forever to load; the objective here is to synthesize all
    the excel files for telomere FISH data into one dataframe, then save that
    dataframe to a csv file to be retrieved later. Loading one whole csv file
    containing all the data is much, much faster than loading the parts of the whole.
    Along the way, we'll normalize the teloFISH data using controls internal to each
    excel file.
"""
boar_teloFISH_list = []
for file in os.scandir(path):
if 'Hyb' in file.name:
print(f'Handling {file.name}...')
full_name = path + file.name
# making a dict of excel sheets, where KEY:VALUE pairs are SAMPLE ID:TELO DATA
telo_excel_dict = pd.read_excel(full_name, sheet_name=None, skiprows=4, usecols=[3], nrows=5000)
if 'Telomere Template' in telo_excel_dict.keys():
del telo_excel_dict['Telomere Template']
excel_file_list = []
for sample_id, telos in telo_excel_dict.items():
telos_cleaned = clean_individ_telos(telos)
if sample_id != 'Control':
excel_file_list.append([sample_id, telos_cleaned.values, np.mean(telos_cleaned)])
elif sample_id == 'Control':
control_value = np.mean(telos_cleaned)
#normalize teloFISH values by control value
for sample in excel_file_list:
sample_data = sample
#normalize individual telos
sample_data[1] = np.divide(sample_data[1], control_value)
#normalize telo means
sample_data[2] = np.divide(sample_data[2], control_value)
boar_teloFISH_list.append(sample_data)
print('Finished collecting boar teloFISH data')
return boar_teloFISH_list
|
a431282679f916a71cab7cc002dce26a39de147e
| 3,643,238
|
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
|
9226c23c13ad86cc09f2c08ce1ffb44f324a1044
| 3,643,239
|
def login():
"""
Handles user authentication.
The hash of the password the user entered is compared to the hash in the database.
Also saves the user_id in the user's session.
"""
form = SignInForm()
banned = None
reason = None
if form.validate_on_submit():
user_id = form.user_id.data
password = form.password.data
db = get_db()
user = db.execute("""SELECT * FROM users
where user_id = ?;""", (user_id,)).fetchone()
if user is None:
form.user_id.errors.append("Unkown user id")
elif not check_password_hash(user["password"], password):
form.password.errors.append("Incorrect password!")
elif user["isBanned"] == 1:
banned = "You have been banned"
reason = user["bannedReason"]
else:
session.clear()
session["user_id"] = user_id
next_page = request.args.get("next")
if not next_page:
next_page = url_for("chat")
return redirect(next_page)
return render_template("login.html", form=form, maincolor=g.maincolor, secondcolor=g.secondcolor, textcolor=g.textcolor, banned=banned, reason=reason)
|
d2cea08572c7b1461cda490f063009bc139c7c3a
| 3,643,240
|
def create_ipu_strategy(num_ipus,
fp_exceptions=False,
enable_recomputation=True,
min_remote_tensor_size=50000,
max_cross_replica_sum_buffer_size=10*1024*1024):
"""
Creates an IPU config and returns an IPU strategy ready to run
something on IPUs
:param num_ipus: Int representing the number of IPUs required.
:param fp_exceptions: Bool, if True floating point exceptions will
be raised.
:param enable_recomputation: Bool, if True recomputation will be
enabled.
:param min_remote_tensor_size: The minimum size (in bytes) a tensor
must be in order to be considered for being stored in remote
memory.
:param max_cross_replica_sum_buffer_size: The maximum number of bytes
that can be waiting before a cross replica sum op is scheduled.
Represents an always-live vs not-always-live trade off. The
default used here is effective for BERT.
:return: An IPU strategy
"""
ipu_config = ipu.config.IPUConfig()
ipu_config.auto_select_ipus = num_ipus
ipu_config.allow_recompute = enable_recomputation
ipu_config.floating_point_behaviour.inv = fp_exceptions
ipu_config.floating_point_behaviour.div0 = fp_exceptions
ipu_config.floating_point_behaviour.oflo = fp_exceptions
ipu_config.floating_point_behaviour.nanoo = fp_exceptions
ipu_config.optimizations.minimum_remote_tensor_size = min_remote_tensor_size
ipu_config.optimizations.merge_infeed_io_copies = True
ipu_config.optimizations.maximum_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size
ipu_config.device_connection.type = ipu.config.DeviceConnectionType.ON_DEMAND
ipu_config.device_connection.enable_remote_buffers = True
ipu_config.configure_ipu_system()
strategy = ipu.ipu_strategy.IPUStrategy()
return strategy
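
# Hedged usage sketch (comment-only; requires the TensorFlow IPU add-on and IPU
# hardware or an on-demand device; `make_model` is a hypothetical builder):
#
#     strategy = create_ipu_strategy(num_ipus=1)
#     with strategy.scope():
#         model = make_model()
#         model.fit(...)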
|
2aaa76de946e43305cdb7e50b673e39a08e19a50
| 3,643,241
|
def run_pipeline(context, func, ast, func_signature,
pipeline=None, **kwargs):
"""
Run a bunch of AST transformers and visitors on the AST.
"""
# print __import__('ast').dump(ast)
pipeline = pipeline or context.numba_pipeline(context, func, ast,
func_signature, **kwargs)
return pipeline, pipeline.run_pipeline()
|
559d9c44ae143e49ff505fd76df0393bae56f012
| 3,643,242
|
from typing import Union
from typing import Optional
def convert_acc_data_to_g(
data: Union[AccDataFrame, ImuDataFrame], inplace: Optional[bool] = False
) -> Optional[Union[AccDataFrame, ImuDataFrame]]:
"""Convert acceleration data from :math:`m/s^2` to g.
Parameters
----------
data : :class:`~biopsykit.utils.datatype_helper.AccDataFrame` or \
:class:`~biopsykit.utils.datatype_helper.ImuDataFrame`
dataframe containing acceleration data.
inplace : bool, optional
whether to perform the operation inplace or not. Default: ``False``
Returns
-------
:class:`~biopsykit.utils.datatype_helper.AccDataFrame` or :class:`~biopsykit.utils.datatype_helper.ImuDataFrame`
acceleration data converted to g
"""
if not inplace:
data = data.copy()
acc_cols = data.filter(like="acc").columns
data.loc[:, acc_cols] = data.loc[:, acc_cols] / 9.81
if inplace:
return None
return data
|
e9c60ebdc143cd8243fa7ec7ebde27f0ad5beceb
| 3,643,243
|
def replace_by_one_rule(specific_rule: dict, sentence: str):
"""
    This function rewrites a sentence according to the given replacement dict.
    :param specific_rule: A dict containing the replacement rule, where each key is the word to use and its value
        is the list of words to be replaced by that key.
    :param sentence: A string to be rewritten using the given rule.
    :return: The string after the rules have been applied.
"""
original = sentence.lower()
for key in specific_rule.keys():
for word in specific_rule[key]:
original = original.replace(word, key)
original = " ".join([i if i != 'be' else 'is' for i in original.split(' ')])
return original.replace('(s)', '').replace('is at there', 'been there').replace('(es)', ''). \
replace('is in there', 'been there').replace('is there', 'been there').replace('possess', 'have')
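
# Hedged usage sketch: keys are the canonical words, values list the variants to
# be rewritten to the key.
if __name__ == "__main__":
    rule = {"good": ["great", "nice"]}
    print(replace_by_one_rule(rule, "This is a GREAT day"))
    # -> "this is a good day"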
|
31a5bd58ef77d76c968c353dd493ba3357d5b506
| 3,643,244
|
def get_os(platform):
"""
Return the icon-name of the OS.
@type platform: C{string}
@param platform: A string that represents the platform of the
relay.
@rtype: C{string}
@return: The icon-name version of the OS of the relay.
"""
if platform:
for os in __OS_LIST:
if os in platform:
if os == 'Windows' and 'Server' in platform:
return 'WindowsServer'
else:
return os
return 'NotAvailable'
|
1610c373076a8fd9b647dad22c5ff39732d14fa7
| 3,643,245
|
import jax.numpy as jnp
def get_loglikelihood_fn(dd_s, f_l=f_l, f_h=f_h, n_f=n_f):
    """
    Build a log-likelihood function of a parameter point
    x = (gamma_s, rho_6T, M_chirp_MSUN, log10_q) for the signal system dd_s.
    """
fs = jnp.linspace(f_l, f_h, n_f)
pad_low, pad_high = get_match_pads(fs)
def _ll(x):
# Unpack parameters into dark dress ones
gamma_s, rho_6T, M_chirp_MSUN, log10_q = x
M_chirp = M_chirp_MSUN * MSUN
q = 10 ** log10_q
rho_6 = rho_6T_to_rho6(rho_6T)
f_c = get_f_isco(get_m_1(M_chirp, q))
dd_h = DynamicDress(
gamma_s, rho_6, M_chirp, q, dd_s.Phi_c, dd_s.tT_c, dd_s.dL, f_c
)
return loglikelihood_fft(dd_h, dd_s, fs, pad_low, pad_high)
return _ll
|
da3843fd069a9c3a4646aa282c854bbd5557d74b
| 3,643,246
|
def to_module_name(field):
"""_to_module_name(self, field: str) -> str
Convert module name to match syntax used in https://github.com/brendangregg/FlameGraph
Examples:
[unknown] -> [unknown]'
/usr/bin/firefox -> [firefox]
"""
if field != '[unknown]':
field = '[{}]'.format(field.split('/')[-1])
return field
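
# Quick illustration (added for clarity):
if __name__ == "__main__":
    print(to_module_name("/usr/bin/firefox"))  # -> [firefox]
    print(to_module_name("[unknown]"))         # -> [unknown]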
|
75e3fbb9a45710ea6dacecf5ecc34a5b9409606a
| 3,643,247
|
def ApplyMomentum(variable, accumulation, learning_rate, gradient, momentum, use_nesterov=False, gradient_scale=1.0):
"""apply momentum"""
return apply_momentum.apply_momentum(variable, gradient, accumulation, learning_rate,
momentum, use_nesterov=use_nesterov, grad_scale=gradient_scale)
|
f86b923e707c7f98d55cbf23a7ac17040bf2929c
| 3,643,248
|
def init():
"""Return True if the plugin has loaded successfully."""
ok = True
if ok:
#g.registerHandler('start2',onStart2)
g.plugin_signon(__name__)
#serve_thread()
#g.app.remoteserver = ss = LeoSocketServer()
return ok
|
74cc6395310d648b809b6df965700ca708581b5e
| 3,643,249
|
def _fftconvolve_14(in1, in2, int2_fft, mode="same"):
"""
scipy routine scipy.signal.fftconvolve with kernel already fourier transformed
"""
in1 = signaltools.asarray(in1)
in2 = signaltools.asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return signaltools.array([])
s1 = signaltools.array(in1.shape)
s2 = signaltools.array(in2.shape)
shape = s1 + s2 - 1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [signaltools._next_regular(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
ret = signaltools.irfftn(signaltools.rfftn(in1, fshape) *
int2_fft, fshape)[fslice].copy()
#np.fft.rfftn(in2, fshape)
if mode == "full":
return ret
elif mode == "same":
return signaltools._centered(ret, s1)
elif mode == "valid":
return signaltools._centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.")
|
a32d85bb24fe0d1e64b12cdae14391c0c8e9e111
| 3,643,250
|
def solve_step(previous_solution_space, phase_space_position, step_num):
"""
Solves the differential equation across the full spectrum of trajectory angles and neutrino energies
:param previous_solution_space: solution to previous step of the differential equation
across all angles and energies, includes phase space values in first 2 columns (ndarray)
:param phase_space_position: which cosine / neutrino energy slice for which to compute the solution
(int, row index of previous solution)
:param step_num: step number in discretized radial distance away from the initial core (int)
:return: solution (ndarray)
"""
euler_solution = solve_fixed_energy_angle(
previous_solution_space=previous_solution_space,
phase_space_position=phase_space_position,
step_num=step_num
)
return previous_solution_space[phase_space_position][0], previous_solution_space[phase_space_position][1], euler_solution
|
6678a9482453f2a43942f04085c892e8484e75cc
| 3,643,251
|
def gradientDescentMulti(X, y, theta, alpha, num_iters):
"""
Performs gradient descent to learn theta
theta = gradientDescent(x, y, theta, alpha, num_iters) updates theta by
taking num_iters gradient steps with learning rate alpha
"""
# Initialize some useful values
J_history = []
m = y.size # number of training examples
for i in range(num_iters):
# ====================== YOUR CODE HERE ======================
# Instructions: Perform a single gradient step on the parameter vector
# theta.
#
# Hint: While debugging, it can be useful to print out the values
# of the cost function (computeCost) and gradient here.
#
theta -= alpha * (X.T.dot(X.dot(theta) - y)) / float(m)
# ============================================================
# Save the cost J in every iteration
J_history.append(computeCostMulti(X, y, theta))
return theta, J_history
|
72b5512e115c405216fe262724b85a02df984c6d
| 3,643,252
|
def execute(*args, **kw):
"""Wrapper for ``Cursor#execute()``."""
return _m.connection["default"].cursor().execute(*args, **kw)
|
8bb11436046e479c580c48eb42d4cb2b37372945
| 3,643,253
|
def get_login_client():
"""
Returns a LinodeLoginClient configured as per the config module in this
example project.
"""
return LinodeLoginClient(config.client_id, config.client_secret)
|
786d3d6981f81afa95ed7de62544896982b17c58
| 3,643,254
|
import re
def detect_ascii_slice(lines):
# type: (List[str]) -> slice
"""
Given a list of strings, this will return the most likely positions of byte
positions. They are returned slice which should be able to extract the
columns from each line.
"""
for line in lines:
# if the content contains a ":" character, it contains the byte offset
# in the beginning. This is the case for libsnmp command output using
# the "-d" switch. We need to remove the offset
        match = re.match(r"^\d{4}:", line)
        if match:
return slice(6, 56)
else:
return slice(0, 50)
return slice(0, -1)
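
# Hedged usage sketch: a line with a leading "0000:" byte offset (as in libsnmp
# "-d" output) selects the offset-stripping slice, otherwise the full-width
# slice is used.
if __name__ == "__main__":
    print(detect_ascii_slice(["0000: 30 2C 02 01"]))  # slice(6, 56)
    print(detect_ascii_slice(["30 2C 02 01"]))        # slice(0, 50)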
|
06cf3b9faa24f46aef37ae94495cbe129851bd7c
| 3,643,255
|
import torch
from torch import nn
def inception_model_pytorch():
"""The InceptionBlocks model the WebGME folks provided as a test case for deepforge."""
class InceptionBlocks(nn.Module):
def __init__(self):
super().__init__()
self.asymmetric_pad = nn.ZeroPad2d((0, 1, 0, 1))
self.conv2d = nn.Conv2d(
in_channels=5, out_channels=64, kernel_size=(5, 5), padding=2, bias=True
)
self.prelu = nn.PReLU(init=0.0)
self.averagepooling2d = nn.AvgPool2d((2, 2), stride=2, padding=0)
self.conv2d2 = nn.Conv2d(
in_channels=64,
out_channels=48,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu2 = nn.PReLU(init=0.0)
self.conv2d3 = nn.Conv2d(
in_channels=48,
out_channels=64,
kernel_size=(3, 3),
padding=1,
bias=True,
)
self.prelu3 = nn.PReLU(init=0.0)
self.conv2d4 = nn.Conv2d(
in_channels=64,
out_channels=48,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu4 = nn.PReLU(init=0.0)
self.averagepooling2d2 = nn.AvgPool2d((2, 2), stride=1)
self.conv2d5 = nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu5 = nn.PReLU(init=0.0)
self.conv2d6 = nn.Conv2d(
in_channels=64,
out_channels=48,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu6 = nn.PReLU(init=0.0)
self.conv2d7 = nn.Conv2d(
in_channels=48,
out_channels=64,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu7 = nn.PReLU(init=0.0)
self.conv2d8 = nn.Conv2d(
in_channels=240,
out_channels=64,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.conv2d9 = nn.Conv2d(
in_channels=240,
out_channels=92,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.conv2d10 = nn.Conv2d(
in_channels=240,
out_channels=64,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu8 = nn.PReLU(init=0.0)
self.conv2d11 = nn.Conv2d(
in_channels=64,
out_channels=92,
kernel_size=(5, 5),
padding=2,
bias=True,
)
self.prelu9 = nn.PReLU(init=0.0)
self.prelu10 = nn.PReLU(init=0.0)
self.averagepooling2d3 = nn.AvgPool2d((2, 2), stride=1, padding=0)
self.conv2d12 = nn.Conv2d(
in_channels=240,
out_channels=64,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu11 = nn.PReLU(init=0.0)
self.conv2d13 = nn.Conv2d(
in_channels=64,
out_channels=92,
kernel_size=(3, 3),
padding=1,
bias=True,
)
self.prelu12 = nn.PReLU(init=0.0)
self.prelu13 = nn.PReLU(init=0.0)
self.averagepooling2d4 = nn.AvgPool2d((2, 2), stride=2, padding=0)
self.conv2d14 = nn.Conv2d(
in_channels=340,
out_channels=92,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu14 = nn.PReLU(init=0.0)
self.conv2d15 = nn.Conv2d(
in_channels=92,
out_channels=128,
kernel_size=(5, 5),
padding=2,
bias=True,
)
self.prelu15 = nn.PReLU(init=0.0)
self.conv2d16 = nn.Conv2d(
in_channels=340,
out_channels=128,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu16 = nn.PReLU(init=0.0)
self.conv2d17 = nn.Conv2d(
in_channels=340,
out_channels=92,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu17 = nn.PReLU(init=0.0)
self.averagepooling2d5 = nn.AvgPool2d((2, 2), stride=1, padding=0)
self.conv2d18 = nn.Conv2d(
in_channels=340,
out_channels=92,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu18 = nn.PReLU(init=0.0)
self.conv2d19 = nn.Conv2d(
in_channels=92,
out_channels=128,
kernel_size=(3, 3),
padding=1,
bias=True,
)
self.prelu19 = nn.PReLU(init=0.0)
self.conv2d20 = nn.Conv2d(
in_channels=476,
out_channels=92,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu20 = nn.PReLU(init=0.0)
self.conv2d21 = nn.Conv2d(
in_channels=92,
out_channels=128,
kernel_size=(3, 3),
padding=1,
bias=True,
)
self.prelu21 = nn.PReLU(init=0.0)
self.conv2d22 = nn.Conv2d(
in_channels=476,
out_channels=92,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu22 = nn.PReLU(init=0.0)
self.averagepooling2d6 = nn.AvgPool2d((2, 2), stride=1, padding=0)
self.conv2d23 = nn.Conv2d(
in_channels=476,
out_channels=92,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu23 = nn.PReLU(init=0.0)
self.conv2d24 = nn.Conv2d(
in_channels=92,
out_channels=128,
kernel_size=(5, 5),
padding=2,
bias=True,
)
self.prelu24 = nn.PReLU(init=0.0)
self.conv2d25 = nn.Conv2d(
in_channels=476,
out_channels=128,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu25 = nn.PReLU(init=0.0)
self.averagepooling2d7 = nn.AvgPool2d((2, 2), stride=2, padding=0)
self.conv2d26 = nn.Conv2d(
in_channels=476,
out_channels=92,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu26 = nn.PReLU(init=0.0)
self.averagepooling2d8 = nn.AvgPool2d((2, 2), stride=1, padding=0)
self.conv2d27 = nn.Conv2d(
in_channels=476,
out_channels=92,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu27 = nn.PReLU(init=0.0)
self.conv2d28 = nn.Conv2d(
in_channels=92,
out_channels=128,
kernel_size=(3, 3),
padding=1,
bias=True,
)
self.prelu28 = nn.PReLU(init=0.0)
self.conv2d29 = nn.Conv2d(
in_channels=476,
out_channels=128,
kernel_size=(1, 1),
padding=0,
bias=True,
)
self.prelu29 = nn.PReLU(init=0.0)
self.dense = nn.Linear(22273, 1096, bias=True)
self.prelu30 = nn.PReLU(init=0.0)
self.dense2 = nn.Linear(1096, 1096, bias=True)
self.prelu31 = nn.PReLU(init=0.0)
self.dense3 = nn.Linear(1096, 180, bias=True)
def forward(self, galaxy_images_output, ebv_output):
conv2d_output = self.conv2d(galaxy_images_output)
prelu_output = self.prelu(conv2d_output)
averagepooling2d_output = self.averagepooling2d(prelu_output)
conv2d_output2 = self.conv2d2(averagepooling2d_output)
prelu_output2 = self.prelu2(conv2d_output2)
conv2d_output3 = self.conv2d3(prelu_output2)
prelu_output3 = self.prelu3(conv2d_output3)
conv2d_output4 = self.conv2d4(averagepooling2d_output)
prelu_output4 = self.prelu4(conv2d_output4)
prelu_output4 = self.asymmetric_pad(prelu_output4)
averagepooling2d_output2 = self.averagepooling2d2(prelu_output4)
conv2d_output5 = self.conv2d5(averagepooling2d_output)
prelu_output5 = self.prelu5(conv2d_output5)
conv2d_output6 = self.conv2d6(averagepooling2d_output)
prelu_output6 = self.prelu6(conv2d_output6)
conv2d_output7 = self.conv2d7(prelu_output6)
prelu_output7 = self.prelu7(conv2d_output7)
concatenate_output = torch.cat(
(prelu_output5, prelu_output3, prelu_output7, averagepooling2d_output2),
dim=1,
)
conv2d_output8 = self.conv2d8(concatenate_output)
conv2d_output9 = self.conv2d9(concatenate_output)
conv2d_output10 = self.conv2d10(concatenate_output)
prelu_output8 = self.prelu8(conv2d_output10)
conv2d_output11 = self.conv2d11(prelu_output8)
prelu_output9 = self.prelu9(conv2d_output11)
prelu_output10 = self.prelu10(conv2d_output8)
prelu_output10 = self.asymmetric_pad(prelu_output10)
averagepooling2d_output3 = self.averagepooling2d3(prelu_output10)
conv2d_output12 = self.conv2d12(concatenate_output)
prelu_output11 = self.prelu11(conv2d_output12)
conv2d_output13 = self.conv2d13(prelu_output11)
prelu_output12 = self.prelu12(conv2d_output13)
prelu_output13 = self.prelu13(conv2d_output9)
concatenate_output2 = torch.cat(
(
prelu_output13,
prelu_output12,
prelu_output9,
averagepooling2d_output3,
),
dim=1,
)
averagepooling2d_output4 = self.averagepooling2d4(concatenate_output2)
conv2d_output14 = self.conv2d14(averagepooling2d_output4)
prelu_output14 = self.prelu14(conv2d_output14)
conv2d_output15 = self.conv2d15(prelu_output14)
prelu_output15 = self.prelu15(conv2d_output15)
conv2d_output16 = self.conv2d16(averagepooling2d_output4)
prelu_output16 = self.prelu16(conv2d_output16)
conv2d_output17 = self.conv2d17(averagepooling2d_output4)
prelu_output17 = self.prelu17(conv2d_output17)
prelu_output17 = self.asymmetric_pad(prelu_output17)
averagepooling2d_output5 = self.averagepooling2d5(prelu_output17)
conv2d_output18 = self.conv2d18(averagepooling2d_output4)
prelu_output18 = self.prelu18(conv2d_output18)
conv2d_output19 = self.conv2d19(prelu_output18)
prelu_output19 = self.prelu19(conv2d_output19)
concatenate_output3 = torch.cat(
(
prelu_output16,
prelu_output19,
prelu_output15,
averagepooling2d_output5,
),
dim=1,
)
conv2d_output20 = self.conv2d20(concatenate_output3)
prelu_output20 = self.prelu20(conv2d_output20)
conv2d_output21 = self.conv2d21(prelu_output20)
prelu_output21 = self.prelu21(conv2d_output21)
conv2d_output22 = self.conv2d22(concatenate_output3)
prelu_output22 = self.prelu22(conv2d_output22)
prelu_output22 = self.asymmetric_pad(prelu_output22)
averagepooling2d_output6 = self.averagepooling2d6(prelu_output22)
conv2d_output23 = self.conv2d23(concatenate_output3)
prelu_output23 = self.prelu23(conv2d_output23)
conv2d_output24 = self.conv2d24(prelu_output23)
prelu_output24 = self.prelu24(conv2d_output24)
conv2d_output25 = self.conv2d25(concatenate_output3)
prelu_output25 = self.prelu25(conv2d_output25)
concatenate_output4 = torch.cat(
(
prelu_output25,
prelu_output21,
prelu_output24,
averagepooling2d_output6,
),
dim=1,
)
averagepooling2d_output7 = self.averagepooling2d7(concatenate_output4)
conv2d_output26 = self.conv2d26(averagepooling2d_output7)
prelu_output26 = self.prelu26(conv2d_output26)
prelu_output26 = self.asymmetric_pad(prelu_output26)
averagepooling2d_output8 = self.averagepooling2d8(prelu_output26)
conv2d_output27 = self.conv2d27(averagepooling2d_output7)
prelu_output27 = self.prelu27(conv2d_output27)
conv2d_output28 = self.conv2d28(prelu_output27)
prelu_output28 = self.prelu28(conv2d_output28)
conv2d_output29 = self.conv2d29(averagepooling2d_output7)
prelu_output29 = self.prelu29(conv2d_output29)
concatenate_output5 = torch.cat(
(prelu_output29, prelu_output28, averagepooling2d_output8), dim=1
)
flatten_output = torch.flatten(concatenate_output5)
concatenate_output6 = torch.cat((flatten_output, ebv_output), dim=0)
dense_output = self.dense(concatenate_output6)
prelu_output30 = self.prelu30(dense_output)
dense_output2 = self.dense2(prelu_output30)
prelu_output31 = self.prelu31(dense_output2)
dense_output3 = self.dense3(prelu_output31)
return dense_output3
torch.manual_seed(0)
model = InceptionBlocks()
model.eval()
return model
|
c70e4ea71eb38ae0d81b6985076a0c1588758df2
| 3,643,256
|
from sklearn.linear_model import LinearRegression
def get_trained_coefficients(X_train, y_train):
"""
Create and train a model based on the training_data_file data.
Return the model, and the list of coefficients for the 'X_columns' variables in the regression.
"""
# TODO: create regression model and train.
    # The following code is adapted from https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html .
    # The deprecated `normalize` argument is omitted; it was removed in recent scikit-learn releases.
    model = LinearRegression(fit_intercept=False, copy_X=True, n_jobs=None, positive=False)
    model = model.fit(X_train, y_train, sample_weight=None)
coefficients = model.coef_
return model, coefficients
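# A minimal usage sketch (synthetic data, not from the original assignment):
# with fit_intercept=False and y = 1*x1 + 2*x2, the recovered coefficients are ~[1., 2.].
import numpy as np
X_demo = np.array([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]])
y_demo = np.array([5.0, 4.0, 11.0])
demo_model, demo_coefficients = get_trained_coefficients(X_demo, y_demo)
print(demo_coefficients)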
|
bfafbd3370bc48fa64144842fa2400bbe629cf3e
| 3,643,257
|
def register():
"""Handles the creation of a new user"""
form = dds_web.forms.RegistrationForm()
# Validate form - validators defined in form class
if form.validate_on_submit():
# Create new user row by loading form data into schema
try:
new_user = user_schemas.NewUserSchema().load(form.data)
except marshmallow.ValidationError as valerr:
flask.current_app.logger.warning(valerr)
raise
except (sqlalchemy.exc.SQLAlchemyError, sqlalchemy.exc.IntegrityError) as sqlerr:
raise ddserr.DatabaseError from sqlerr
# Go to two factor authentication setup
# TODO: Change this after email is introduced
flask_login.login_user(new_user)
return flask.redirect(flask.url_for("auth_blueprint.two_factor_setup"))
# Go to registration form
return flask.render_template("user/register.html", form=form)
|
ed0c65daf3dfaa7fce315eb510c59c171d9f16d0
| 3,643,258
|
import numpy as np
import tensorflow as tf
def project_in_2D(K, camera_pose, mesh, resolution_px):
"""
Project all 3D triangle vertices in the mesh into
the 2D image of given resolution
Parameters
----------
K: ndarray
Camera intrinsics matrix, 3x3
camera_pose: ndarray
Camera pose (inverse of extrinsics), 4x4
mesh: ndarray
Triangles to be projected in 2d, (Nx3x3)
resolution_px: tuple
Resolution of image in pixel
Returns
-------
coords_projected_2D: ndarray
Triangle vertices projected in 2D and clipped to
image resolution
"""
resolution_x_px, resolution_y_px = resolution_px # image resolution in pixels
# Decompose camera pose into rotation and translation
RT = camera_pose[:-1, :] # remove homogeneous row
R = RT[:, :-1] # rotation matrix 3x3
T = RT[:, -1:] # translation vector 3x1
# Invert the camera pose matrix to get the camera extrinsics
# Due to the particular matrix geometry we can avoid raw inversion
    Rc = tf.linalg.matrix_transpose(R)  # tf.matrix_transpose in TF 1.x
Tc = tf.matmul(-Rc, T)
RT = tf.concat([Rc, Tc], axis=-1) # camera extrinsics
# Correct reference system of extrinsics matrix
# y is down: (to align to the actual pixel coordinates used in digital images)
# right-handed: positive z look-at direction
correction_factor = tf.constant(value=np.array([[1., 0., 0.],
[0., -1., 0.],
[0., 0., -1.]]), dtype=tf.float32)
RT = tf.matmul(correction_factor, RT)
# Compose whole camera projection matrix (3x4)
P = tf.matmul(K, RT)
mesh_flat = tf.reshape(mesh, shape=(-1, 3))
len_mesh_flat = tf.shape(mesh_flat)[0]
# Create constant tensor to store 3D model coordinates
coords_3d_h = tf.concat([mesh_flat, tf.ones(shape=(len_mesh_flat, 1), dtype=tf.float32)], axis=-1) # n_triangles, 4
coords_3d_h = tf.transpose(coords_3d_h, perm=[1, 0]) # 4, n_triangles
# Project 3D vertices into 2D
coords_projected_2D_h = tf.transpose(tf.matmul(P, coords_3d_h), perm=[1, 0]) # n_triangles, 3
coords_projected_2D = coords_projected_2D_h[:, :2] / (coords_projected_2D_h[:, 2:3] + 1e-8)
# Clip indexes in image range
coords_projected_2D_x_clip = tf.clip_by_value(coords_projected_2D[:, 0:0 + 1],
clip_value_min=-1, clip_value_max=resolution_x_px)
coords_projected_2D_y_clip = tf.clip_by_value(coords_projected_2D[:, 1:1 + 1],
clip_value_min=-1, clip_value_max=resolution_y_px)
return tf.concat([coords_projected_2D_x_clip, coords_projected_2D_y_clip], axis=-1)
|
9615d940fe083853e0bc179b79e1a19b7f9304bf
| 3,643,259
|
from typing import Any
def forbidden(description: Any) -> APIGatewayProxyResult:
"""Return a response with FORBIDDEN status code."""
error = ForbiddenError(description)
return _build_response(error, HTTPStatus.FORBIDDEN)
|
7b87e41081f1f7fa8f1e140a2c4d5ee597222193
| 3,643,260
|
import datetime
def str_2_datetime(p_str, fmt="%Y-%m-%d %H:%M:%S"):
""" 将字符串转换成日期
:param p_str: 原始时间字符串
:param fmt: 时间格式
:rtype: datetime.datetime
"""
# don't need to transform
if isinstance(p_str, datetime.datetime):
return p_str
if not isinstance(p_str, str):
raise TypeError("params `p_str` must be type of str")
return datetime.datetime.strptime(p_str, fmt)
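# A minimal usage sketch (illustrative value):
parsed = str_2_datetime("2021-03-01 12:30:00")
print(parsed.year, parsed.month)  # 2021 3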
|
0fa86e0aebcf2c2ff53ceb26ae93ed762175ef03
| 3,643,261
|
def traitement(l):
"""Permet de retirer les cartes blanches inutiles"""
while l[-1][1] == 'nan':
del l[-1]
return l
|
d21a7d493a35fc53195315da9b824b0ca3c8ba25
| 3,643,262
|
import os
from gtts import gTTS
def save_audio(text: str, filename: str, dir: str):
"""
Converts text to audio and saves
Notes
-----
If the .mp3 file extension is missing in the filename, it will be added
If a file with the same name exists, it will not save, only notify the user
Returns
_______
Path : str
"""
# Make the path to the folder
path = '{0}/{1}'.format(dir, filename)
if not filename.endswith('.mp3'):
path += '.mp3'
# Generates and saves audio file
tts = gTTS(text=text, lang='en')
# Only saves when file does not exist
if os.path.isfile(path):
print("File named {0} already exist, will not safe".format(path))
else:
tts.save(path)
return path
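# A minimal usage sketch: assumes the gTTS package is installed and network access
# is available (gTTS calls an online TTS service), and that the target directory exists.
audio_path = save_audio("hello world", "greeting", "/tmp")
print(audio_path)  # expected: /tmp/greeting.mp3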
|
a8306dfb56cbb00f99ca3dc658991ba7e42fa021
| 3,643,263
|
import os
import logging
def build_report(test_controller):
"""Report on the test results."""
options = test_controller.options
citest_log_dir = os.path.join(options.log_dir, 'citest_logs')
if not os.path.exists(citest_log_dir):
logging.warning('%s does not exist -- no citest logs.', citest_log_dir)
return
response = run_quick(
'cd {log_dir}'
'; python -m citest.reporting.generate_html_report --index *.journal'
.format(log_dir=citest_log_dir))
if response.returncode != 0:
logging.error('Error building report: %s', response.stdout)
logging.info('Logging information is in %s', options.log_dir)
return test_controller.build_summary()
|
4cfe128cae57575c35983aad26cd0bf846617c3a
| 3,643,264
|
from datetime import datetime
import bisect
def group_frames_by_track_date(frames):
"""Classify frames by track and date."""
hits = {}
grouped = {}
dates = {}
footprints = {}
metadata = {}
for h in frames:
if h['_id'] in hits: continue
fields = h['fields']['partial'][0]
#print("h['_id'] : %s" %h['_id'])
# get product url; prefer S3
prod_url = fields['urls'][0]
if len(fields['urls']) > 1:
for u in fields['urls']:
if u.startswith('s3://'):
prod_url = u
break
#print("prod_url : %s" %prod_url)
hits[h['_id']] = "%s/%s" % (prod_url, fields['metadata']['archive_filename'])
match = SLC_RE.search(h['_id'])
#print("match : %s" %match)
if not match:
raise RuntimeError("Failed to recognize SLC ID %s." % h['_id'])
day_dt = datetime(int(match.group('start_year')),
int(match.group('start_month')),
int(match.group('start_day')),
0, 0, 0)
#print("day_dt : %s " %day_dt)
bisect.insort(grouped.setdefault(fields['metadata']['trackNumber'], {}) \
.setdefault(day_dt, []), h['_id'])
slc_start_dt = datetime(int(match.group('start_year')),
int(match.group('start_month')),
int(match.group('start_day')),
int(match.group('start_hour')),
int(match.group('start_min')),
int(match.group('start_sec')))
#print("slc_start_dt : %s" %slc_start_dt)
slc_end_dt = datetime(int(match.group('end_year')),
int(match.group('end_month')),
int(match.group('end_day')),
int(match.group('end_hour')),
int(match.group('end_min')),
int(match.group('end_sec')))
#print("slc_end_dt : %s" %slc_end_dt)
dates[h['_id']] = [ slc_start_dt, slc_end_dt ]
footprints[h['_id']] = fields['location']
metadata[h['_id']] = fields['metadata']
#break
#print("grouped : %s" %grouped)
logger.info("grouped keys : %s" %grouped.keys())
return {
"hits": hits,
"grouped": grouped,
"dates": dates,
"footprints": footprints,
"metadata": metadata,
}
|
327a6357c5ce8fc9a54d27107e2cb43424dd7630
| 3,643,265
|
import plotly.graph_objects as go
# Note: `theme` and `task_order` are module-level globals defined elsewhere in the source project.
def generate_violin_figure(dataframe, columns, ytitle, legend_title=None):
""" Plot 2 columns of data as violin plot, grouped by block.
:param dataframe: Variance of projections.
:type dataframe: pandas.DataFrame
:param columns: 2 columns for the negative and the positive side of the violins.
:type columns: list
:param ytitle: Title of Y-axis. What is being plotted? What are the units of the data?
:type ytitle: str
:param legend_title: What's the common denominator of the columns?
:type legend_title: str
:return: Figure object of graph.
:rtype: plotly.graph_objs.Figure
"""
legend = go.layout.Legend(
xanchor='right',
yanchor='top',
orientation='v',
title=legend_title,
)
fig = go.Figure()
fig.layout.update(xaxis_title='Task',
yaxis_title=ytitle,
legend=legend,
margin=theme['graph_margins'])
if dataframe.empty:
return fig
# Make sure we plot only 2 columns, left and right.
columns = columns[:2]
sides = ('negative', 'positive')
grouped = dataframe.groupby('task')
for name, group_df in grouped:
for i, col in enumerate(columns):
fig.add_trace(go.Violin(x=group_df['task'].map(task_order),
y=group_df[col],
legendgroup=col, scalegroup=col, name=col,
side=sides[i],
pointpos=i - 0.5,
line_color=theme[col],
text=[f"{col}<br />participant: {j['user']}<br />"
f"block: {j['block']}<br />condition: {j['condition']}"
for _, j in group_df.iterrows()],
hoverinfo='y+text',
spanmode='hard',
showlegend=bool(name == dataframe['task'].unique()[0]), # Only 1 legend.
)
)
# update characteristics shared by all traces
fig.update_traces(meanline={'visible': True, 'color': 'dimgray'},
box={'visible': True, 'width': 0.5, 'line_color': 'dimgray'},
points='all', # Show all points.
jitter=0.1, # Add some jitter on points for better visibility.
scalemode='count') # Scale violin plot area with total count.
fig.update_layout(violingap=0, violingroupgap=0, violinmode='overlay', hovermode='closest')
fig.update_xaxes(tickvals=task_order[dataframe['task'].unique()],
ticktext=task_order[dataframe['task'].unique()].index)
fig.update_yaxes(zeroline=True, zerolinewidth=2, zerolinecolor='LightPink')
return fig
|
23baa052cf835ba55a43ffa496d606cccadb0c5b
| 3,643,266
|
import numpy as np
def measure_single(state, bit):
    """
    Measure a single qubit and collapse the state accordingly.
    :param state: state tensor with one axis of size 2 per qubit
    :param bit: index of the qubit to measure
    :return: (measurement outcome, collapsed state)
    """
n = len(state.shape)
axis = list(range(n))
axis.remove(n - 1 - bit)
probs = np.sum(np.abs(state) ** 2, axis=tuple(axis))
rnd = np.random.rand()
# measure single bit
if rnd < probs[0]:
out = 0
prob = probs[0]
else:
out = 1
prob = probs[1]
# collapse single bit
if out == 0:
matrix = np.array([[1.0 / np.sqrt(prob), 0.0],
[0.0, 0.0]], complex)
else:
matrix = np.array([[0.0, 0.0],
[0.0, 1.0 / np.sqrt(prob)]], complex)
state = transfer_state(state, matrix, [bit])
return out, state
|
ff2c18039e6900febaa731f9a3db9f16b797e18b
| 3,643,267
|
import numpy as np
def anchor_to_offset(anchors, ground_truth):
"""Encodes the anchor regression predictions with the
ground truth.
Args:
anchors: A numpy array of shape (N, 6) representing
the generated anchors.
ground_truth: A numpy array of shape (6,) containing
the label boxes in the anchor format.
Returns:
anchor_offsets: A numpy array of shape (N, 6)
encoded/normalized with the ground-truth, representing the
offsets.
"""
fc.check_anchor_format(anchors)
anchors = np.asarray(anchors).reshape(-1, 6)
ground_truth = np.reshape(ground_truth, (6,))
# t_x_gt = (x_gt - x_anch)/dim_x_anch
t_x_gt = (ground_truth[0] - anchors[:, 0]) / anchors[:, 3]
# t_y_gt = (y_gt - y_anch)/dim_y_anch
t_y_gt = (ground_truth[1] - anchors[:, 1]) / anchors[:, 4]
# t_z_gt = (z_gt - z_anch)/dim_z_anch
t_z_gt = (ground_truth[2] - anchors[:, 2]) / anchors[:, 5]
# t_dx_gt = log(dim_x_gt/dim_x_anch)
t_dx_gt = np.log(ground_truth[3] / anchors[:, 3])
# t_dy_gt = log(dim_y_gt/dim_y_anch)
t_dy_gt = np.log(ground_truth[4] / anchors[:, 4])
# t_dz_gt = log(dim_z_gt/dim_z_anch)
t_dz_gt = np.log(ground_truth[5] / anchors[:, 5])
anchor_offsets = np.stack((t_x_gt,
t_y_gt,
t_z_gt,
t_dx_gt,
t_dy_gt,
t_dz_gt), axis=1)
return anchor_offsets
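# An illustrative, standalone check of the encoding formulas above (it does not call
# anchor_to_offset itself, since fc.check_anchor_format is a project helper not shown
# here). Values are made up.
anchor_demo = np.array([[0.0, 0.0, 0.0, 2.0, 2.0, 2.0]])   # x, y, z, dim_x, dim_y, dim_z
gt_demo = np.array([1.0, 2.0, 3.0, 4.0, 2.0, 1.0])
t_xyz = (gt_demo[:3] - anchor_demo[0, :3]) / anchor_demo[0, 3:]
t_dims = np.log(gt_demo[3:] / anchor_demo[0, 3:])
print(np.concatenate([t_xyz, t_dims]))  # [0.5, 1.0, 1.5, log(2), 0.0, log(0.5)]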
|
3aced37f0838d2ab4f90ce0e212747111fc87876
| 3,643,268
|
import cv2
def horizontal_flip(img_array):
"""Flip image horizontally."""
img_array = cv2.flip(img_array, 1)
return img_array
|
7f53442b072127e5c02253aefabcc8e7bd422504
| 3,643,269
|
def chunker(file_path):
"""
Read a block of lines from a file
:param file_path:
:return:
"""
words = []
with open(file_path, 'r') as file_object:
for word in file_object:
word = word.strip()
if word:
words.append(word)
return words
|
a60b6f3cc7003955ae6acd8ac5e74574cdbd5976
| 3,643,270
|
import subprocess
def c2c_dist(commande,octree_lvl=0):
"""
Commande CC cloud2cloud distance
"""
if octree_lvl==0:
commande+=" -C2C_DIST -split_xyz -save_clouds"
else:
commande+=" -C2C_DIST -split_xyz -octree_level "+str(octree_lvl)+" -save_clouds"
subprocess.call(commande)
return True
|
723082644cb6d8b24cc27b634a3bca2b8caabe4a
| 3,643,271
|
def legalize_names(varnames):
"""returns a dictionary for conversion of variable names to legal
parameter names.
"""
var_map = {}
for var in varnames:
new_name = var.replace("_", "__").replace("$", "_").replace(".", "_")
assert new_name not in var_map
var_map[var] = new_name
return var_map
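# A minimal illustrative call (made-up variable names):
print(legalize_names(["a.b", "c$d", "e_f"]))
# -> {'a.b': 'a_b', 'c$d': 'c_d', 'e_f': 'e__f'}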
|
ad8e9ef3394d4ac3cfa80198f488c1834bd227fc
| 3,643,272
|
def _IsUidUsed(uid):
"""Check if there is any process in the system running with the given user-id
@type uid: integer
@param uid: the user-id to be checked.
"""
pgrep_command = [constants.PGREP, "-u", uid]
result = utils.RunCmd(pgrep_command)
if result.exit_code == 0:
return True
elif result.exit_code == 1:
return False
else:
raise errors.CommandError("Running pgrep failed. exit code: %s"
% result.exit_code)
|
8a4e529a98298ec4c2d9df30c6fc28a91c124edd
| 3,643,273
|
def mobilenetv3_large(data_channel):
"""
Constructs a MobileNetV3-Large model
"""
cfgs = [
# k, t, c, SE, NL, s
[3, 16, 16, 0, 0, 1],
[3, 64, 24, 0, 0, 2],
[3, 72, 24, 0, 0, 1],
[5, 72, 40, 1, 0, 2],
[5, 120, 40, 1, 0, 1],
[5, 120, 40, 1, 0, 1],
[3, 240, 80, 0, 1, 2],
[3, 200, 80, 0, 1, 1],
[3, 184, 80, 0, 1, 1],
[3, 184, 80, 0, 1, 1],
[3, 480, 112, 1, 1, 1],
[3, 672, 112, 1, 1, 1],
[5, 672, 160, 1, 1, 1],
[5, 672, 160, 1, 1, 2],
[5, 960, 160, 1, 1, 1]
]
model = MobileNetV3(cfgs, mode='large', data_channel=data_channel)
model.set_name(BackboneName.MobileNetv3_large)
return model
|
ec14d6628bf9e69f05a79f4207a32e00fbae1b8b
| 3,643,274
|
import tensorflow as tf
def ravel_lom_dims(tensor, name='ravel_lom_dims'):
    """Assumes LOM is in the last 3 dims."""
    # Assumes static shapes; `tensor.shape.as_list()` is the standard TF accessor.
    return tf.reshape(tensor, tensor.shape.as_list()[:-3] + [-1], name=name)
|
f0f56c4f747b4a40e63bddbb7d6dd3452044151f
| 3,643,275
|
import numpy as np
import pandas as pd
from numpy.lib.stride_tricks import sliding_window_view
# Note: `calc_metrics` is a project helper defined elsewhere in the source module.
def run_cv(cfg, df, horiz, freq, cv_start, cv_stride=1, dc_dict=None,
metric="smape"):
"""Run a sliding-window temporal cross-validation (aka backtest) using a
given forecasting function (`func`).
"""
y = df["demand"].values
# allow only 1D time-series arrays
assert(y.ndim == 1)
params, func = cfg
if len(y) == 1:
y = np.pad(y, [1, 0], constant_values=1)
# the cross-val horizon length may shrink depending on the length of
# historical data; shrink the horizon if it is >= the timeseries
if horiz >= len(y):
cv_horiz = len(y) - 1
else:
cv_horiz = horiz
if len(df) == len(y):
ts = df.index
else:
assert len(y) > len(df)
diff = len(y) - len(df)
ts = np.append(
pd.date_range(end=df.index[0], freq=freq, periods=diff+1), df.index)
# sliding window horizon actuals
Y = sliding_window_view(y[cv_start:], cv_horiz)[::cv_stride,:]
Ycv = []
# | y | horiz |..............|
# | y | horiz |.............|
# | y | horiz |............|
# ::
# ::
# | y | horiz |
for i in range(cv_start, len(y)-cv_horiz+1, cv_stride):
yp = func(y[:i], cv_horiz, freq, dc=dc_dict[i])
Ycv.append(yp)
# keep the backtest forecasts at each cv_stride
Ycv = np.vstack(Ycv)
# keep the backtest forecast time indices
Yts = sliding_window_view(ts[cv_start:], cv_horiz)[::cv_stride,:]
assert Yts.shape == Y.shape
assert Yts.shape == Ycv.shape
assert not np.any(np.isnan(Ycv))
assert Ycv.shape == Y.shape
# calc. error metrics
df_results = calc_metrics(Y, Ycv, metric)
df_results.insert(0, "model_type", params.split("|")[0])
df_results.insert(1, "params", params)
# store the final backtest window actuals and predictions
df_results["y_cv"] = [Y]
df_results["yp_cv"] = [Ycv]
df_results["ts_cv"] = [Yts]
# generate the final forecast (1-dim)
df_results["yhat"] = [func(y, horiz, freq, dc=dc_dict[len(y)-1])]
return df_results
|
cb95657aeaa4cb74c6252d46029db88c6be18ddb
| 3,643,276
|
import pyarrow as pa
def ST_Area(geos):
"""
Calculate the 2D Cartesian (planar) area of geometry.
:type geos: Series(dtype: object)
:param geos: Geometries in WKB form.
:rtype: Series(dtype: float64)
:return: The value that represents the area of geometry.
:example:
>>> import pandas
>>> import arctern
>>> data = ["POLYGON((0 0,1 0,1 1,0 1,0 0))", "POLYGON((0 0,0 8,8 8,8 0,0 0))"]
>>> data = pandas.Series(data)
    >>> rst = arctern.ST_Area(arctern.ST_GeomFromText(data))
>>> print(rst)
0 1.0
1 64.0
dtype: float64
"""
arr_geos = pa.array(geos, type='binary')
return arctern_caller(arctern_core_.ST_Area, arr_geos)
|
ae69ec90c9e5c54c5f6afa468d6cb1212e64eaf4
| 3,643,277
|
import subprocess
def check_conda_packages(edit_mode=False, packages=None):
"""Check conda inslalled packages information filtering for packages.
It is Python/Conda environment dependent.
Returns:
dict(str): Dictionary filled with respective information.
"""
info = {'CONDA PACKAGES': {}}
all_packages = ''
try:
if not edit_mode:
all_packages = _run_subprocess_split(['conda', 'list', '--no-pip', '--export'])
else:
all_packages = _run_subprocess_split(['conda', 'list', '--no-pip',
'--export', '--develop'])
except (subprocess.CalledProcessError, FileNotFoundError2and3):
info['CONDA PACKAGES']['Status'] = 'Conda not available!'
else:
# split lines and remove head
line_packages = all_packages.split("\n")[3:]
# clean spaces, create a list and insert in the dictionary
for line in line_packages:
splitted = line.split('=')
cleaned = ' '.join(splitted).split()
info['CONDA PACKAGES'][cleaned[0]] = cleaned[1]
if packages:
info['CONDA PACKAGES'] = _filter(info['CONDA PACKAGES'], packages)
return info
|
0a55356b71e692068e85b05902a1f9c2d495fa3f
| 3,643,278
|
from operator import matmul
import numpy as np
def P_from_K_R_t(K, R, t):
"""Returns the 3x4 projection matrix P = K [R | t]."""
K = K.astype(np.float64)
R = R.astype(np.float64)
t = t.astype(np.float64)
return matmul(K, np.column_stack((R, t)))
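# A small worked example (made-up intrinsics; identity rotation, unit translation):
K_demo = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
R_demo = np.eye(3)
t_demo = np.array([0.0, 0.0, 1.0])
P_demo = P_from_K_R_t(K_demo, R_demo, t_demo)
print(P_demo.shape)  # (3, 4); last column equals K @ t = [320., 240., 1.]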
|
0304ea513df81a67e653ba1e3516c39ec38f94ad
| 3,643,279
|
from typing import List
from typing import Union
from typing import Callable
from typing import Type
from typing import OrderedDict
from typing import Any
from inspect import signature
from functools import wraps
from types import FunctionType
# Note: _verify_decorator_correctness, _get_bound_arguments, _get_key_value_pairs and the
# custom exception classes (PreconditionViolatedError, MalformedDecoratorError) are defined
# elsewhere in the source module.
def multi_value_precondition(parameter_selector: List[Union[int, str]], predicate: Callable[..., bool],
exception_factory: Union[Type[BaseException], Callable[[OrderedDict], BaseException]]
=PreconditionViolatedError) -> Any:
"""
This is a factory that will create a decorator for a method based on a parameter selector and a predicate. The
decorator will cause the method to raise an Exception (PreConditionViolatedError) if the selected parameters do not
satisfy the predicate.
:param parameter_selector: a selector that indicates which parameters of the method should be checked. This may
be ints for positional parameters or strings for keyword parameters. The parameter_selector will indicate some
parameters, these will be passed (positionally in the listed order) to the predicate.
:param predicate: a predicate that evaluates parameters (function that returns True or False)
:param exception_factory: Either an Exception class or a Callable that can create the desired Exception (defaults
to PreconditionViolatedError)
:return: a decorator based on the passed parameter selector and predicate
"""
def decorator(decorated_function):
"""
This decorator adds a check to this function that one of its parameters matches a predicate
:param decorated_function: The function to be decorated
:return: The decorated function
"""
_signature = signature(decorated_function)
_verify_decorator_correctness(_signature, parameter_selector, exception_factory)
@wraps(decorated_function)
def function_with_condition(*args, **kwargs):
"""
a decorated function that checks parameter values of the original match a given predicate.
If the parameters do not match, the original function is never called.
:param args: The positional arguments for the original function
:param kwargs: The keyword arguments for the original function
:return: The result of the function if the parameters matched the predicate
:raises: PreConditionViolatedError if the parameters of the function do not match the predicate
"""
arguments = _get_bound_arguments(_signature, *args, **kwargs)
selected_parameters = _get_key_value_pairs(arguments, parameter_selector)
if not predicate(*selected_parameters.values()):
if isinstance(exception_factory, type) and issubclass(exception_factory, BaseException):
parameter_descriptions = map(lambda key_value: _parameter_description(*key_value),
selected_parameters.items())
descriptions = ', '.join(parameter_descriptions).capitalize()
message = f"{descriptions} failed to pass precondition {predicate.__name__}"
raise exception_factory(message)
elif isinstance(exception_factory, FunctionType):
raise exception_factory(selected_parameters)
else:
raise MalformedDecoratorError(f'Incorrect type for exception_factory: {type(exception_factory)}')
return decorated_function(*args, **kwargs)
return function_with_condition
return decorator
|
97632d8e77858e3d854a8f610fae772a8a6c3c5b
| 3,643,280
|
from operator import itemgetter
import json
def activity_list_retrieve_view(request): # activityListRetrieve
"""
Retrieve activity so we can populate the news page
:param request:
:return:
"""
status = ''
activity_list = []
activity_manager = ActivityManager()
activity_notice_seed_list = []
activity_post_list = []
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
voter_friend_we_vote_id_list = []
voter_we_vote_id = ''
activity_tidbit_we_vote_id_list = request.GET.getlist('activity_tidbit_we_vote_id_list[]')
activity_tidbit_we_vote_id_list = list(filter(None, activity_tidbit_we_vote_id_list))
if positive_value_exists(voter_device_id):
voter_we_vote_id = fetch_voter_we_vote_id_from_voter_device_link(voter_device_id)
if not positive_value_exists(voter_we_vote_id):
status += "RETRIEVE_ACTIVITY_LIST_MISSING_VOTER_WE_VOTE_ID "
json_data = {
'status': status,
'success': False,
'activity_list': activity_list,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
# Retrieve the NOTICE_FRIEND_ENDORSEMENTS_SEED and the ActivityPost entries below
results = activity_manager.retrieve_activity_notice_seed_list_for_recipient(
recipient_voter_we_vote_id=voter_we_vote_id,
kind_of_seed_list=[NOTICE_FRIEND_ENDORSEMENTS_SEED],
limit_to_activity_tidbit_we_vote_id_list=activity_tidbit_we_vote_id_list)
if results['success']:
activity_notice_seed_list = results['activity_notice_seed_list']
voter_friend_we_vote_id_list = results['voter_friend_we_vote_id_list']
else:
status += results['status']
status += "RETRIEVE_ACTIVITY_LIST_FAILED "
for activity_notice_seed in activity_notice_seed_list:
new_positions_entered_count = 0
position_name_list = []
position_we_vote_id_list = []
# In this scenario we want to return both friends and public values
# Position names
if positive_value_exists(activity_notice_seed.position_names_for_friends_serialized):
position_name_list += json.loads(activity_notice_seed.position_names_for_friends_serialized)
if positive_value_exists(activity_notice_seed.position_names_for_public_serialized):
position_name_list += json.loads(activity_notice_seed.position_names_for_public_serialized)
# Position we_vote_ids
if positive_value_exists(activity_notice_seed.position_we_vote_ids_for_friends_serialized):
position_we_vote_id_list += json.loads(activity_notice_seed.position_we_vote_ids_for_friends_serialized)
if positive_value_exists(activity_notice_seed.position_we_vote_ids_for_public_serialized):
position_we_vote_id_list += json.loads(activity_notice_seed.position_we_vote_ids_for_public_serialized)
new_positions_entered_count += len(position_we_vote_id_list)
if not positive_value_exists(activity_notice_seed.we_vote_id):
try:
activity_notice_seed.save()
except Exception as e:
status += "COULD_NOT_UPDATE_SEED_WE_VOTE_ID: " + str(e) + ' '
activity_notice_seed_dict = {
'date_created': activity_notice_seed.date_of_notice.strftime('%Y-%m-%d %H:%M:%S'),
'date_last_changed': activity_notice_seed.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
'date_of_notice': activity_notice_seed.date_of_notice.strftime('%Y-%m-%d %H:%M:%S'),
'id': activity_notice_seed.id, # We normalize to generate activityTidbitKey
'activity_notice_seed_id': activity_notice_seed.id,
'kind_of_activity': "ACTIVITY_NOTICE_SEED",
'kind_of_seed': activity_notice_seed.kind_of_seed,
'new_positions_entered_count': new_positions_entered_count,
'position_name_list': position_name_list,
'position_we_vote_id_list': position_we_vote_id_list,
'speaker_name': activity_notice_seed.speaker_name,
'speaker_organization_we_vote_id': activity_notice_seed.speaker_organization_we_vote_id,
'speaker_voter_we_vote_id': activity_notice_seed.speaker_voter_we_vote_id,
'speaker_profile_image_url_medium': activity_notice_seed.speaker_profile_image_url_medium,
'speaker_profile_image_url_tiny': activity_notice_seed.speaker_profile_image_url_tiny,
'speaker_twitter_handle': activity_notice_seed.speaker_twitter_handle,
'speaker_twitter_followers_count': activity_notice_seed.speaker_twitter_followers_count,
'we_vote_id': activity_notice_seed.we_vote_id,
}
activity_list.append(activity_notice_seed_dict)
# ####################################################
# Retrieve entries directly in the ActivityPost table
results = activity_manager.retrieve_activity_post_list_for_recipient(
recipient_voter_we_vote_id=voter_we_vote_id,
limit_to_activity_tidbit_we_vote_id_list=activity_tidbit_we_vote_id_list,
voter_friend_we_vote_id_list=voter_friend_we_vote_id_list)
if results['success']:
activity_post_list = results['activity_post_list']
else:
status += results['status']
status += "RETRIEVE_ACTIVITY_POST_LIST_FAILED "
for activity_post in activity_post_list:
date_created_string = ''
if activity_post.date_created:
date_created_string = activity_post.date_created.strftime('%Y-%m-%d %H:%M:%S')
if not positive_value_exists(activity_post.we_vote_id):
try:
activity_post.save()
except Exception as e:
status += "COULD_NOT_UPDATE_POST_WE_VOTE_ID: " + str(e) + ' '
activity_post_dict = {
'date_created': date_created_string,
'date_last_changed': activity_post.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
'date_of_notice': date_created_string,
'id': activity_post.id, # We normalize to generate activityTidbitKey
'activity_post_id': activity_post.id,
'kind_of_activity': 'ACTIVITY_POST',
'kind_of_seed': '',
'new_positions_entered_count': 0,
'position_name_list': [],
'position_we_vote_id_list': [],
'speaker_name': activity_post.speaker_name,
'speaker_organization_we_vote_id': activity_post.speaker_organization_we_vote_id,
'speaker_voter_we_vote_id': activity_post.speaker_voter_we_vote_id,
'speaker_profile_image_url_medium': activity_post.speaker_profile_image_url_medium,
'speaker_profile_image_url_tiny': activity_post.speaker_profile_image_url_tiny,
'speaker_twitter_handle': activity_post.speaker_twitter_handle,
'speaker_twitter_followers_count': activity_post.speaker_twitter_followers_count,
'statement_text': activity_post.statement_text,
'visibility_is_public': activity_post.visibility_is_public,
'we_vote_id': activity_post.we_vote_id,
}
activity_list.append(activity_post_dict)
# Now cycle through these activities and retrieve all related comments
activity_list_with_comments = []
for activity_tidbit_dict in activity_list:
results = activity_manager.retrieve_activity_comment_list(
parent_we_vote_id=activity_tidbit_dict['we_vote_id'])
activity_comment_list = []
if results['success']:
activity_comment_object_list = results['activity_comment_list']
for activity_comment in activity_comment_object_list:
# Retrieve the Child comments
child_results = activity_manager.retrieve_activity_comment_list(
parent_comment_we_vote_id=activity_comment.we_vote_id)
child_comment_list = []
                if child_results['success']:
child_comment_object_list = child_results['activity_comment_list']
for child_comment in child_comment_object_list:
child_comment_dict = {
'date_created': child_comment.date_created.strftime('%Y-%m-%d %H:%M:%S'),
'date_last_changed': child_comment.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
'commenter_name': child_comment.commenter_name,
'commenter_organization_we_vote_id': child_comment.commenter_organization_we_vote_id,
'commenter_voter_we_vote_id': child_comment.commenter_voter_we_vote_id,
'commenter_profile_image_url_medium': child_comment.commenter_profile_image_url_medium,
'commenter_profile_image_url_tiny': child_comment.commenter_profile_image_url_tiny,
'commenter_twitter_handle': child_comment.commenter_twitter_handle,
'commenter_twitter_followers_count': child_comment.commenter_twitter_followers_count,
'parent_we_vote_id': child_comment.parent_we_vote_id,
'parent_comment_we_vote_id': child_comment.parent_comment_we_vote_id,
'statement_text': child_comment.statement_text,
'visibility_is_public': child_comment.visibility_is_public,
'we_vote_id': child_comment.we_vote_id,
}
child_comment_list.append(child_comment_dict)
activity_comment_dict = {
'comment_list': child_comment_list,
'date_created': activity_comment.date_created.strftime('%Y-%m-%d %H:%M:%S'),
'date_last_changed': activity_comment.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
'commenter_name': activity_comment.commenter_name,
'commenter_organization_we_vote_id': activity_comment.commenter_organization_we_vote_id,
'commenter_voter_we_vote_id': activity_comment.commenter_voter_we_vote_id,
'commenter_profile_image_url_medium': activity_comment.commenter_profile_image_url_medium,
'commenter_profile_image_url_tiny': activity_comment.commenter_profile_image_url_tiny,
'commenter_twitter_handle': activity_comment.commenter_twitter_handle,
'commenter_twitter_followers_count': activity_comment.commenter_twitter_followers_count,
'parent_we_vote_id': activity_comment.parent_we_vote_id,
'parent_comment_we_vote_id': activity_comment.parent_comment_we_vote_id,
'statement_text': activity_comment.statement_text,
'visibility_is_public': activity_comment.visibility_is_public,
'we_vote_id': activity_comment.we_vote_id,
}
activity_comment_list.append(activity_comment_dict)
activity_tidbit_dict['activity_comment_list'] = activity_comment_list
activity_list_with_comments.append(activity_tidbit_dict)
# Order entries in the activity_list by "date_created"
activity_list_ordered = sorted(activity_list_with_comments, key=itemgetter('date_created'), reverse=True)
json_data = {
'status': status,
'success': True,
'activity_list': activity_list_ordered,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
|
4ab7d7e2cd5c0a3c812b6ec1729efda19fb87ced
| 3,643,281
|
import argparse
from textwrap import dedent
def arg_parser() -> argparse.Namespace:
""" Reads command line arguments.
:returns: Values of accepted command line arguments.
"""
_parser = argparse.ArgumentParser(
description=dedent(
"""Find all recipes in a directory,
build them and push all their images to an sregistry.
Recipes are identified by the suffix ".recipe".
The image name will be taken from the recipe name
using everything from the first character till the first "." occurrence.
The version will be taken from the recipe name
using everything from the first "." till the suffix ".recipe".
The collection name will be taken from the recipes parent folder.
"""
)
)
_parser.add_argument(
'--path',
'-p',
type=str,
help="Base path to search recipes.",
required=True
)
_parser.add_argument(
'--image_type',
'-i',
type=str,
help="The type of image to be build."
)
_parser.add_argument(
'--build_log_dir',
'-b',
type=str,
help="The directory, that should contain the build logs. Will be created if not existent."
)
return _parser.parse_args()
|
6bf2f99cbab0674fb5b127b58cdb0e28ad87ef0a
| 3,643,282
|
import http
from typing import Optional
def edit_action(
request: http.HttpRequest,
pk: int,
workflow: Optional[models.Workflow] = None,
action: Optional[models.Action] = None,
) -> http.HttpResponse:
"""Invoke the specific edit view.
:param request: Request object
:param pk: Action PK
    :param workflow: Workflow being processed
:param action: Action being edited (set by the decorator)
:return: HTML response
"""
del pk
return services.ACTION_PROCESS_FACTORY.process_edit_request(
request,
workflow,
action)
|
43f128dfe2abd47c1c6cf78d667343d9086aeb98
| 3,643,283
|
from typing import Any
from typing import Set
from typing import KeysView
def to_set(data: Any) -> Set[Any]:
"""Convert data to a set. A single None value will be converted to the empty set.
```python
x = fe.util.to_set(None) # set()
x = fe.util.to_set([None]) # {None}
x = fe.util.to_set(7) # {7}
x = fe.util.to_set([7, 8]) # {7,8}
x = fe.util.to_set({7}) # {7}
x = fe.util.to_set((7)) # {7}
```
Args:
data: Input data, within or without a python container. The `data` must be hashable.
Returns:
The input `data` but inside a set instead of whatever other container type used to hold it.
"""
if data is None:
return set()
if not isinstance(data, set):
if isinstance(data, (tuple, list, KeysView)):
data = set(data)
else:
data = {data}
return data
|
df2649d0b7c7c2323984edd3eeea76eff0eab4d2
| 3,643,284
|
def Pei92(wavelength, Av, z, Rv=-99.0, ext_law="smc", Xcut=False):
"""
Extinction laws from Pei 1992 article
Parameters
----------
    wavelength: `array` or `float`
        wavelength in angstroms
    Av: `float`
        amount of extinction in the V band
    z: `float`
        redshift
    Rv: `float`, optional, default: -99.
        selective attenuation Rv = Av / E(B-V)
        if -99., use the default value from the article
        if a float is given, use this value instead
ext_law: `str`
type of extinction law to use.
Choices: mw, lmc, smc
Xcut: `boolean`, optional, default: False
Whether to set attenuation to 0 for wavelength below 700 angstrom
Useful when coupling with X-ray data
Returns
-------
    [Alambda_over_Av, Trans_dust]
        Alambda_over_Av : `array`
            attenuation as a function of wavelength, normalized by Av
            (attenuation in V band)
        Trans_dust: `array`
            transmission through dust as a function of wavelength
"""
wvl = wavelength * 1e-4 / (1 + z)
if ext_law.lower() == "smc":
if Rv == -99.:
Rv = 2.93
a = [185, 27, 0.005, 0.010, 0.012, 0.03]
wvl_ = [0.042, 0.08, 0.22, 9.7, 18, 25]
b = [90, 5.50, -1.95, -1.95, -1.80, 0.0]
n = [2.0, 4.0, 2.0, 2.0, 2.0, 2.0]
elif ext_law.lower() == "lmc":
if Rv == -99.:
Rv = 3.16
a = [175, 19, 0.023, 0.005, 0.006, 0.02]
wvl_ = [0.046, 0.08, 0.22, 9.7, 18, 25]
b = [90, 5.5, -1.95, -1.95, -1.8, 0.0]
n = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]
elif ext_law.lower() == "mw":
if Rv == -99.:
Rv = 3.08
a = [165, 14, 0.045, 0.002, 0.002, 0.012]
wvl_ = [0.046, 0.08, 0.22, 9.7, 18, 25]
b = [90, 4.0, -1.95, -1.95, -1.8, 0.0]
        n = [2.0, 6.5, 2.0, 2.0, 2.0, 2.0]
    else:
        raise ValueError("Unknown ext_law: %s (choose among smc, lmc, mw)" % ext_law)
sums = np.zeros(len(wvl))
for i in range(len(a)):
sums += a[i] / ((wvl / wvl_[i]) ** n[i] + (wvl_[i] / wvl) ** n[i] + b[i])
# Need to check whether extrapolation is needed
# outside the range defined in Pei92
# convert Alambda_over_Ab to Alambda_over_Av
Alambda_over_Av = (1.0 / Rv + 1.0) * sums
# Applied a cut for wavelength below 700 angstrom
# Useful when coupling with Xray data
if Xcut:
w = np.where(wvl < 0.07)
Alambda_over_Av[w] = 0
    # Return optical depth due to dust reddening as a function of wavelength
Tau_dust = Av * Alambda_over_Av / 1.086
Trans_dust = np.exp(-Tau_dust)
Trans_dust[Trans_dust < 0] = 0
Trans_dust[Trans_dust > 1] = 1
return [Alambda_over_Av, Trans_dust]
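# A minimal usage sketch (made-up values): SMC-like extinction with Av = 0.5 mag at
# z = 1 for two rest-frame wavelengths given in angstroms.
wl_demo = np.array([2000.0, 6000.0])
alam_over_av, trans = Pei92(wl_demo, Av=0.5, z=1.0, ext_law="smc")
print(alam_over_av, trans)  # attenuation curve values and dust transmission in [0, 1]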
|
9b0b9690f548319ffed7fcc964dc0e651828371f
| 3,643,285
|
import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from statsmodels.stats.weightstats import DescrStatsW
def plot_profile_avg_with_bounds(
data,
ax=None,
confint_alpha=0.05,
label=None,
xs=None,
axis=0,
bounds: str = "ci",
**kwargs,
):
"""
TODO: Documentation
Parameters
----------
data
ax
confint_alpha
label
kwargs
Returns
-------
"""
with np.errstate(invalid="ignore"):
mean = np.nanmean(data, axis=0)
sem = stats.sem(data)
bounds_map = {
"ci": DescrStatsW(data).tconfint_mean(alpha=confint_alpha),
"sem": (mean - sem, mean + sem),
}
if ax is None:
ax = plt.gca()
if xs is None:
try:
# if the data is an xr.DataArray
xs = data.position
except ValueError:
# if it's a numpy array
xs = np.arange(len(data))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ax.plot(xs, np.nanmean(data, axis=axis), label=label, **kwargs)
lower, upper = bounds_map[bounds]
kwargs.pop("linestyle", None)
kwargs.pop("linewidth", None)
kwargs.pop("lw", None)
ax.fill_between(xs, lower, upper, alpha=0.3, lw=0, **kwargs)
return ax
|
cbce13474608bd710031698120b9ab01c78facc4
| 3,643,286
|
import mimetypes
def get_mimetype(path):
"""
Get (guess) the mimetype of a file.
"""
mimetype, _ = mimetypes.guess_type(path)
return mimetype
|
7677259fcdf052f9647fe41e4b4cb71d83ea50cd
| 3,643,287
|
# Note: `select` is the SQL statement constructor (e.g. from sqlmodel/SQLAlchemy), not the
# standard-library select module; Session, Depends, get_session, Client and Epic come from
# the surrounding FastAPI/SQLModel project.
from sqlmodel import select
async def read_clients_epics(
client_id: int = None, session: Session = Depends(get_session)
):
"""Get epics from a client_id"""
statement = (
select(Client.id, Client.name, Epic.name)
.select_from(Client)
.join(Epic)
.where(Client.id == client_id)
)
results = session.exec(statement).all()
return results
|
e5af5d2776a941cde83ea341143732bcdb67da2a
| 3,643,288
|
def id_number_checksum(gd):
"""
Calculates a Swedish ID number checksum, using the Luhn algorithm
"""
n = s = 0
for c in (gd['year'] + gd['month'] + gd['day'] + gd['serial']):
# Letter? It's an interimspersonnummer and we substitute the letter
# with 1.
if c.isalpha():
c = 1
tmp = ((n % 2) and 1 or 2) * int(c)
if tmp > 9:
tmp = sum([int(i) for i in str(tmp)])
s += tmp
n += 1
if (s % 10) == 0:
return 0
return (((s // 10) + 1) * 10) - s
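# A quick check against the well-known test personnummer 811218-9876:
# the nine digits before the check digit yield a checksum of 6.
print(id_number_checksum({'year': '81', 'month': '12', 'day': '18', 'serial': '987'}))  # 6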
|
bbf0a9fa7f6ed2c2bfc414173fd2ac9e9c1d8835
| 3,643,289
|
import jax.numpy as jnp
def date_loss_l1(pred,
target_min,
target_max,
mask):
"""L1 loss function for dates."""
pred = jnp.squeeze(pred, 0)
loss = 0.
loss += jnp.abs(pred - target_min) * jnp.less(pred, target_min).astype(
pred.dtype)
loss += jnp.abs(pred - target_max) * jnp.greater(pred, target_max).astype(
pred.dtype)
# Mask loss
loss = jnp.multiply(loss, mask.astype(loss.dtype))
return loss
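# A minimal usage sketch (illustrative numbers): predictions below the minimum or above
# the maximum target date are penalised linearly, in-range ones are not.
pred_demo = jnp.array([[5.0, 12.0]])   # leading axis of size 1 is squeezed away
tmin_demo = jnp.array([6.0, 10.0])
tmax_demo = jnp.array([8.0, 15.0])
mask_demo = jnp.array([1.0, 1.0])
print(date_loss_l1(pred_demo, tmin_demo, tmax_demo, mask_demo))  # [1., 0.]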
|
12f0d5a1f7efbb8d51501c4d3fe41d192528010d
| 3,643,290
|
import pandas as pd
def new_single_genres(genres, val):
"""Takes the genres list and returns only one genre back if multiple genres are present
Also has the parameter val with values "high" and "low"
High picks the genres belonging to the existing genres with the highest examples count
Low picks the genres belonging to the existing genres with the least examples count"""
genres_file = "fma_metadata/genres.csv"
reference_genres = pd.read_csv(genres_file)
reference_tracks = reference_genres.iloc[:, 1]
reference_genres = reference_genres.iloc[:, 0]
for index, genre in genres.items():
split = genre.split(",")
if len(split) == 1:
new_genre = split[0]
new_genre = new_genre.strip("[]")
genres[index] = int(new_genre)
elif len(split) > 1:
new_genre = [int(item.strip(" [ ] ")) for item in split]
count = {}
for indices, value in reference_genres.items():
if value in new_genre:
count[value] = reference_tracks[indices]
counts = {k: v for k, v in sorted(count.items(), key=lambda item: item[1])}
if val == "high":
genres[index] = int(list(counts.keys())[-1])
elif val == "low":
genres[index] = int(list(counts.keys())[0])
print("The shape of genres after single is:{}".format(genres.shape))
genres = genres.astype('int')
return genres
|
0cabcb93548007b3cd87dc40740da5d0f4614867
| 3,643,291
|
import sys
import unittest
def run(funcs_to_test=None, tests_to_run=None, verbosity=2):
"""
run testing routine
args:
- funcs_to_test: dict: {lang_name <str>: lang_func <callable>}
of language processing modules to test
if None `corpus.functions` passed
- tests_to_run: list[<str>]: list of tests to run
if None `TESTS_TO_RUN` passed
- verbosity: int: verbosity mode
return: List[str]: list of failing langs
"""
failing_langs = []
funcs_to_test = FUNCS_TO_TEST if funcs_to_test is None else funcs_to_test
tests_to_run = TESTS_TO_RUN if tests_to_run is None else tests_to_run
stream = sys.stderr
log_header = '\n%s\nTESTING: %s_corpus.py\n%s\n\n'
runner = unittest.TextTestRunner(stream, verbosity=verbosity)
for lang, func in funcs_to_test.items():
stream.write(log_header % ('*' * 20, lang, '*' * 20))
lang_fails = []
suite = unittest.TestSuite()
routine = TestLangFunc
routine.set_env(routine, func, lang)
c_test_res = runner.run(routine('test_fetch_data'))
lang_fails.extend(c_test_res.failures + c_test_res.errors)
if routine.fetch_data is not None:
for test in tests_to_run:
suite.addTest(routine(test))
c_test_res = runner.run(suite)
lang_fails.extend(c_test_res.failures + c_test_res.errors)
if lang_fails:
failing_langs.append(lang)
return failing_langs
|
116bbbbb5c20b8dd73b5d45af3bde9cf80ae841f
| 3,643,292
|
import numpy as np
def roparameter(cosphi, hist, s_cosphi=0.25):
    """
    Compute the orientation parameter xi from a histogram over cos(phi), comparing
    counts in the parallel and perpendicular ranges.
    Parameters
    ----------
    cosphi : array of cos(phi) values for each histogram bin
    hist : histogram counts for each cos(phi) bin
    s_cosphi : width defining the "parallel" (|cosphi| < s_cosphi) and
        "perpendicular" (|cosphi| > 1 - s_cosphi) selections
    Returns
    -------
    xi : (N_parallel - N_perpendicular) / (N_parallel + N_perpendicular)
    """
perp=(np.abs(cosphi)>1.-s_cosphi).nonzero()
para=(np.abs(cosphi)<s_cosphi).nonzero()
xi=(np.sum(hist[para])-np.sum(hist[perp]))/float(np.sum(hist[para])+np.sum(hist[perp]))
return xi
|
c9f30db90482600c50a9369139fc75c046d57e40
| 3,643,293
|
import tensorflow as tf
def execute(model_fn, input_fn, **params):
"""Execute train or eval and/or inference graph writing.
Args:
model_fn: An estimator compatible function taking parameters
(features, labels, mode, params) that returns a EstimatorSpec.
input_fn: An estimator compatible function taking 'params' that returns a
dataset
**params: Dict of additional params to pass to both model_fn and input_fn.
"""
params['is_graph'] = False
if params['write_inference_graph']:
inference_graph.write(model_fn, input_fn, params, params['model_dir'])
params['is_graph'] = True
    # TODO: to be revised (original note: "youxiugai")
def estimator_model_fn(features, labels, mode, params):
spec = model_fn(features, labels, mode, params)
return spec
def train_input_fn():
train_params = params.copy()
train_params['input_data'] = params['input_data_train']
train_params['batch_size'] = params['train_batch_size']
if params['randomize_training']:
train_params['randomize_order'] = True
return input_fn(train_params)
def eval_input_fn():
eval_params = params.copy()
eval_params['input_data'] = params['input_data_eval']
eval_params['batch_size'] = params['eval_batch_size']
return input_fn(eval_params)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
max_steps=params['train_steps'])
eval_steps = int(round(params['eval_examples'] / params['eval_batch_size']))
eval_spec = tf.estimator.EvalSpec(
name=params['eval_suffix'], input_fn=eval_input_fn, steps=eval_steps,
throttle_secs=params.get('eval_throttle_secs', 600))
run_config = tf.estimator.RunConfig(
model_dir=params['model_dir'],
save_summary_steps=params['save_summary_steps'],
save_checkpoints_secs=params['save_checkpoints_secs'],
keep_checkpoint_every_n_hours=params['keep_checkpoint_every_n_hours'])
estimator = tf.estimator.Estimator(
model_fn=estimator_model_fn,
params=params,
config=run_config)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
|
44b5cc8105ec663642724a4cbba66c12a81e3b98
| 3,643,294
|
from math import radians
import s2sphere
# Note: check_valid_polyfill_resolution and uint64_to_int64 are helpers defined elsewhere
# in the source module.
def polyfill_bbox(
min_lng, max_lng, min_lat, max_lat, min_resolution=0, max_resolution=30
):
"""Polyfill a planar bounding box with compact s2 cells between resolution levels"""
check_valid_polyfill_resolution(min_resolution, max_resolution)
rc = s2sphere.RegionCoverer()
rc.min_level = min_resolution
rc.max_level = max_resolution
lower_left = s2sphere.LatLng(radians(min_lat), radians(min_lng))
upper_right = s2sphere.LatLng(radians(max_lat), radians(max_lng))
rect = s2sphere.LatLngRect(lower_left, upper_right)
cell_ids = [int(uint64_to_int64(cell.id())) for cell in rc.get_covering(rect)]
cell_ids_str = '[' + ','.join([str(id) for id in cell_ids]) + ']'
return cell_ids_str
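# A standalone illustration of the underlying s2sphere covering call (it avoids the
# project-specific helpers above; coordinates are arbitrary):
rc_demo = s2sphere.RegionCoverer()
rc_demo.min_level = 4
rc_demo.max_level = 8
rect_demo = s2sphere.LatLngRect(
    s2sphere.LatLng.from_degrees(40.0, -3.8),
    s2sphere.LatLng.from_degrees(40.5, -3.5))
print([cell.id() for cell in rc_demo.get_covering(rect_demo)])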
|
d6c2cb0d3f0d7a9eea05a456beda96a1a646e306
| 3,643,295
|
from lvmspec.qa import qalib
import copy
def qa_skysub(param, frame, skymodel, quick_look=False):
"""Calculate QA on SkySubtraction
Note: Pixels rejected in generating the SkyModel (as above), are
not rejected in the stats calculated here. Would need to carry
along current_ivar to do so.
Args:
param : dict of QA parameters : see qa_frame.init_skysub for example
frame : lvmspec.Frame object; Should have been flat fielded
skymodel : lvmspec.SkyModel object
quick_look : bool, optional
If True, do QuickLook specific QA (or avoid some)
Returns:
qadict: dict of QA outputs
Need to record simple Python objects for yaml (str, float, int)
"""
#- QAs
#- first subtract sky to get the sky subtracted frame. This is only for QA. Pipeline does it separately.
tempframe=copy.deepcopy(frame) #- make a copy so as to propagate frame unaffected so that downstream pipeline uses it.
subtract_sky(tempframe,skymodel) #- Note: sky subtract is done to get residuals. As part of pipeline it is done in fluxcalib stage
# Sky residuals first
qadict = qalib.sky_resid(param, tempframe, skymodel, quick_look=quick_look)
# Sky continuum
if not quick_look: # Sky continuum is measured after flat fielding in QuickLook
channel = frame.meta['CAMERA'][0]
wrange1, wrange2 = param[channel.upper()+'_CONT']
skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(frame,wrange1,wrange2)
qadict["SKYFIBERID"] = skyfiber.tolist()
qadict["SKYCONT"] = skycont
qadict["SKYCONT_FIBER"] = meancontfiber
if quick_look: # The following can be a *large* dict
qadict_snr = qalib.SignalVsNoise(tempframe,param)
qadict.update(qadict_snr)
return qadict
|
3b53f99ae4936fa6870dc8020d677ffddcb2d4ef
| 3,643,296
|
def _code_to_symbol(code):
"""
生成symbol代码标志
"""
if code in ct.INDEX_LABELS:
return ct.INDEX_LIST[code]
else:
if len(code) != 6 :
return ''
else:
return 'sh%s'%code if code[:1] in ['5', '6', '9'] else 'sz%s'%code
|
4b783adad975246c9d6722f6eeeb95a2388d1823
| 3,643,297
|
def gesv(a, b):
"""Solve a linear matrix equation using cusolverDn<t>getr[fs]().
Computes the solution to a system of linear equation ``ax = b``.
Args:
a (cupy.ndarray): The matrix with dimension ``(M, M)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(M)`` or ``(M, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != a.shape[1]:
raise ValueError('a must be a square matrix.')
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = _numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
helper = getattr(_cusolver, t + 'getrf_bufferSize')
getrf = getattr(_cusolver, t + 'getrf')
getrs = getattr(_cusolver, t + 'getrs')
n = b.shape[0]
nrhs = b.shape[1] if b.ndim == 2 else 1
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = _cupy.asfortranarray(a, dtype=dtype)
b = _cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
handle = _device.get_cusolver_handle()
dipiv = _cupy.empty(n, dtype=_numpy.int32)
dinfo = _cupy.empty(1, dtype=_numpy.int32)
lwork = helper(handle, n, n, a.data.ptr, n)
dwork = _cupy.empty(lwork, dtype=a.dtype)
    # LU factorization (A = L * U)
getrf(handle, n, n, a.data.ptr, n, dwork.data.ptr, dipiv.data.ptr,
dinfo.data.ptr)
_cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
getrf, dinfo)
# Solves Ax = b
getrs(handle, _cublas.CUBLAS_OP_N, n, nrhs, a.data.ptr, n,
dipiv.data.ptr, b.data.ptr, n, dinfo.data.ptr)
_cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
getrs, dinfo)
return b
|
333f06bd8f91bdfde5526c80894c284580074bb5
| 3,643,298
|
def del_none(d):
"""
Delete dict keys with None values, and empty lists, recursively.
"""
    for key, value in list(d.items()):  # copy items so keys can be deleted during iteration
if value is None or (isinstance(value, list) and len(value) == 0):
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
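# A minimal usage sketch (illustrative data):
demo = {"a": None, "b": [], "c": {"d": None, "e": 1}, "f": 2}
print(del_none(demo))  # {'c': {'e': 1}, 'f': 2}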
|
46cf9e331c633f5f69b980f3b10c96306d3478c2
| 3,643,299
|