content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import math

import numpy as np

# Lamb's constant from the Lamb-Oseen viscous vortex-core growth model.
lamb = 1.25643
# Squire's parameter: adds a strength-dependent (turbulent) contribution to the
# effective viscosity. NOTE(review): value assumed from the Extended UVLM
# literature -- confirm against the project's configuration.
squire = 1e-4
# Machine epsilon: below this, a point is considered to be touching the vortex.
eps = np.finfo(float).eps


def expanded_velocities_from_line_vortices(
    points,
    origins,
    terminations,
    strengths,
    ages=None,
    nu=0.0,
):
    """This function takes in a group of points, and the attributes of a group of
    line vortices. At every point, it finds the induced velocity due to each line
    vortex.

    Citation: The equations in this function are from "Extended Unsteady
    Vortex-Lattice Method for Insect Flapping Wings" (Nguyen et al., 2016)

    Note: This function uses a modified version of the Biot-Savart law to create a
    smooth induced velocity decay based on a vortex's core radius. The radius is
    determined based on a vortex's age and kinematic viscosity. If the age of the
    vortex is 0.0 seconds, the radius is set to 0.0 meters. The age of a vortex is
    only relevant for vortices that have been shed into the wake.

    :param points: 2D array of floats
        This variable is an array of shape (N x 3), where N is the number of points.
        Each row contains the x, y, and z float coordinates of that point's position
        in meters.
    :param origins: 2D array of floats
        This variable is an array of shape (M x 3), where M is the number of line
        vortices. Each row contains the x, y, and z float coordinates of that line
        vortex's origin's position in meters.
    :param terminations: 2D array of floats
        This variable is an array of shape (M x 3), where M is the number of line
        vortices. Each row contains the x, y, and z float coordinates of that line
        vortex's termination's position in meters.
    :param strengths: 1D array of floats
        This variable is an array of shape (, M), where M is the number of line
        vortices. Each position contains the strength of that line vortex in meters
        squared per second.
    :param ages: 1D array of floats, optional
        This variable is an array of shape (, M), where M is the number of line
        vortices. Each position contains the age of that line vortex in seconds.
        The default value is None (treated as all zeros). If the age of a specific
        vortex is 0.0 seconds, then the vortex core radius is set to 0.0 meters.
    :param nu: float, optional
        This variable is a float that represents the kinematic viscosity of the
        fluid in meters squared per second. The default value is 0.0 meters squared
        per second.
    :return velocities: 3D array of floats
        This is an array of shape (N x M x 3), where each row/column pair
        identifies the velocity induced at one point by one of the line vortices.
        The units are meters per second.
    """
    num_vortices = origins.shape[0]
    num_points = points.shape[0]
    # Initialize an empty array, which we will fill with the induced velocities.
    velocities = np.empty((num_points, num_vortices, 3))
    # If the user didn't specify any ages, set the age of each vortex to 0.0
    # seconds, which makes every core radius 0.0 meters.
    if ages is None:
        ages = np.zeros(num_vortices)
    for vortex_id in range(num_vortices):
        origin = origins[vortex_id]
        termination = terminations[vortex_id]
        strength = strengths[vortex_id]
        age = ages[vortex_id]
        # Calculate the radius of the vortex's core. If the age is 0.0 seconds,
        # this will evaluate to be 0.0 meters.
        r_c = 2 * math.sqrt(lamb * (nu + squire * abs(strength)) * age)
        # The r_0 vector goes from the line vortex's origin to its termination.
        r_0_x = termination[0] - origin[0]
        r_0_y = termination[1] - origin[1]
        r_0_z = termination[2] - origin[2]
        # Find the r_0 vector's length.
        r_0 = math.sqrt(r_0_x ** 2 + r_0_y ** 2 + r_0_z ** 2)
        # c_1 and c_2 are loop-invariant per vortex, so hoist them out of the
        # inner loop over points.
        c_1 = strength / (4 * math.pi)
        c_2 = r_0 ** 2 * r_c ** 2
        for point_id in range(num_points):
            point = points[point_id]
            # The r_1 vector goes from the point to the line vortex's origin.
            r_1_x = origin[0] - point[0]
            r_1_y = origin[1] - point[1]
            r_1_z = origin[2] - point[2]
            # The r_2 vector goes from the point to the line vortex's termination.
            r_2_x = termination[0] - point[0]
            r_2_y = termination[1] - point[1]
            r_2_z = termination[2] - point[2]
            # The r_3 vector is the cross product of the r_1 and r_2 vectors.
            r_3_x = r_1_y * r_2_z - r_1_z * r_2_y
            r_3_y = r_1_z * r_2_x - r_1_x * r_2_z
            r_3_z = r_1_x * r_2_y - r_1_y * r_2_x
            # Find the r_1, r_2, and r_3 vectors' lengths.
            r_1 = math.sqrt(r_1_x ** 2 + r_1_y ** 2 + r_1_z ** 2)
            r_2 = math.sqrt(r_2_x ** 2 + r_2_y ** 2 + r_2_z ** 2)
            r_3 = math.sqrt(r_3_x ** 2 + r_3_y ** 2 + r_3_z ** 2)
            c_3 = r_1_x * r_2_x + r_1_y * r_2_y + r_1_z * r_2_z
            # If part of the vortex is so close to the point that they are
            # touching (within machine epsilon), there is a removable
            # discontinuity. In this case, set the velocity components to their
            # true values, which are 0.0 meters per second.
            if r_1 < eps or r_2 < eps or r_3 ** 2 < eps:
                velocities[point_id, vortex_id, 0] = 0.0
                velocities[point_id, vortex_id, 1] = 0.0
                velocities[point_id, vortex_id, 2] = 0.0
            else:
                c_4 = (
                    c_1
                    * (r_1 + r_2)
                    * (r_1 * r_2 - c_3)
                    / (r_1 * r_2 * (r_3 ** 2 + c_2))
                )
                velocities[point_id, vortex_id, 0] = c_4 * r_3_x
                velocities[point_id, vortex_id, 1] = c_4 * r_3_y
                velocities[point_id, vortex_id, 2] = c_4 * r_3_z
    return velocities
import os.path


def normcase(path):
    """Normalize the case of a pathname.

    On Unix and Mac OS X, this returns the path unchanged; on
    case-insensitive filesystems, it converts the path to lowercase. On
    Windows, it also converts forward slashes to backward slashes.

    :param path: the pathname to normalize.
    :return: the platform-normalized pathname string.
    """
    # The original stub returned 0; delegate to the platform implementation.
    return os.path.normcase(path)
def typeof(obj, t):
    """Check if a specific type instance is a subclass of the type.

    Args:
        obj: Concrete type instance (expected to be a class object).
        t: Base type class.

    Returns:
        bool: ``issubclass(obj, t)``, or False when *obj* is not a class.
    """
    try:
        return issubclass(obj, t)
    except TypeError:
        # issubclass raises TypeError when obj is not a class object.
        return False
def get_all_clusters(cluster_type, client_id):
    """Get a list of (cluster_name, cluster_config)
    for the available kafka clusters in the ecosystem at Yelp.

    :param cluster_type: kafka cluster type
        (ex.'scribe' or 'standard').
    :type cluster_type: string
    :param client_id: name of the client making the request. Usually
        the same client id used to create the Kafka connection.
    :type client_id: string
    :returns: list of py:class:`yelp_kafka.config.ClusterConfig`
    :raises InvalidClusterType: if the discovery service rejects the request.
    """
    client = get_kafka_discovery_client(client_id)
    try:
        cluster_names = client.v1.getClustersAll(cluster_type).result()
    except HTTPError as e:
        log.exception(
            "Failure while fetching clusters for cluster type:{clustertype}"
            .format(clustertype=cluster_type),
        )
        # Chain the original HTTP error so the cause survives in tracebacks.
        raise InvalidClusterType(e.response.text) from e
    return [
        get_kafka_cluster(cluster_type, client_id, cluster_name)
        for cluster_name in cluster_names
    ]
from .. import getPlottingEngine


def plot(x, y, show=True, **kwargs):
    """ Create a 2D scatter plot.

    :param x: A numpy array describing the X datapoints. Should have the same number of rows as y.
    :param y: A numpy array describing the Y datapoints. Should have the same number of rows as x.
    :param show: Whether to display the plot immediately (default True).
    :param color: The color to use.
    :param tag: A tag so that all traces of the same type are plotted using same color/label (for e.g. multiple stochastic traces).
    :param tags: Like tag, but for multiple traces.
    :param name: The name of the trace.
    :param names: Like name, but for multiple traces.
    :param alpha: Floating point representing the opacity ranging from 0 (transparent) to 1 (opaque).
    :param mode: Either 'lines' or 'markers' (defaults to 'lines').
    """
    # All remaining keyword options are forwarded to the active plotting engine.
    return getPlottingEngine().plot(x, y, show=show, **kwargs)
def get_transform_ids(workprogress_id, request_id=None, workload_id=None, transform_id=None, session=None):
    """
    Get transform ids or raise a NoObject exception.

    :param workprogress_id: Workprogress id.
    :param request_id: Optional request id filter.
    :param workload_id: Optional workload id filter.
    :param transform_id: Optional transform id filter.
    :param session: The database session in use.

    :raises NoObject: If no transform is founded.

    :returns: list of transform ids.
    """
    # Thin delegation to the ORM layer; all filtering happens there.
    return orm_transforms.get_transform_ids(workprogress_id=workprogress_id, request_id=request_id,
                                            workload_id=workload_id, transform_id=transform_id, session=session)
from typing import Optional


def get_link(hub_name: Optional[str] = None,
             link_name: Optional[str] = None,
             resource_group_name: Optional[str] = None,
             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLinkResult:
    """
    The link resource format.
    Latest API Version: 2017-04-26.

    :param str hub_name: The name of the hub.
    :param str link_name: The name of the link.
    :param str resource_group_name: The name of the resource group.
    :param opts: Options controlling the invoke (version is filled in if unset).
    """
    # This whole module targets a deprecated API surface; warn every caller.
    pulumi.log.warn("""get_link is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:customerinsights:getLink'.""")
    __args__ = dict()
    __args__['hubName'] = hub_name
    __args__['linkName'] = link_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:customerinsights/latest:getLink', __args__, opts=opts, typ=GetLinkResult).value
    return AwaitableGetLinkResult(
        description=__ret__.description,
        display_name=__ret__.display_name,
        id=__ret__.id,
        link_name=__ret__.link_name,
        mappings=__ret__.mappings,
        name=__ret__.name,
        operation_type=__ret__.operation_type,
        participant_property_references=__ret__.participant_property_references,
        provisioning_state=__ret__.provisioning_state,
        reference_only=__ret__.reference_only,
        source_entity_type=__ret__.source_entity_type,
        source_entity_type_name=__ret__.source_entity_type_name,
        target_entity_type=__ret__.target_entity_type,
        target_entity_type_name=__ret__.target_entity_type_name,
        tenant_id=__ret__.tenant_id,
        type=__ret__.type)
import os
import json


def rest_scaffold(context, model, app="", api_root="", **kwargs):
    """
    Take name of app and model, return context for template that includes a
    single variable: the configuration for the rest scaffold.

    :param context: template context (used only to read ``csrf_token``).
    :param model: model name (resolved via ``get_model``) or model class.
    :param app: Django app label used to resolve the model.
    :param api_root: base path used to build the API url when ``api_url`` is
        not supplied in kwargs.
    :return: dict with a single ``configuration`` key holding the JSON config.
    """
    # get paging details: explicit kwarg wins, otherwise infer from settings
    is_paged = kwargs.pop("is_paged", None)
    if is_paged is None:
        rest_framework_config = get_setting("REST_FRAMEWORK") or {}
        if rest_framework_config.get("DEFAULT_PAGINATION_CLASS", None):
            is_paged = True
        else:
            is_paged = False
    # get model/app; a dict return value from get_model signals an error
    # payload which we pass straight through to the caller
    model = get_model(model, app)
    if isinstance(model, dict):
        return model
    app = model._meta.app_label
    # field configuration
    fields = comma_parse(kwargs.pop("fields", ""))
    config_fields = []
    mf = model._meta.get_fields()
    exclude_from_form = comma_parse(kwargs.pop("exclude_from_form", ""))
    exclude_from_table = comma_parse(kwargs.pop("exclude_from_table", ""))
    # skip reverse relations; only concrete fields are scaffolded
    for f in filter(lambda x: not issubclass(type(x), ForeignObjectRel), mf):
        if fields and f.name not in fields:
            continue
        try:
            ff = f.formfield()
        except AttributeError:
            # non-editable fields (e.g. auto fields) have no formfield
            ff = None
        # generate unique id
        id_for = "rest-scaffold-field-{0}".format(id(f))
        if ff:
            # extra options
            widget_opts = {}
            if issubclass(type(ff.widget), widgets.Select):
                widget_opts["size"] = 10
            html = str(ff.widget.render(f.name, None, {"id": id_for, **widget_opts}))
        else:
            html = None
        field_opts = {"name": str(f.name), "id": id_for, "html": html}
        try:
            field_opts["title"] = str(f.verbose_name)
        except AttributeError:
            field_opts["title"] = str(f.name)
        if f.name in exclude_from_form:
            field_opts["on_form"] = False
        if f.name in exclude_from_table:
            field_opts["on_table"] = False
        if hasattr(f, "choices") and f.choices:
            # render the choices, coercing non-numeric values to strings so
            # the structure is JSON-serializable
            field_opts["choices"] = [
                [y if isinstance(y, (int, float)) else str(y) for y in x]
                for x in f.choices
            ]
        elif isinstance(f, (ForeignKey, OneToOneField, ManyToManyField)):
            # TODO: probably should provide a list of choices to the scaffold, but also
            # probably want to update them regularly. Maybe we just provide the
            # model/API_URL/fk_field/display_field for the relationship and
            # rest-scaffold can fetch the options when building the form.
            pass
        config_fields.append(field_opts)
    # ok, have model -- need to give context the model, app, fields, url
    api_url = kwargs.pop("api_url", None)
    url = api_url or os.path.join("/", api_root, app, model.__name__.lower())
    r = {
        "title": model._meta.verbose_name_plural.title(),
        "subtitle": "{0} / {1}".format(app, model.__name__),
        "recordTitle": model._meta.verbose_name.title(),
        "pkField": model._meta.pk.name,
        "fields": config_fields,
        "url": url,
        "apiType": "django-paged" if is_paged else "plain",
        **kwargs,
    }
    csrf_token = context.get("csrf_token", None)
    if csrf_token:
        r["csrfToken"] = str(csrf_token)
    return {"configuration": json.dumps(r)}
import random


def selection_elites_random(individuals: list, n: int = 4, island=None) -> list:
    """
    Completely random selection (with replacement).

    Args:
        individuals (list): A list of Individuals.
        n (int): Number to select (default = 4).
        island (Island): The Island calling the method (default = None).

    Returns:
        list: Random n Individuals.
    """
    # random.choice picks a single element and has no ``size`` keyword (that
    # is numpy's API, as was the ``.tolist()`` call); random.choices draws n
    # elements with replacement, matching numpy's default behavior.
    return random.choices(individuals, k=n)
import ctypes


def ekssum(handle, segno):
    """
    Return summary information for a specified segment in a specified EK.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekssum_c.html

    :param handle: Handle of EK.
    :type handle: int
    :param segno: Number of segment to be summarized.
    :type segno: int
    :return: EK segment summary.
    :rtype: spicepy.utils.support_types.SpiceEKSegSum
    """
    # Convert Python ints to C ints and let CSPICE fill the summary struct
    # in place via a pointer.
    handle = ctypes.c_int(handle)
    segno = ctypes.c_int(segno)
    segsum = stypes.SpiceEKSegSum()
    libspice.ekssum_c(handle, segno, ctypes.byref(segsum))
    return segsum
def compare_prices_for_same_urls(
    source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields
):
    """For each pair of items that have the same `product_url_field` tagged field,
    compare `product_price_field` field

    Returns:
        A result containing pairs of items with same `product_url_field`
        from `source_df` and `target_df` which `product_price_field` differ,
        missing and new `product_url_field` tagged fields.
    """
    result = Result("Compare Prices For Same Urls")
    url_field = tagged_fields.get("product_url_field")
    if not url_field:
        # Without a tagged url field there is nothing to join on.
        result.add_info(Outcome.SKIPPED)
        return result
    url_field = url_field[0]
    source_df = source_df.dropna(subset=[url_field])
    target_df = target_df.dropna(subset=[url_field])
    # Partition the source urls into those also present in the target (same),
    # those only in the source (new), and target urls absent from the source
    # (missing).
    same_urls = source_df[(source_df[url_field].isin(target_df[url_field].values))][
        url_field
    ]
    new_urls = source_df[~(source_df[url_field].isin(target_df[url_field].values))][
        url_field
    ]
    missing_urls = target_df[(~target_df[url_field].isin(source_df[url_field].values))][
        url_field
    ]
    errors = {}
    for url, group in missing_urls.groupby(missing_urls):
        errors[f"Missing {url}"] = set(group.index)
    if not missing_urls.empty:
        result.add_info(
            f"{len(missing_urls)} urls missing from the tested job", errors=errors
        )
    if not new_urls.empty:
        result.add_info(f"{len(new_urls)} new urls in the tested job")
    result.add_info(f"{len(same_urls)} same urls in both jobs")
    diff_prices_count = 0
    price_field = tagged_fields.get("product_price_field")
    if not price_field:
        result.add_info("product_price_field tag is not set")
    else:
        price_field = price_field[0]
        detailed_messages = []
        for url in same_urls:
            if url.strip() != "nan":
                # Only the first occurrence of each url is compared.
                source_price = source_df[source_df[url_field] == url][price_field].iloc[
                    0
                ]
                target_price = target_df[target_df[url_field] == url][price_field].iloc[
                    0
                ]
                # Flag prices that differ by more than 10%.
                if (
                    is_number(source_price)
                    and is_number(target_price)
                    and ratio_diff(source_price, target_price) > 0.1
                ):
                    diff_prices_count += 1
                    source_key = source_df[source_df[url_field] == url].index[0]
                    target_key = target_df[target_df[url_field] == url].index[0]
                    msg = (
                        f"different prices for url: {url}\nsource price is {source_price} "
                        f"for {source_key}\ntarget price is {target_price} for {target_key}"
                    )
                    detailed_messages.append(msg)
        res = f"{len(same_urls)} checked, {diff_prices_count} errors"
        if detailed_messages:
            result.add_error(res, detailed="\n".join(detailed_messages))
        else:
            result.add_info(res)
    return result
def _check_param(dict_):
    """
    Check dictionary elements and reformat if need be.

    :param dict_: raw parameter dict loaded from the extra-parameters yaml file
    :return: dictionary reformatted ('google_users' and 'dataset_ids' entries)
    :raises KeyError: if a required key is missing from the yaml file
    """
    # default empty dictionary
    _ = {}
    if "google_users" in dict_:
        _["google_users"] = _check_param_google_users(dict_["google_users"])
    else:
        _logger.exception(f"No key 'google_users' in yaml file {setupcfg.extraParam}")
        # Name the missing key so callers see it in the traceback.
        raise KeyError("google_users")
    if "dataset_ids" in dict_:
        _["dataset_ids"] = _check_param_dataset_ids(dict_["dataset_ids"])
    else:
        _logger.exception(f"No key 'dataset_ids' in yaml file {setupcfg.extraParam}")
        raise KeyError("dataset_ids")
    return _
def fetch_dataset_insistently(url: str, link_text_prefix: str, user_agent: str) -> dict:
    """Fetch the approved routes dataset, retrying through a list of proxies.

    :param url: page url to fetch the dataset from.
    :param link_text_prefix: prefix identifying the download link on the page.
    :param user_agent: User-Agent header to send.
    :return: download info dict from ``fetch_dataset``.
    :raises FetchException: propagated immediately without trying more proxies.

    NOTE(review): if every proxy fails with a non-FetchException error, this
    falls through and implicitly returns None -- confirm callers handle that.
    """
    proxies = get_proxies_geonode() + get_proxies()
    print(f'{len(proxies)} proxies found.')
    for i, proxy in enumerate(proxies):
        print(f'Fetching dataset, try with proxy [{i + 1}] {proxy}.')
        req_proxy = {
            'http': f'http://{proxy["ip"]}:{proxy["port"]}'
        }
        try:
            download_info = fetch_dataset(url, link_text_prefix, user_agent, req_proxy)
            return download_info
        except FetchException:
            # A FetchException is considered fatal: don't try other proxies.
            raise
        except Exception as e:
            # Any other failure (timeout, bad proxy, ...): log and try the next.
            print(f'Fetching dataset try {proxy} failed with error: {e}')
import numpy
def _dense_to_one_hot(labels_dense):
"""Convert class labels from scalars to one-hot vectors."""
num_classes = len(set(labels_dense))
num_labels = labels_dense.shape[0]
labels_to_numbers = {label: i for i, label in enumerate(list(set(labels_dense)))}
labels_as_numbers = numpy.asarray([labels_to_numbers[label] for label in labels_dense])
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_as_numbers.ravel()] = 1
return labels_one_hot, labels_to_numbers | 49cc3ab6bc5f4ec81323321a9fe13d4da030fb4a | 27,813 |
def swish(x):
    """Swish activation function: ``x * sigmoid(x)``.

    For more info: https://arxiv.org/abs/1710.05941

    :param x: a TensorFlow tensor.
    :return: tensor of the same shape with the activation applied elementwise.
    """
    return tf.multiply(x, tf.nn.sigmoid(x))
def fatorial(n=1):
    """
    Compute and return the factorial of a number.

    :param n: non-negative integer (default 1).
    :return: n! as an int.
    """
    f = 1
    # Multiplying from 2 upward; range(2, 1) is empty so 0! and 1! are 1.
    for i in range(2, n + 1):
        f *= i
    return f
def retrieve_context_nw_topology_service_name_name_by_id(value_name):  # noqa: E501
    """Retrieve name by ID

    Retrieve operation of resource: name # noqa: E501

    :param value_name: ID of value_name
    :type value_name: str

    :rtype: NameAndValue
    """
    # Generated server stub: the placeholder return is intentional until the
    # handler is implemented.
    return 'do some magic!'
def conv1x1(in_planes, out_planes, wib, stride=1):
    """1x1 convolution.

    :param in_planes: number of input channels.
    :param out_planes: number of output channels.
    :param wib: if truthy, build the project's ``WibConv2d`` variant instead
        of a plain ``nn.Conv2d``.
    :param stride: convolution stride (default 1).
    :return: the constructed convolution module (bias disabled in both cases).
    """
    # (The original also assigned an unused ``resnet_wib`` flag twice; removed.)
    if not wib:
        return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
    # Alpha for the WibConv2d variant. NOTE(review): value kept from the
    # original; its meaning is defined by WibConv2d elsewhere in the project.
    resnet_alpha = 1E-3
    return WibConv2d(alpha=resnet_alpha,
                     in_channels=in_planes, out_channels=out_planes, kernel_size=1,
                     stride=stride, bias=False)
def get_rules(fault_block, zone):
    """Get rules for fault block and zone names.

    In this model the rules mostly depend on the zone; a few zones override
    the default rule for specific fault blocks.

    Args:
        fault_block (str)
            Name of fault block.
        zone (str)
            Zone name.
    Returns:
        Function (rule) that computes elastic properties (density, Vp, Vs, Qp, and Qs) from x, y, depth.
    Raises:
        KeyError: if *zone* is not a known zone name.
    """
    # The rule table is static; build it once and cache it on the function
    # object instead of reconstructing the whole dict on every call.
    rules = getattr(get_rules, "_RULES", None)
    if rules is None:
        rules = get_rules._RULES = {
            "Mantle": {
                "default": rules_aagaard_etal_2010.upper_mantle,
            },
            "Lower Crust": {
                "default": rules_aagaard_etal_2010.mafic_great_valley_ophiolite,
            },
            "Great_Valley_Ophiolite": {
                "default": rules_aagaard_etal_2010.mafic_great_valley_ophiolite,
            },
            "San Leandro G": {
                "default": rules_aagaard_etal_2010.mafic_great_valley_ophiolite,
            },
            "Logan G": {
                "default": rules_aagaard_etal_2010.mafic_great_valley_ophiolite,
            },
            "Kjf_Berkeley": {
                "default": rules_aagaard_etal_2010.franciscan_berkeley,
            },
            "Kjf_Chabot": {
                "default": rules_aagaard_etal_2010.franciscan_berkeley,
            },
            "Kjf_Foothills": {
                "default": rules_aagaard_etal_2010.franciscan_foothills,
            },
            "Kjf_Merced": {
                "default": rules_aagaard_etal_2010.franciscan_napa_sonoma,
            },
            "Kjf_Sur": {
                "default": rules_aagaard_etal_2010.franciscan_berkeley,
            },
            "Kjf_Napa_Somoma": {
                "default": rules_aagaard_etal_2010.franciscan_napa_sonoma,
                "Napa": rules_hirakawa_aagaard_2021.franciscan_napa,
                "Sonoma": rules_aagaard_etal_2010.franciscan_napa_sonoma,
            },
            "Kjf_Bay Block": {
                "default": rules_aagaard_etal_2010.franciscan_napa_sonoma,
            },
            "Kjf_Evergreen": {
                "default": rules_aagaard_etal_2010.franciscan_berkeley,
            },
            "Kjf_E_Diablo": {
                "default": rules_aagaard_etal_2010.franciscan_berkeley,
            },
            "Kjf_W_Diablo": {
                "default": rules_aagaard_etal_2010.franciscan_berkeley,
            },
            "Kgr_Gab": {
                "default": rules_aagaard_etal_2010.salinian_granitic,
            },
            "Kgr_Halfmoon": {
                "default": rules_aagaard_etal_2010.salinian_granitic,
            },
            "Kgr_SCoast": {
                "default": rules_aagaard_etal_2010.salinian_granitic,
            },
            "Kgr_NShelf": {
                "default": rules_aagaard_etal_2010.salinian_granitic,
            },
            "Valley Sequence": {
                "default": rules_aagaard_etal_2010.great_valley_sequence_sedimentary,
                "San Leandro": rules_hirakawa_aagaard_2021.valley_sequence_sanleandro,
                "Sunol": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
            },
            "Great Valley Seq": {
                "default": rules_aagaard_etal_2010.great_valley_sequence_sedimentary,
                "Berkeley": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
                "Chabot": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
                "W Diablo Range": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
            },
            "Cenozoic": {
                "default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
                "Napa": rules_hirakawa_aagaard_2021.cenozoic_napa,
                "Sonoma": rules_hirakawa_aagaard_2021.cenozoic_sonoma,
                "Alexander": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
                "W Diablo Range": rules_hirakawa_aagaard_2021.quaternary_livermore,
            },
            "Cenozoic_Great Valley": {
                "default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
            },
            "Cenozoic_Halfmoon": {
                "default": rules_aagaard_etal_2010.cenozoic_sedimentary_halfmoonbay,
            },
            "Cenozoic_Ever": {
                "default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
            },
            "T_SouthBay": {
                "default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
            },
            "T_La Honda": {
                "default": rules_aagaard_etal_2010.tertiary_sedimentary_lahondabasin,
            },
            "T_Pilarcitos": {
                "default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
            },
            "T_Bay_Block_Santa_Rosa": {
                "default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
                "Santa Rosa": rules_hirakawa_aagaard_2021.brocher2005_older_cenozoic_sedimentary,
            },
            "T_Berkeley": {
                "default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
                "Berkeley": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
                "Chabot": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
            },
            "QT_Bay_Block_Santa Rosa": {
                "default": rules_aagaard_etal_2010.quaternary_tertiary_sedimentary,
            },
            "QT_Merced_Pilarcitos": {
                "default": rules_aagaard_etal_2010.quaternary_tertiary_sedimentary,
            },
            "QT_South_Bay": {
                "default": rules_aagaard_etal_2010.quaternary_tertiary_sedimentary,
            },
            "water": {
                "default": rules_aagaard_etal_2010.seawater,
            },
            "<null>": {
                "default": rules_aagaard_etal_2010.outside_model,
            },
            "": {
                "default": rules_aagaard_etal_2010.outside_model,
            },
        }
    zone_rules = rules[zone]
    # Fall back to the zone's default rule for fault blocks with no override.
    return zone_rules.get(fault_block, zone_rules["default"])
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis.

    Tiles *y* across the spatial dimensions of *x*, then joins the two
    tensors along the channel axis (axis 3, NHWC layout).

    NOTE: the original imported ``operator.concat``, which cannot join
    tensors along an axis -- TensorFlow's ``tf.concat`` is required here.
    """
    x_shapes = tf.shape(x)
    y_shapes = tf.shape(y)
    return tf.concat([
        x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
def get_clusters(cloud_filtered):
    """
    Get clusters from the cloud.

    Parameters:
    -----------
    cloud_filtered: pcl.PointCloud()

    Returns:
    -----------
    clusters: list of pcl.PointCloud(), one per extracted cluster
    """
    clusters = []
    tree = cloud_filtered.make_kdtree()
    ec = cloud_filtered.make_EuclideanClusterExtraction()
    ec.set_ClusterTolerance(0.02)
    ec.set_MinClusterSize(5)
    ec.set_MaxClusterSize(100)
    ec.set_SearchMethod(tree)
    cluster_indices = ec.Extract()
    for indices in cluster_indices:
        points = np.zeros((len(indices), 3), dtype=np.float32)
        for i, indice in enumerate(indices):
            points[i][0] = cloud_filtered[indice][0]
            points[i][1] = cloud_filtered[indice][1]
            points[i][2] = cloud_filtered[indice][2]
        # A fresh PointCloud per cluster: the original reused one object and
        # appended it repeatedly, so every list entry aliased the same (last)
        # cluster after from_array() overwrote its contents.
        cloud_cluster = pcl.PointCloud()
        cloud_cluster.from_array(points)
        clusters.append(cloud_cluster)
    return clusters
import json


def errorResult(request, response, error, errorMsg, httpStatus = 500, result = None, controller = None):
    """ Set and return the error result as a JSON string.

    :param request: incoming request (used to derive the log title).
    :param response: response object; its status and content type are set here.
    :param error: application error code included in the payload.
    :param errorMsg: human-readable error message included in the payload.
    :param httpStatus: HTTP status code to set on the response (default 500).
    :param result: optional extra payload included under 'result'.
    :param controller: pylon controller handling the request, where cal context is injected and later retrieved by trackable
    """
    response.status_int = httpStatus
    response.content_type = 'application/json'
    res = {'error': error, 'errorMsg': errorMsg}
    if (result != None):
        res['result'] = result
    title = __getreqstr(request)
    msg = 'Error Result - (%s, %s)' % (str(error), errorMsg)
    # Record the failure both in the cal context and the application log.
    __injectcontext(controller, title, CRITICAL, msg)
    LOG.warning(msg)
    return json.dumps(res)
from typing import Dict
import logging


def parse_main(text: str = "") -> Dict:
    """
    A loop for processing each parsing recipe. Returns a dict of parsed values.

    :param text: raw text to run every recipe in ``parser_recipe`` against.
    :return: mapping of each recipe's 'field' name to its parsed value.
    """
    if text == "":
        logging.warning("Empty string provided for parsing")
    parsed_data = {}
    # Each recipe names its output field and supplies parser() keyword options.
    for recipe in parser_recipe:
        field = recipe["field"]
        parsed_data[field] = parser(text, **recipe)
    return parsed_data
def main():
    """Return the answer to life, the universe and everything."""
    return 42
import subprocess


def sub_proc_launch(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
    """Launch a subprocess and return the Popen process object.

    This is non blocking. This is useful for long running processes.

    :param cmd: command string; split on whitespace into an argument list
        (no shell is involved, so quoting is not honored).
    :param stdout: stdout destination (default: a pipe).
    :param stderr: stderr destination (default: a pipe).
    :return: the ``subprocess.Popen`` object for the running process.
    """
    proc = subprocess.Popen(cmd.split(), stdout=stdout, stderr=stderr)
    return proc
def collect_accuracy(path):
    """Collect accuracy values in a log file.

    :param path: path to the log file to scan.
    :return: tuple ``(r1, r5, mAP)`` of floats (percent values with the '%'
        stripped); any metric not found in the file is returned as None.
        The last occurrence of each metric wins.
    """
    r1 = None
    r5 = None
    mAP = None
    # Substrings that identify each metric's line in the log.
    r1_content = 'Rank-1 '
    r5_content = 'Rank-5 '
    map_content = 'mAP:'
    with open(path) as input_stream:
        for line in input_stream:
            candidate = line.strip()
            if r1_content in candidate:
                r1 = float(candidate.split(':')[-1].replace('%', ''))
            elif r5_content in candidate:
                r5 = float(candidate.split(':')[-1].replace('%', ''))
            elif map_content in candidate:
                mAP = float(candidate.split(':')[-1].replace('%', ''))
    return r1, r5, mAP
from typing import Type
from typing import Mapping


def recursively_get_annotations(ty: Type) -> Mapping[str, Type]:
    """Given a type, recursively gather annotations for its subclasses as well. We only
    gather annotations if its base classes are themselves subclasses of Deserializable,
    and not Deserializable itself.

    This is bad evil code that uses internal Python details that may break in
    3.8 or later.

    :param ty: class whose annotations (and those of its Deserializable bases)
        are collected.
    :return: merged mapping of attribute name to annotated type; base-class
        annotations override same-named entries from *ty*.
    """
    # Get initial annotations
    annotations: Mapping[str, Type] = getattr(ty, "__annotations__", {})
    # Recursively gather annotations for base classes
    for base in getattr(ty, "__bases__", {}):
        if issubclass(base, Deserializable) and (base != Deserializable):
            annotations = dict(annotations, **recursively_get_annotations(base))
    return annotations
def sector_model():
    """SectorModel requiring precipitation and cost, providing water.

    Builds an ``EmptySectorModel`` named 'water_supply' with three inputs
    (precipitation, reservoir_level, rGVA), two outputs (water,
    reservoir_level) and one test parameter, all dimensioned over three LSOAs.
    """
    model = EmptySectorModel('water_supply')
    # Inputs: all defined over the same three-LSOA dimension.
    model.add_input(
        Spec.from_dict({
            'name': 'precipitation',
            'dims': ['LSOA'],
            'coords': {'LSOA': [1, 2, 3]},
            'dtype': 'float',
            'unit': 'ml'
        })
    )
    model.add_input(
        Spec.from_dict({
            'name': 'reservoir_level',
            'dims': ['LSOA'],
            'coords': {'LSOA': [1, 2, 3]},
            'dtype': 'float',
            'unit': 'ml'
        })
    )
    model.add_input(
        Spec.from_dict({
            'name': 'rGVA',
            'dims': ['LSOA'],
            'coords': {'LSOA': [1, 2, 3]},
            'dtype': 'float',
            'unit': 'million GBP'
        })
    )
    # Outputs: note 'water' uses 'Ml' while the inputs use 'ml'.
    model.add_output(
        Spec.from_dict({
            'name': 'water',
            'dims': ['LSOA'],
            'coords': {'LSOA': [1, 2, 3]},
            'dtype': 'float',
            'unit': 'Ml'
        })
    )
    model.add_output(
        Spec.from_dict({
            'name': 'reservoir_level',
            'dims': ['LSOA'],
            'coords': {'LSOA': [1, 2, 3]},
            'dtype': 'float',
            'unit': 'ml'
        })
    )
    # Single national-scale parameter used to exercise narratives in tests.
    model.add_parameter(
        Spec.from_dict({
            'name': 'test_parameter',
            'description': 'a dummy parameter to test narratives',
            'dims': ['national'],
            'coords': {'national': ['GB']},
            'abs_range': (0.5, 2),
            'exp_range': (0.5, 2),
            'dtype': 'float',
            'default': [[1.]],
            'unit': '%'
        })
    )
    return model
def maybe_flip_x_across_antimeridian(x: float) -> float:
    """Flips a longitude across the antimeridian if needed.

    Values greater than 90 degrees are wrapped to the far (western)
    hemisphere by subtracting a full 360-degree turn; everything else is
    returned unchanged. NOTE(review): the 90-degree threshold (rather than
    180) appears to be a deliberate domain choice -- confirm with callers.
    """
    if x > 90:
        # (-180 * 2) + x == x - 360: one full rotation westward.
        return (-180 * 2) + x
    else:
        return x
import numpy as np


def contour_check(check_points, random_walk):
    """Return a boolean mask of which check points lie inside the boundary.

    check_points have dim (n, ndim).
    random_walk has 3 elements:
        [0] boundary unit vectors, arranged so that ``points @ vectors``
            yields one projection column per boundary direction,
        [1] boundary lengths (relative to origin), one per direction,
        [2] origin.

    Operates by finding which boundary direction each point projects onto
    most strongly, then comparing the point's distance from the origin to
    that direction's boundary length.

    :return: boolean array of shape (n,) -- True where the point is inside.
    """
    boundary_unit_vectors = random_walk[0]
    boundary_ls = np.asarray(random_walk[1])
    origin = random_walk[2]
    points = check_points - origin
    # Projection of every point onto every boundary direction: (n, k).
    projections = np.dot(points, boundary_unit_vectors)
    # Index of the boundary direction each point is most aligned with.
    maxprojs = np.argmax(projections, axis=1)
    compare_distance_to = boundary_ls[maxprojs]
    # Distance from origin over ALL dimensions -- the original summed only
    # the first two coordinates, silently breaking for ndim > 2.
    distances = np.linalg.norm(points, axis=1)
    return distances <= compare_distance_to
def _get_erroneous_call(report_text: str) -> str:
    """Extract the failing call expression from a pytest failure report.

    :param report_text: full text of the pytest failure report.
    :return: the call expression from the first '> '-prefixed line containing
        the RAISES_OUTPUT_SIGNAL_IN_CONTEXT marker, with the 'assert' prefix
        and everything from ' ==' onward stripped off.
    """
    # Lines pytest marks with '> ' are the source lines under failure.
    erroneous_line = [
        line for line in report_text.splitlines() if line.startswith('> ') and RAISES_OUTPUT_SIGNAL_IN_CONTEXT in line
    ][0]
    erroneous_assertion = erroneous_line.lstrip('> ')
    erroneous_assertion = string_remove_from_start(erroneous_assertion, 'assert ')
    erroneous_assertion = string_remove_after(erroneous_assertion, ' ==')
    erroneous_assertion = erroneous_assertion.rstrip('= ')
    return erroneous_assertion
def write_output(features, forecast_hours, poly, line, point):
    """
    writes output to OUTDATA dict depending on query type
    :param features: output from clipping function
    :param forecast_hours: list of all queried forecast hours
    :param poly: boolean to identify a polygon query
    :param line: boolean to identify a line query
    :param point: boolean to identify a point query
    :returns: dict with all queried forecast hours and clipping results
    """
    def _skeleton(geom_type, coordinates):
        # Common GeoJSON-like scaffold, with one "Forecast Hour" entry per
        # queried hour (previously triplicated across the three branches).
        return {"type": "FeatureCollection",
                "features": {
                    "type": "Feature",
                    "geometry": {
                        "type": geom_type,
                        "coordinates": coordinates,
                    },
                    "properties": {
                        "Forecast Hours": [
                            {"Forecast Hour": hour} for hour in forecast_hours
                        ]
                    }
                }
                }
    if line and not poly and not point:
        OUTDATA = _skeleton("LineString", features[0][0][0][4])
        hours = OUTDATA["features"]['properties']["Forecast Hours"]
        # NOTE(review): these accumulators are shared across all hours, so
        # later hours repeat earlier observations — behavior kept as in the
        # original; confirm whether they should reset per hour.
        temp_line = []
        dir_line = []
        speed_line = []
        for i in features[0]:
            if 'Temperature Data' in features[0][i][3]:
                for x in features[0][0]:
                    temp_line.append([[x[0], x[1]], x[2]])
                hours[int(i/3)]["Temperature"] = {
                    "Observations Along Line": temp_line
                }
            if 'Wind Direction Data' in features[0][i][3]:
                for x in features[0][1]:
                    dir_line.append([[x[0], x[1]], x[2]])
                hours[int(i/3)]["Wind Direction"] = {
                    "Observations Along Line": dir_line
                }
            if 'Wind Speed Data' in features[0][i][3]:
                for x in features[0][2]:
                    speed_line.append([[x[0], x[1]], x[2]])
                hours[int(i/3)]["Wind Speed"] = {
                    "Observations Along Line": speed_line
                }
        return OUTDATA
    if poly:
        OUTDATA = _skeleton("Polygon", features[0][0][4])
        hours = OUTDATA["features"]['properties']["Forecast Hours"]
        for i in features[0]:
            if 'Temperature Data' in features[0][i][3]:
                hours[int(i/3)]["Temperature"] = {
                    "Min Temperature": features[0][i][0],
                    "Max Temperature": features[0][i][1],
                    "Mean Temperature": features[0][i][2]
                }
            if 'Wind Direction Data' in features[0][i][3]:
                hours[int(i/3)]["Wind Direction"] = {
                    "Min Wind Direction": features[0][i][0],
                    "Max Wind Direction": features[0][i][1],
                    "Mean Wind Direction": features[0][i][2]
                }
            if 'Wind Speed Data' in features[0][i][3]:
                hours[int(i/3)]["Wind Speed"] = {
                    "Min Wind Speed": features[0][i][0],
                    "Max Wind Speed": features[0][i][1],
                    "Mean Wind Speed": features[0][i][2]
                }
        return OUTDATA
    if point:
        OUTDATA = _skeleton("Point", [features[0][0][0], features[0][0][1]])
        hours = OUTDATA["features"]['properties']["Forecast Hours"]
        for i in features[0]:
            if 'Temperature Data' in features[0][i][3]:
                hours[int(i/3)]["Temperature"] = {
                    "Temperature": features[0][i][2],
                }
            if 'Wind Direction Data' in features[0][i][3]:
                hours[int(i/3)]["Wind Direction"] = {
                    "Wind Direction": features[0][i][2],
                }
            if 'Wind Speed Data' in features[0][i][3]:
                hours[int(i/3)]["Wind Speed"] = {
                    "Wind Speed": features[0][i][2],
                }
        return OUTDATA
def index():
    """Render the site's landing ("Home") page."""
    return render_template('index.html', title='Home')
def plus_one(digits):
    """
    Given a non-empty array of digits representing a non-negative integer,
    plus one to the integer.
    :param digits: list of digits of a non-negative integer,
    :type digits: list[int]
    :return: digits of operated integer
    :rtype: list[int]
    """
    reversed_result = []
    carry = 1
    # Walk from the least-significant digit, propagating the carry.
    for digit in reversed(digits):
        carry, value = divmod(digit + carry, 10)
        reversed_result.append(value)
    if carry:
        reversed_result.append(carry)
    return reversed_result[::-1]
def run_experiment(input_frame, n_samples=1, temperature=1, npartitions=1):
    """
    Runs experiment given inputs.
    Takes `n_samples` samples from the VAE
    Returns a list of size `n_samples` of results for each input
    """
    encoder_data = a.get_encoder()
    decoder_data = a.get_decoder()
    vae = a.get_vae(
        encoder_data["model"],
        decoder_data["model"],
        encoder_data["tokenizer"],
        decoder_data["tokenizer"],
        beta=0,
    )

    def evaluator(row):
        # `row` is a dataframe row (apply with axis=1); its first three
        # positions feed the analogy evaluation, keeping only the prediction.
        return a.eval_analogy(
            vae,
            encoder_data["tokenizer"],
            decoder_data["tokenizer"],
            row[0],
            row[1],
            row[2],
            temperature=temperature,
        )[0]

    output_frame = pd.DataFrame()
    use_dask = npartitions > 1
    for column in ("pred_{}".format(i) for i in range(n_samples)):
        if use_dask:
            # Dask path: evaluate per partition, then materialize.
            output_frame[column] = input_frame.map_partitions(
                lambda df: df.apply(evaluator, axis=1)
            ).compute()
        else:
            output_frame[column] = input_frame.apply(evaluator, axis=1)
    return output_frame
def to_angle(s, sexagesimal_unit=u.deg):
    """Construct an `Angle` with default units.

    Defaults applied when the input itself carries no unit:
    - numbers are radians;
    - decimal strings ('123.4') are degrees;
    - sexagesimal strings ('12:34:56.7') and tuples use `sexagesimal_unit`.
    Bytes inputs are rejected (decode to str first).

    Parameters
    ----------
    s : :class:`~astropy.coordinates.Angle` or equivalent, string, float, tuple
        Anything accepted by `Angle` and also unitless strings, numbers, tuples
    sexagesimal_unit : :class:`~astropy.units.UnitBase` or str, optional
        The unit applied to sexagesimal strings and tuples

    Returns
    -------
    angle : :class:`~astropy.coordinates.Angle`
        Astropy `Angle`
    """
    try:
        return Angle(s)
    except u.UnitsError:
        # Bytes would be interpreted as a sequence of ints (radians); refuse.
        if isinstance(s, bytes):
            raise TypeError(f'Raw bytes {s} not supported: '
                            'first decode to string (or add unit)') from None
        # A unitless number, string, or tuple remains.
        is_sexagesimal = isinstance(s, tuple) or (isinstance(s, str) and ':' in s)
        if is_sexagesimal:
            return Angle(s, unit=sexagesimal_unit)
        if isinstance(s, str):
            return Angle(s, unit=u.deg)
        # XXX Maybe deprecate this in future and only deal with strings here
        return Angle(s, unit=u.rad)
def simpleBlocking(rec_dict, blk_attr_list):
    """Build the blocking index data structure (dictionary) to store blocking
    key values (BKV) as keys and the corresponding list of record identifiers.

    A blocking is implemented that simply concatenates attribute values.

    Parameter Description:
      rec_dict      : Dictionary that holds the record identifiers as keys
                      and corresponding list of record values
      blk_attr_list : List of blocking key attributes to use

    Returns a dictionary with blocking key values as its keys and a list of
    record identifiers as its values (one list for each block), e.g. blocking
    on 'postcode' gives {'2000': [rec1_id, rec2_id, ...], '2600': [...], ...},
    while blocking on 'postcode' and 'gender' gives keys like '2000f', '2000m'.
    """
    print('Run simple blocking:')
    print(' List of blocking key attributes: '+str(blk_attr_list))
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')
    block_dict = {}
    for rec_id, rec_values in rec_dict.items():
        # The blocking key is the concatenation of the selected attributes.
        rec_bkv = ''.join(rec_values[attr] for attr in blk_attr_list)
        # setdefault creates the block on first sight, then appends.
        block_dict.setdefault(rec_bkv, []).append(rec_id)
    return block_dict
def get_marker_obj(plugin, context, resource, limit, marker):
    """Retrieve a resource marker object for pagination.

    Invokes plugin._get_<resource>(context, marker).

    :param plugin: The plugin processing the request.
    :param context: The request context.
    :param resource: The resource name.
    :param limit: Indicates if pagination is in effect.
    :param marker: The id of the marker object.
    :returns: The marker object from the plugin when both limit and marker
        are truthy, otherwise None.
    """
    if not (limit and marker):
        return None
    getter = getattr(plugin, '_get_%s' % resource)
    return getter(context, marker)
import sys
import importlib
import logging
def import_project_module(project_name, project_dir):
    """Import project module, from the system or from the project directory."""
    if "--installed" in sys.argv:
        # Test against the installed package rather than the source tree.
        try:
            return importlib.import_module(project_name)
        except Exception:
            logger.error("Cannot run tests on installed version: %s not installed or raising error.",
                         project_name)
            raise
    # Otherwise use the built source tree.
    build_dir = build_project(project_name, project_dir)
    if build_dir is None:
        # NOTE(review): execution continues with build_dir=None, so the
        # sys.path insert below will add None — kept as in the original.
        logging.error("Built project is not available !!! investigate")
    sys.path.insert(0, build_dir)
    logger.warning("Patched sys.path, added: '%s'", build_dir)
    return importlib.import_module(project_name)
def generative(max_value: int = FIBONACCI_MAX) -> int:
    """
    Fully generative approach: build the entire Fibonacci sequence below
    ``max_value``, then sum its even members.
    --> benchmark: 8588 ns/run
    :param max_value: The ceiling value of Fibonacci numbers to be added
    :return: The summation in question
    """
    fibonacci_list = list(FIBONACCI_SEED)
    # Extend the sequence while the next term stays below the ceiling.
    while fibonacci_list[-1] + fibonacci_list[-2] < max_value:
        fibonacci_list.append(fibonacci_list[-1] + fibonacci_list[-2])
    # Sum only the even terms.
    return sum(number for number in fibonacci_list if number % 2 == 0)
import traceback
def add_vendor_cves(ms_directory, neo4jpasswd, logger=structlog.get_logger()):
    """
    Adds CVEs to database.
    :param ms_directory: directory where the microsoft files are downloaded
    :param neo4jpasswd: password to neo4j database
    :param logger: logger for the method
    :return: output message
    """
    logger.info("Method add_vendor_CVEs was called with parameter: " +
                "ms_directory={}"
                .format(ms_directory))
    neo4j = Neo4j(neo4jpasswd)
    from_date = date.today() - timedelta(days=14)
    to_date = date.today()
    count = {'Adobe': 0, 'Android': 0, 'Apple': 0, 'Cisco': 0, 'Lenovo': 0,
             'Microsoft': 0, 'Oracle': 0, 'RedHat': 0}
    # Vendor fetchers in the original processing order; the lambdas absorb
    # the differing per-vendor signatures so one loop replaces eight
    # copy-pasted try/except blocks.
    fetchers = [
        ('Microsoft', lambda: get_microsoft_entries(ms_directory, from_date, to_date, logger)),
        ('Apple', lambda: get_apple_entries(from_date, to_date)),
        ('Adobe', lambda: get_adobe_entries(from_date, to_date)),
        ('RedHat', lambda: get_redhat_entries(from_date, to_date, logger)),
        ('Oracle', lambda: get_oracle_entries(from_date, to_date)),
        ('Android', lambda: get_android_entries(from_date, to_date)),
        ('Cisco', lambda: get_cisco_entries(from_date, to_date, logger)),
        ('Lenovo', lambda: get_lenovo_entries(from_date, to_date)),
    ]
    for vendor, fetch in fetchers:
        try:
            logger.debug(vendor)
            count[vendor] = save_to_db(neo4j, fetch(), logger)
        except (ValueError, AttributeError, TimeoutError, IndexError,
                WebDriverException, TypeError, HTTPError) as err:
            # One vendor failing must not stop the remaining vendors.
            logger.error("Error while parsing {} data: ".format(vendor) + str(err))
            logger.error(traceback.format_exc())
    result = ""
    for key in count:
        result += "{}: {} updated CVEs. ".format(key, count[key])
    logger.info(result)
    return result
def enc_net(num_classes, pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        num_classes: number of output classes for the network head
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
            NOTE: pretrained weights are not currently loaded; the flag is
            kept for interface compatibility.
    """
    return Net(num_classes, **kwargs)
def get_bytes(data):
    """
    Helper method to get the no. of bytes in the hex
    (two hex digits encode one byte)."""
    hex_digits = sanatize_hex(str(data))
    return int(len(hex_digits) / 2)
import collections
def _group_by(input_list, key_fn):
"""Group a list according to a key function (with a hashable range)."""
result = collections.defaultdict(list)
for x in input_list:
result[key_fn(x)].append(x)
return result | 288c108588f9e4ea60c4dac6ff656c8c8ffde580 | 27,843 |
import json
def set_parameters_in_cookie(response: Response) -> Response:
    """Set request parameters in the cookie, to use as future defaults."""
    # Only persist parameters on a successful response.
    if response.status_code == HTTPStatus.OK:
        persisted = {
            name: request.args[name]
            for name in PARAMS_TO_PERSIST
            if name in request.args
        }
        response.set_cookie(PARAMS_COOKIE_NAME, json.dumps(persisted))
    return response
from _pytest.logging import LogCaptureFixture
import json
from typing import Any
import logging
def test_wild_dlq_error(mock_handler: MagicMock, mock_rsmq: MagicMock, caplog: LogCaptureFixture) -> None:
    """test error level logs when message fails to successfully reach DLQ"""
    # Handler reports failure, and the DLQ producer also fails to send.
    mock_handler.return_value = False, False
    mock_rsmq.return_value.sendMessage.return_value.execute = lambda: False
    payload = {"uuid": str(uuid4())}
    queued_msg = {
        "id": "rsmq_id",
        "message": json.dumps(payload),
    }
    mock_rsmq.return_value.receiveMessage.return_value.exceptions.return_value.execute = lambda: queued_msg
    queue_cfg = {
        "comp_1": {"value": "random_val", "profile": "rsmq"},
        "dlq": {"profile": "rsmq-dlq", "value": "my_dlq"},
    }
    engine = Engine(input_queue="comp_1", dead_letter_queue="dlq", queue_config=queue_cfg, metrics_port=None)
    @engine.stream_app
    def fun(msg: Any) -> bool:  # pylint: disable=W0613
        return True
    # Run one message through and verify the DLQ failure is logged at ERROR.
    with caplog.at_level(logging.ERROR):
        fun()
    assert "failed producing message to dlq" in caplog.text
    engine.shutdown()
from typing import VT
from typing import Optional
def teleport_reduce(g: BaseGraph[VT,ET], quiet:bool=True, stats:Optional[Stats]=None) -> BaseGraph[VT,ET]:
    """Run :func:`full_reduce` without changing the graph structure of the
    resulting diagram: only the location and value of the phases differ in
    the output graph."""
    simplifier = Simplifier(g)
    simplifier.full_reduce(quiet=quiet, stats=stats)
    return simplifier.mastergraph
def map_coords_to_scaled(coords, orig_size, new_size):
    """
    Map coordinate indices relative to the original 3-D image to indices in
    the re-scaled 3-D image, given the original and new image shapes.
    Returns integer indices of the voxel that contains the center of the
    transformed coordinate location.
    """
    scaled = map_coords_to_scaled_float(coords, orig_size, new_size)
    return tuple(int(component) for component in scaled)
def _PromptToUpdate(path_update, completion_update):
    """Prompt the user to update path or command completion if unspecified.

    Args:
      path_update: bool, Value of the --update-path arg.
      completion_update: bool, Value of the --command-completion arg.

    Returns:
      (path_update, completion_update) (bool, bool) Whether to update path and
      enable completion, respectively, after prompting the user.
    """
    # Both flags specified on the command line: nothing to ask.
    if path_update is not None and completion_update is not None:
        return path_update, completion_update
    # One combined question covers every unspecified update.
    pending_actions = []
    if path_update is None:
        pending_actions.append(_PATH_PROMPT)
    if completion_update is None:
        pending_actions.append(_COMPLETION_PROMPT)
    prompt = '\nModify profile to {}?'.format(' and '.join(pending_actions))
    response = console_io.PromptContinue(prompt)
    # The response fills in whichever values were left unspecified.
    if path_update is None:
        path_update = response
    if completion_update is None:
        completion_update = response
    return path_update, completion_update
import os
def answer_cells_of_dir(submission_dir):
    """
    get the contents of all solution cells
    of all ipynb files in a directory
    """
    cells = {}
    for filename in os.listdir(submission_dir):
        if filename.endswith(".ipynb"):
            # Key by the filename without its .ipynb extension.
            stem = filename[:-len(".ipynb")]
            cells[stem] = answer_cells_of_nb("%s/%s" % (submission_dir, filename))
    return cells
def _get_static_covariate_df(trajectories):
    """The (static) covariate matrix as a (location x static_covariate) frame."""
    v_df = (
        trajectories.static_covariates
        .reset_coords(drop=True)
        .transpose('location', 'static_covariate')
        .to_pandas())
    # This can then be used with, e.g. patsy:
    #   expanded_v_df = patsy(v_df, ...patsy details...)
    # and optionally converted back with xarray.DataArray(expanded_v_df).
    return v_df
import json
def answer_cells_of_nb(a_ipynb):
    """
    get the contents of all answer cells (having grade_id)
    in an a_ipynb file, as {grade_id: (source, outputs)}
    """
    with open(a_ipynb) as ipynb_fp:
        notebook = json.load(ipynb_fp)
    cells = {}
    for cell in notebook["cells"]:
        nbg = cell["metadata"].get("nbgrader")
        # Only nbgrader solution cells count as answer cells.
        if nbg is None or not nbg["solution"]:
            continue
        assert "grade_id" in nbg, (a_ipynb, cell)
        prob_name = nbg["grade_id"]  # like a1-1-1
        assert prob_name not in cells, prob_name
        cells[prob_name] = cell["source"], cell.get("outputs", [])
    return cells
def _transpose_augment(img_arr):
    """Mirror augmentation: return the horizontally flipped grayscale image."""
    img = Image.fromarray(img_arr, "L")
    flipped = img.transpose(Image.FLIP_LEFT_RIGHT)
    return [np.asarray(flipped)]
def open_sciobj_file_by_path(abs_path, write=False):
    """Open a SciObj file for read or write, creating missing directories on
    write. For a SciObj stored in the default SciObj store, the path includes
    the PID hash based directory levels.

    This is the only method in GMN that opens SciObj files, so it can be
    modified to customize the SciObj storage locations and can be mocked for
    testing.

    Note that when a SciObj is created by a client via MNStorage.create(),
    Django streams the SciObj bytes to a temporary file or memory location as
    set by ``FILE_UPLOAD_TEMP_DIR`` and related settings.
    """
    mode = "wb" if write else "rb"
    if write:
        d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
    return open(abs_path, mode)
def mps_to_kmh(speed_in_mps):
    """Convert from meters per second to kilometers per hour.

    (The previous docstring stated the inverse conversion; the name and the
    formula — km per meter times seconds per hour, i.e. * 3.6 — convert
    m/s to km/h.)

    Arguments:
        speed_in_mps: a speed in m/s to convert
    Returns:
        speed_in_kmh: the speed in km/h
    """
    return speed_in_mps / 1000.0 * 3600.0
import logging
def tokenize_and_remove_stopwords(txt, additional_stopwords):
    """
    Runs tokenization and removes stop words on the specified text

    Parameters
    -----------
    txt: text to process
    additional_stopwords: path to file containing possible additional stopwords on each line

    Returns
    -------
    Processed list of lowercase tokens with stopwords removed
    """
    logging.info("Removing stopwords")
    words_filter = load_stopwords(additional_stopwords)
    tokens = lowercase_words(word_tokenize(txt))
    # (The original initialized tokens_without_sw to [] and immediately
    # overwrote it — dead assignment removed.)
    return [word for word in tokens if word not in words_filter]
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Add the Wiser System Switch entities."""
    data = hass.data[DOMAIN][config_entry.entry_id][DATA]  # Get Handler
    # Build the statically defined switches first.
    wiser_switches = []
    for switch in WISER_SWITCHES:
        switch_type = switch["type"]
        if switch_type == "room":
            # One switch per room that actually has devices.
            for room in [r for r in data.wiserhub.rooms.all if len(r.devices) > 0]:
                wiser_switches.append(
                    WiserRoomSwitch(data, switch["name"], switch["key"], switch["icon"], room.id)
                )
        elif switch_type == "system":
            wiser_switches.append(
                WiserSystemSwitch(data, switch["name"], switch["key"], switch["icon"])
            )
        elif switch_type == "device":
            # One switch per device exposing the relevant attribute.
            for device in [d for d in data.wiserhub.devices.all if hasattr(d, switch["key"])]:
                wiser_switches.append(
                    WiserDeviceSwitch(data, switch["name"], switch["key"], switch["icon"], device.id)
                )
    async_add_entities(wiser_switches)
    # Smart plugs (if any) get their own entity class.
    wiser_smart_plugs = [
        WiserSmartPlug(data, plug.id, "Wiser {}".format(plug.name))
        for plug in data.wiserhub.devices.smartplugs.all
    ]
    async_add_entities(wiser_smart_plugs)
    return True
def get_keep_dice_check(input_prompt):
    """
    Ask a yes/no question and return the user's response.
    :param input_prompt: String yes no question.
    """
    answer = pyip.inputYesNo(prompt=input_prompt)
    return answer
import sh
def get_pending_jobs(sort=True):
    """Obtains the list of currently pending (queued) jobs for the user."""
    username = getusername()
    # squeue status codes (%t specifier, see squeue man page) that we treat
    # as "pending": PD = pending, S = suspended.
    queued_statuses = ('PD', 'S')
    job_lines = sh.pipe_out(
        ("squeue", "-u", username, "--noheader", "--format=%i %t"), split=True)
    pending = []
    for entry in job_lines:
        fields = entry.split()
        if fields[1] in queued_statuses:
            pending.append(fields[0])
    if sort:
        pending.sort()
    return pending
def getOverlapRange(rangeA, rangeB):
    """
    Calculate the overlapping range between rangeA and rangeB.

    Args:
        rangeA (list, tuple):
            List or tuple containing start and end value in float.
        rangeB (list, tuple):
            List or tuple containing start and end value in float.

    Returns:
        (list):
            List containing the overlapping range between rangeA and rangeB.
    """
    assert isOverlap(rangeA, rangeB), f"There is no overlap between rangeA:{rangeA} and rangeB:{rangeB}"
    overlap_start = max(rangeA[0], rangeB[0])
    overlap_end = min(rangeA[1], rangeB[1])
    return [overlap_start, overlap_end]
def multiply_values(dictionary: dict, num: int) -> dict:
    """Multiplies each value in `dictionary` by `num`

    Args:
        dictionary (dict): subject dictionary (may be None)
        num (int): multiplier

    Returns:
        dict: mapping of keys to values multiplied by multiplier; an empty
        dict when `dictionary` is None
    """
    if dictionary is None:
        return {}
    return {key: value * num for key, value in dictionary.items()}
def send_alarm(address, email_type, template_data=None):
    """
    Send an email message to the given email address immediately, bypassing any queues or database system.
    :param address: The email address to send this message to.
    :param email_type: str defining this email template e.g EMAIL_WELCOME. Defined in email_types.json
    :param template_data: dict, values that will be merged into the template.
    """
    # The original used a mutable default ({}) — replaced with the
    # None-sentinel idiom; an empty dict is still passed downstream.
    if template_data is None:
        template_data = {}
    email_message = EmailMessage.from_email_type(address, email_type, template_data)
    _DISPATCHER.send_email_alarm(email_message)
    return email_message
def serialize_curve_point(p: Point) -> bytes:
    """
    Serialize an elliptic curve point ``p`` in compressed form as described in
    SEC1v2 (https://secg.org/sec1-v2.pdf) section 2.3.3.

    Corresponds directly to the "ser_P(P)" function in BIP32
    (https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).

    :param p: The elliptic curve point to be serialized.
    :return: A byte sequence containing the serialization of ``p``.
    """
    x, y = p.x(), p.y()
    # Prefix encodes the parity of y: 0x03 for odd, 0x02 for even.
    prefix = b'\x03' if y & 1 else b'\x02'
    return prefix + serialize_uint256(x)
def gcom_so_config(revision=None):
    """
    Create a shared object for linking.
    """
    grab_config = gcom_grab_config(revision=revision)
    return BuildConfig(
        project_label=f'gcom shared library {revision}',
        source_root=grab_config.source_root,
        steps=[
            # Position-independent compile, then link into libgcom.so.
            *common_build_steps(fpic=True),
            LinkSharedObject(linker='mpifort', output_fpath='$output/libgcom.so'),
        ]
    )
def func(command, description, link, params_string, returns="On success, the sent Message is returned.", return_type="Message"):
    """
    Generate the Python source of an API wrapper method from a Telegram-style
    parameter table. Code generator: the output's literal tabs/whitespace are
    significant, so the body is left byte-identical.

    Live template for pycharm:
    y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$")
    """
    # Indent the free-form description for embedding inside the generated docstring.
    description_with_tabs = "\t\t" + description.strip().replace("\n", "\n\t\t")
    param_list_args = []
    param_list_kwargs = []
    args = []
    args2 = []
    kwargs = []
    kwargs2 = []
    asserts = []
    str_args = ""
    str_kwargs = ""
    param_strings = params_string.split("\n")
    # One table row per parameter; required params become positional args,
    # optional ones become keyword args defaulting to None.
    for param in param_strings:
        assert_commands, assert_comments, param_name, param_type, table, non_buildin_type, param_name_input = parse_param_types(param)
        param_required = table[2].strip()
        param_needed = None
        if param_required == "Yes":
            param_needed = True
        elif param_required == "Optional":
            param_needed = False
        param_description = table[3].strip()
        if param_needed:
            param_list_args.append(Param(param_name, param_type, param_needed, param_description))
            args.append(param_name)
            args2.append("{param_name}={param_name}".format(param_name=param_name))
            str_args += '\t\t:param {key}: {descr}\n\t\t:type {key}: {type}\n\n'.format(key=param_name, descr=param_description, type=param_type)
            if assert_commands:
                # Required params get a not-None assert plus any type asserts.
                asserts.append("assert({var} is not None)".format(var=param_name))
                asserts.append("assert({ass})".format(ass=" or ".join(assert_commands)) + ((" # {comment}".format(comment=", ".join(assert_comments))) if assert_comments else ""))
        else:
            param_list_kwargs.append(Param(param_name, param_type, param_needed, param_description))
            kwargs.append("{param_name}=None".format(param_name=param_name))
            kwargs2.append("{param_name}={param_name}".format(param_name=param_name))
            str_kwargs += '\t\t:keyword {key}: {descr}\n\t\t:type {key}: {type}\n\n'.format(key=param_name, descr=param_description, type=param_type)
            if assert_commands:
                # Optional params may be None, otherwise the type asserts apply.
                asserts.append("assert({var} is None or {ass})".format(var=param_name, ass=" or ".join(assert_commands)) + ((" # {comment}".format(comment=", ".join(assert_comments))) if assert_comments else ""))
    # Positional args first, then keyword args, in the generated signature.
    args.extend(kwargs)
    args2.extend(kwargs2)
    asserts_string = "\n\t\t" + "\n\t\t".join(asserts)
    # Assemble the docstring parameter sections.
    text = ""
    if len(str_args)>0:
        text += '\n\t\tParameters:\n\n'
        text += str_args
    if len(str_kwargs)>0:
        text += '\n\t\tOptional keyword parameters:\n\n'
        text += str_kwargs
    do_args = ['"%s"' % command]
    do_args.extend(args2)
    # Render the full method: signature, docstring, asserts, self.do(...) call.
    result = '\tdef {funcname}(self, {params}):\n\t\t"""\n{description_with_tabs}\n\n\t\t{link}\n\n' \
             '{paramshit}\n' \
             '\t\tReturns:\n\n\t\t:return: {returns}\n\t\t:rtype: {return_type}\n\t\t"""{asserts_with_tabs}\n\t\treturn self.do({do_args})\n\t# end def {funcname}'.format(
        funcname=convert_to_underscore(command),
        params=", ".join(args), description_with_tabs=description_with_tabs, link=link,
        returns=returns, return_type=return_type, command=command, do_args=", ".join(do_args),
        asserts_with_tabs=asserts_string,
        paramshit = text
    )
    result = result.replace("\t", " ")
    return result | 4c058afdb03b9d85a32e654f83beec95b72785ee | 27,864 |
def get_basic_details(args, item):
    """
    Build the basic detail dict for an item in a transaction row.

    :param args: frappe._dict-like with keys such as item_code, warehouse,
        doctype, name, project, update_stock, qty, stock_qty
    :param item: Item document; looked up from args["item_code"] when falsy
    :return: frappe._dict
    """
    if not item:
        item = frappe.get_doc("Item", args.get("item_code"))
    # Prefer the item's own default warehouse over the transaction's.
    warehouse = item.default_warehouse or args.warehouse
    out = frappe._dict({
        "item_code": item.name,
        "item_name": item.item_name,
        "description": cstr(item.description).strip(),
        "image": cstr(item.image).strip(),
        "warehouse": warehouse,
        "qty": args.qty or 1.0,
        "stock_qty": args.qty or 1.0
    })
    # Refresh these from the item document itself.
    for fieldname in ("item_name", "item_group"):
        out[fieldname] = item.get(fieldname)
    return out
def validate_standard_json(json_to_test: dict) -> bool:
    """Validate fixed json against the standard json schema.

    :param json_to_test: the JSON document to validate
    :return: True when the document conforms to the schema
    """
    # (Dead initialization of the flag to False removed — it was
    # unconditionally overwritten.)
    schema_to_use = get_standard_json_schema()
    return validate_json(json_to_test, schema_to_use, True)
from typing import Any
def put_observation_field_values(
    observation_id: int,
    observation_field_id: int,
    value: Any,
    access_token: str,
    **kwargs,
) -> JsonResponse:
    # TODO: Also implement a put_or_update variant that deletes then recreates the field value.
    # TODO: Return a meaningful exception when the field is already set on the observation.
    """Set an observation field value on an observation.

    Fails if this observation field is already set for this observation.
    To find an ``observation_field_id``, either use
    :py:func:`.get_observation_fields` or search on iNaturalist:
    https://www.inaturalist.org/observation_fields

    **API reference:** https://www.inaturalist.org/pages/api+reference#put-observation_field_values-id

    Args:
        observation_id: ID of the observation receiving this observation field value
        observation_field_id: ID of the observation field for this observation field value
        value: Value for the observation field
        access_token: The access token, as returned by :func:`get_access_token()`

    Returns:
        The newly updated field value record
    """
    payload = {
        'observation_field_value': {
            'observation_id': observation_id,
            'observation_field_id': observation_field_id,
            'value': value,
        }
    }
    url = f'{API_V0_BASE_URL}/observation_field_values/{observation_field_id}'
    response = put(url, access_token=access_token, json=payload, **kwargs)
    return response.json()
from typing import Any
def desg_to_prefix(desg: str) -> str:
    """Convert a small-body designation to a filesystem-safe file prefix.

    Slashes and spaces are removed; parentheses become underscores.
    E.g. 'C/1995 O1' -> 'C1995O1' and '(1) Ceres' -> '_1_Ceres'.

    :param desg: small-body designation
    :return: sanitized prefix string (the return type was previously
        mis-annotated as ``Any``)
    """
    # One C-level pass via str.translate instead of four chained replace calls.
    return desg.translate(str.maketrans({'/': '', ' ': '', '(': '_', ')': '_'}))
def get_logger(name=None, log=False, level=INFO, path=None):
    """
    Return a file-backed or stream logger depending on the arguments.

    Handy with command-line flags, when the kind of logger the program needs
    is only known at runtime.

    :param name: The name of the file to log into.
    :param log: Whether to actually commit information to a file.
    :param level: The verbosity level; only events at or above it are shown.
    :param path: The folder to put the log file into.
    """
    # Stream output unless the caller asked for on-disk logging.
    if not log:
        return StreamLogger(name=name, level=level)
    return FileLogger(name=name, level=level, path=path)
def get_url_for_packages(provenance):
    """Return a versioned download url for every package in provenance.

    Examples
    --------
    >>> get_url_for_packages({'cmtk' : '3.2.2-1.4build1'})
    {'cmtk': 'http://example.com/cmtk_3.2.2-1.4build1.deb'}

    Parameters
    ----------
    provenance : dict
        Mapping of package name to version, as read from provenance.

    Returns
    -------
    dict
        package: url for every package found in provenance
    """
    lgr.debug('Finding versioned urls for following provenance info: %s',
              str(provenance))
    urls = {}
    for pkg, ver in provenance.items():
        urls[pkg] = 'http://example.com/%s_%s.deb' % (pkg, ver)
    return urls
def stop_job(job_name: Text,
             execution_id: Text) -> JobInfo:
    """
    Stop a job defined in the ai flow workflow.

    :param job_name: The job name which task defined in workflow.
    :param execution_id: The ai flow workflow execution identifier.
    :return: The result of the action.
    """
    client = get_ai_flow_client()
    job_proto = client.stop_job(job_name, execution_id)
    return proto_to_job(job_proto)
def find_user():
    """
    Return the user document matching the username stored in the current
    session, as a dict (or None when no such user exists).
    """
    # The session's "user" key holds the logged-in username.
    return mongo.db.users.find_one({"username": session["user"]})
def field2nullable(field, **kwargs):
    """Return the swagger attributes describing nullability of a field.

    :param Field field: A marshmallow field.
    :rtype: dict
    """
    if not field.allow_none:
        return {}
    # OpenAPI 2 spells the attribute 'x-nullable'; OpenAPI 3+ uses 'nullable'.
    major_version = kwargs['openapi_major_version']
    attribute_name = 'x-nullable' if major_version < 3 else 'nullable'
    return {attribute_name: True}
def zero_expand3d(inputs, stride=1):
    """Insert zeros between neighbouring elements along depth/height/width.

    With stride 1 each original element keeps its neighbours one zero apart,
    e.g. a 2x2 plane [[1, 2], [3, 4]] becomes
    [[1, 0, 2], [0, 0, 0], [3, 0, 4]], and whole zero planes are inserted
    between depth slices.

    Args:
    -----------------------------
    inputs : tvm.te.tensor.Tensor
        shape [batch, channel, depth, height, width]
    stride: (optional:1) int or tuple
        expected: (d_stride, h_stride, w_stride)
    -----------------------------
    Returns:
    -----------------------------
    tvm.te.tensor.Tensor
        shape [batch, channel, (depth - 1) * d_stride + 1,
        (height - 1) * h_stride + 1, (width - 1) * w_stride + 1]
    -----------------------------
    """
    if isinstance(stride, (int, tvm.tir.IntImm)):
        stride = (stride, stride, stride)
    assert_print(isinstance(stride, tuple), "type(stride)={}".format(type(stride)))
    assert_print(len(stride) == 3)
    zero_const = tvm.tir.expr.const(0, inputs.dtype)
    batch, channels, in_d, in_h, in_w = inputs.shape
    sd, sh, sw = stride
    out_shape = (
        batch,
        channels,
        (in_d - 1) * sd + 1,
        (in_h - 1) * sh + 1,
        (in_w - 1) * sw + 1,
    )

    def _pick(b, c, d, h, w):
        # Original values land where all three coordinates align with the
        # strides; every other position is a zero of the input dtype.
        on_grid = tvm.te.all(d % sd == 0, h % sh == 0, w % sw == 0)
        return tvm.te.if_then_else(
            on_grid, inputs[b, c, d // sd, h // sh, w // sw], zero_const)

    return tvm.te.compute(out_shape, _pick)
import json
def _load_setup_cfg():
    """Load the setup configuration from the 'setup.json' file.

    :return: the parsed configuration as a dict
    :raises DependencySpecificationError: if the file is missing or malformed
    """
    try:
        with open(ROOT / 'setup.json') as setup_json_file:
            return json.load(setup_json_file)
    except json.JSONDecodeError as error:
        # Chain the decode error so tracebacks show the original cause
        # (the bare `raise` previously lost the explicit linkage).
        raise DependencySpecificationError(
            "Error while parsing 'setup.json' file: {}".format(error)) from error
    except FileNotFoundError as error:
        raise DependencySpecificationError("The 'setup.json' file is missing!") from error
import json
def toJSON(obj, opt_pretty=False, for_cloud_api=True):
    """Serialize an object to a JSON string appropriate for API calls.

    Args:
      obj: The object to serialize.
      opt_pretty: True to pretty-print the object.
      for_cloud_api: Whether the encoding should be done for the Cloud API or
        the legacy API.

    Returns:
      A JSON string representing the input.
    """
    # Pretty output implies non-compact encoding plus a 2-space indent.
    indent = 2 if opt_pretty else None
    serializer = Serializer(not opt_pretty, for_cloud_api=for_cloud_api)
    encoded = serializer._encode(obj)  # pylint: disable=protected-access
    return json.dumps(encoded, indent=indent)
import urllib
def load_config_file_koe(filename):
    """Load a KOE config file from a local path or a URL.

    Lines are ``KEY = value`` pairs; blank lines and ``#`` comments are
    skipped.  ``true``/``false`` values (any case) become booleans, and the
    known numeric settings are coerced to int/float.

    Args:
        filename: Filename (absolute or relative path, or a URL).
    Returns:
        dict: Parsed configuration, or None if the file could not be read.
    """
    config_values = {}
    # First try to open things locally. If that doesn't work, try it as a URL.
    try:
        # `with` closes the handle; the original leaked an open file object.
        with open(filename, "r") as config_file:
            config_lines = config_file.readlines()
    except FileNotFoundError:
        try:
            response = urllib.request.urlopen(filename)
            config_lines = [line.decode("utf-8") for line in response]
        except Exception:
            # urlopen can raise URLError, ValueError (malformed URL), etc.;
            # any failure means "no file" (narrowed from the original bare except).
            print("Failed to get any file")
            return None
    for line in config_lines:
        line = line.strip()
        if len(line) == 0 or line[0] == "#":
            continue
        splits = line.split("=")
        config_values[splits[0].strip()] = splits[1].strip()
    # Convert recognised boolean strings in place.
    for key in config_values:
        value = config_values[key]
        if value.lower() == "true":
            config_values[key] = True
        elif value.lower() == "false":
            config_values[key] = False
    # Coerce the known numeric settings (KeyError here means a required
    # setting is missing from the file, matching the original behaviour).
    config_values["SIZE"] = int(config_values["SIZE"])
    config_values["CORES"] = int(config_values["CORES"])
    config_values["ZEROPOINT"] = int(config_values["ZEROPOINT"])
    config_values["ARC_CONV"] = float(config_values["ARC_CONV"])
    config_values["LINEAR_STEP"] = float(config_values["LINEAR_STEP"])
    config_values["ALARM_TIME"] = int(config_values["ALARM_TIME"])
    config_values["BOX_SIZE"] = int(config_values["BOX_SIZE"])
    config_values["FILTER_SIZE"] = int(config_values["FILTER_SIZE"])
    mask_parts = config_values["MASK_PARAMS"].split(",")
    config_values["MASK_PARAMS"] = [float(mask_parts[0]), float(mask_parts[1]),
                                    int(mask_parts[2])]
    return config_values
def select_by_type(transcripts, log):
    """Filter transcripts depending on their difference type.

    The difference-type analyses are tried in order (UTR5 number/boundary,
    then UTR ends); the first one that yields candidates wins, and the UTR3
    analysis is the final fallback.
    """
    for analyse in (analyse_difference_type_utr5_number_or_boundary,
                    analyse_difference_type_UTR_ends):
        candidates, dtype, dcrit = analyse(transcripts, log)
        if candidates is not None:
            return candidates, dtype, dcrit
    return analyse_difference_type_UTR3(transcripts, log)
def lorentzian_distance(x, y):
    """Calculate the Lorentzian Distance, sum(log(1 + |x_i - y_i|)).

    Args:
        x (np.array): N-dimensional array.
        y (np.array): N-dimensional array.

    Returns:
        The Lorentzian Distance between x and y.
    """
    # np.log1p is numerically more accurate than log(1 + d) when the
    # element-wise differences are tiny.
    return np.sum(np.log1p(np.abs(x - y)))
def check_database_status(database_name, env):
    """This function looks for a DatabaseCreate task and returns a http
    response or the Database itself depending on the context. If the
    DatabaseCreate task is still running or failed, a http response is
    returned, otherwise this function tries to retrieve the Database with
    the get_database function.
    Parameters:
    database_name (str): Name of the database
    env (str): It represents the database environment (prod or dev)
    Returns:
    Database or Response: Database or Rest Framework Response object
    """
    # Look up the most recent create task for this database (may be None).
    database_create = last_database_create(database_name, env)
    LOG.info(
        "Task {}".format(getattr(database_create, 'task', 'No tasks found'))
    )
    if database_create:
        # A still-running create means the database is not ready yet (412);
        # a failed create is reported as a server error (500).
        if database_create.is_running:
            msg = "Database {} in env {} is beeing created.".format(
                database_name, env)
            return log_and_response(
                msg=msg, http_status=status.HTTP_412_PRECONDITION_FAILED)
        elif database_create.is_status_error:
            msg = ("A error ocurred creating database {} in env {}. Check "
                   "error on task history in https://dbaas.globoi.com").format(
                database_name, env)
            return log_and_response(
                msg=msg, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    # No (problematic) create task: try to fetch the database itself.
    try:
        database = get_database(database_name, env)
    except IndexError as e:
        # get_database indexes into its result set; an IndexError here is
        # treated as "no such database".
        msg = "Database {} does not exist in env {}.".format(
            database_name, env)
        return log_and_response(
            msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except MultipleObjectsReturned as e:
        msg = "There are multiple databases called {} in {}.".format(
            database_name, env)
        return log_and_response(
            msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except Exception as e:
        # Catch-all boundary: anything unexpected becomes a 500 with a
        # generic message rather than leaking internals to the caller.
        msg = "Something ocurred on dbaas, please get in touch with your DBA."
        return log_and_response(
            msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    # `database.status` truthiness signals liveness — presumably a health
    # flag on the Database model; TODO confirm.
    if not(database and database.status):
        msg = "Database {} is not Alive.".format(database_name)
        return log_and_response(
            msg=msg, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return database
def get_flat_schema(schema_name=None):
    """Flatten the specified data model schema, defaulting to the core schema.

    Useful for retrieving FITS keywords or valid value lists.
    """
    schema = _load_schema(schema_name)
    return _schema_to_flat(schema)
def repeat3(img):
    """
    Repeat an array 3 times along a new trailing axis.

    :param img: A numpy.ndarray
    :return: A numpy.ndarray with a shape of: img.shape + (3,)
    """
    # Add the trailing axis first, then tile it three times.
    expanded = np.expand_dims(img, axis=-1)
    return np.repeat(expanded, 3, axis=-1)
import time
import tqdm
def encode_strategies(strategies, batch_size=stg.JOBLIB_BATCH_SIZE,
                      parallel=True):
    """
    Encode strategies as integer labels over the unique strategy set.

    Parameters
    ----------
    strategies : Strategies array
        Array of strategies to be encoded.
    batch_size : int, optional
        joblib batch size used when assigning samples in parallel.
    parallel : bool, optional
        Assign samples using multiple processes when True.

    Returns
    -------
    numpy array
        Encodings for each strategy in strategies.
    Strategies array
        Array of unique strategies.
    """
    stg.logger.info("Encoding strategies")
    stg.logger.info("Getting unique set of strategies")
    # The module does `import time`, so the clock must be read via
    # time.time(); the previous bare time() call raised a TypeError.
    start_time = time.time()
    unique = unique_strategies(strategies)
    end_time = time.time()
    stg.logger.info("Extraction time %.3f sec" % (end_time - start_time))
    n_unique_strategies = len(unique)
    stg.logger.info("Found %d unique strategies" % n_unique_strategies)
    # Map strategies to number
    n_jobs = u.get_n_processes() if parallel else 1
    stg.logger.info("Assign samples to unique strategies (n_jobs = %d)"
                    % n_jobs)
    results = Parallel(n_jobs=n_jobs, batch_size=batch_size)(
        delayed(assign_to_unique_strategy)(s, unique) for s in tqdm(strategies))
    y = np.array(results)
    return y, unique
def generate_authenticator(data, authenticator_key):
    """
    This function will generate an authenticator for the data (provides authentication and integrity).
    :param data: The data over which to generate the authenticator.
    :type data: :class:`str`
    :param authenticator_key: The secret key to be used by the function, in byte string.
        You can use :func:`~securitylib.crypto.generate_authenticator_key` to generate it.
    :type authenticator_key: :class:`str`
    :returns: :class:`str` -- The generated authenticator in byte string.
    """
    # Reject malformed keys up front — presumably raises on invalid key
    # length/format; TODO confirm against validate_authenticator_key.
    validate_authenticator_key(authenticator_key)
    # Delegate the actual MAC computation to the advanced_crypto layer.
    return advanced_crypto.generate_authenticator(data, authenticator_key)
import torch
import logging
def detection_target_layer(proposals, gt_class_ids, gt_boxes, gt_masks):
    """Subsamples proposals and generates target box refinement, class_ids,
    and masks for each.
    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
              coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
          coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
                    (dy, dx, log(dh), log(dw), class_id)]
                   Class-specific bbox refinments.
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
                 Masks cropped to bbox boundaries and resized to neural
                 network output size.
    """
    # NOTE(review): _handle_crowds presumably flags proposals that overlap
    # "crowd" GT boxes so they can be excluded from the negatives — confirm.
    no_crowd_bool = _handle_crowds(proposals, gt_class_ids,
                                   gt_boxes, gt_masks)
    # Compute overlaps matrix [nb_batches, proposals, gt_boxes]
    overlaps = _bbox_overlaps(proposals, gt_boxes)
    # Determine positive and negative ROIs
    roi_iou_max = torch.max(overlaps, dim=1)[0]
    # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
    positive_roi_bool = roi_iou_max >= 0.5
    # Subsample ROIs. Aim for ROI_POSITIVE_RATIO positive
    # Positive ROIs
    if torch.nonzero(positive_roi_bool).nelement() != 0:
        positive_indices = torch.nonzero(positive_roi_bool)[:, 0]
        positive_count = int(Config.PROPOSALS.TRAIN_ROIS_PER_IMAGE *
                             Config.PROPOSALS.ROI_POSITIVE_RATIO)
        # Randomly subsample the positives down to at most positive_count.
        rand_idx = torch.randperm(positive_indices.shape[0])
        rand_idx = rand_idx[:positive_count].to(Config.DEVICE)
        positive_indices = positive_indices[rand_idx]
        positive_count = positive_indices.shape[0]
        positive_rois = proposals[positive_indices, :]
        # Assign positive ROIs to GT boxes.
        positive_overlaps = overlaps[positive_indices, :]
        roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]
        roi_gt_boxes = gt_boxes[roi_gt_box_assignment, :]
        roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment]
        # Compute bbox refinement for positive ROIs
        deltas = utils.box_refinement(positive_rois,
                                      roi_gt_boxes)
        # Assign positive ROIs to GT masks
        roi_masks = gt_masks[roi_gt_box_assignment, :, :]
        # Compute mask targets
        boxes = positive_rois
        if Config.MINI_MASK.USE:
            # Mini-masks are stored relative to their GT box, so transform
            # the ROI coordinates into that box's frame before cropping.
            boxes = utils.to_mini_mask(positive_rois, roi_gt_boxes)
        box_ids = (torch.arange(roi_masks.shape[0]).int()
                   .to(Config.DEVICE))
        masks = CropAndResizeFunction(
            Config.HEADS.MASK.SHAPE[0],
            Config.HEADS.MASK.SHAPE[1],
            0)(roi_masks.unsqueeze(1), boxes, box_ids)
        masks = masks.squeeze(1)
        # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
        # binary cross entropy loss.
        masks = torch.round(masks)
    else:
        positive_count = 0
    # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
    negative_roi_bool = roi_iou_max < 0.5
    negative_roi_bool = negative_roi_bool & no_crowd_bool
    logging.debug(f"pos: {positive_roi_bool.sum()}, "
                  f"neg: {negative_roi_bool.sum()}")
    # Negative ROIs. Add enough to maintain positive:negative ratio.
    if torch.nonzero(negative_roi_bool).nelement() != 0 and positive_count > 0:
        negative_indices = torch.nonzero(negative_roi_bool)[:, 0]
        r = 1.0 / Config.PROPOSALS.ROI_POSITIVE_RATIO
        negative_count = int(r * positive_count - positive_count)
        rand_idx = torch.randperm(negative_indices.shape[0])
        rand_idx = rand_idx[:negative_count].to(Config.DEVICE)
        negative_indices = negative_indices[rand_idx]
        negative_count = negative_indices.shape[0]
        negative_rois = proposals[negative_indices, :]
    else:
        negative_count = 0
    logging.debug(f"positive_count: {positive_count}, "
                  f"negative_count: {negative_count}")
    # Append negative ROIs and pad bbox deltas and masks that
    # are not used for negative ROIs with zeros.
    if positive_count > 0 and negative_count > 0:
        rois = torch.cat((positive_rois, negative_rois), dim=0)
        mrcnn_target = (MRCNNTarget(Config.HEADS.MASK.SHAPE,
                                    roi_gt_class_ids, deltas, masks)
                        .fill_zeros(negative_count))
    elif positive_count > 0:
        rois = positive_rois
        mrcnn_target = MRCNNTarget(Config.HEADS.MASK.SHAPE,
                                   roi_gt_class_ids, deltas, masks)
    else:
        # No usable proposals at all: return empty ROIs and an empty target.
        rois = torch.FloatTensor().to(Config.DEVICE)
        mrcnn_target = MRCNNTarget(Config.HEADS.MASK.SHAPE)
    return rois, mrcnn_target.to(Config.DEVICE)
def multi_recall(pred_y, true_y, labels):
    """
    Calculate the recall of multi classification as the mean of the
    per-label binary recalls.
    :param pred_y: predict result
    :param true_y: true result
    :param labels: label list
    :return: mean recall over all labels
    """
    # Predictions may arrive wrapped in single-element lists; unwrap them.
    if isinstance(pred_y[0], list):
        pred_y = [entry[0] for entry in pred_y]
    per_label_recalls = [binary_recall(pred_y, true_y, lab) for lab in labels]
    return mean(per_label_recalls)
def is_safe_range(expression):
    """
    Return True if an expression is safe range.

    The expression is expected to be in safe range normal form; it is safe
    range exactly when all of its free variables are range restricted.
    """
    try:
        # Evaluation order matters only for which call may raise first;
        # free variables are extracted before range restriction is computed.
        free_variables = extract_logic_free_variables(expression)
        return free_variables == range_restricted_variables(expression)
    except NeuroLangException:
        return False
def star_marker_level(prev, curr):
    """Allow markers to be on the same level as a preceding star."""
    # Only a star followed by a non-star marker at the same depth qualifies.
    if not prev.is_stars():
        return False
    if curr.is_stars():
        return False
    return prev.depth == curr.depth
from typing import Optional
from typing import Sequence
def get_mail_addresses(ids: Optional[Sequence[str]] = None,
                       key_word: Optional[str] = None,
                       output_file: Optional[str] = None,
                       sendtype: Optional[str] = None,
                       status: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMailAddressesResult:
    """
    This data source provides the Direct Mail Mail Addresses of the current Alibaba Cloud user.
    > **NOTE:** Available in v1.134.0+.
    ## Example Usage
    Basic Usage
    ```python
    import pulumi
    import pulumi_alicloud as alicloud
    ids = alicloud.directmail.get_mail_addresses(ids=["example_id"])
    pulumi.export("directMailMailAddressId1", ids.addresses[0].id)
    ```
    :param Sequence[str] ids: A list of Mail Address IDs.
    :param str key_word: The key word about account email address.
    :param str output_file: File name where the query results are saved.
    :param str sendtype: Account type.
    :param str status: Account Status. Valid values: `0`, `1`. Freeze: 1, normal: 0.
    :param pulumi.InvokeOptions opts: Options to control the invoke behavior.
    """
    # Pack the filters into the wire-format argument dict expected by the
    # provider (camelCase keys).
    __args__ = dict()
    __args__['ids'] = ids
    __args__['keyWord'] = key_word
    __args__['outputFile'] = output_file
    __args__['sendtype'] = sendtype
    __args__['status'] = status
    # Fall back to default invoke options / SDK version when not supplied.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('alicloud:directmail/getMailAddresses:getMailAddresses', __args__, opts=opts, typ=GetMailAddressesResult).value
    return AwaitableGetMailAddressesResult(
        addresses=__ret__.addresses,
        id=__ret__.id,
        ids=__ret__.ids,
        key_word=__ret__.key_word,
        output_file=__ret__.output_file,
        sendtype=__ret__.sendtype,
        status=__ret__.status)
import torch
def load_checkpoint(
    file,
    model: torch.nn.Module,
    optimizer: torch.optim.Optimizer = None,
    lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,
    strict: bool = True,
):
    """Loads training states from a checkpoint file.
    Args:
        file: a file-like object (has to implement read(), readline(), tell(), and seek()), or a string or os.PathLike
            object containing a file name.
        model (:class:`torch.nn.Module`): Model to load saved weights and buffers.
        optimizer (Union[:class:`torch.optim.Optimizer`, :class:`colossalai.nn.optimizer`]): Optimizer to recuperate.
        lr_scheduler (:class:`torch.optim.lr_scheduler._LRScheduler`, optional):
            lr_scheduler to recuperate, defaults to None.
        strict (bool, optional): Whether to strictly enforce that the keys in :attr:`state_dict`
            of the checkpoint match the names of parameters and buffers in model, defaults to True.
    Returns:
        int: The saved epoch number.
    Raises:
        RuntimeError: Raise error if the model/optimizer cannot successfully be recuperated
    """
    # Only model-parallel rank 0 reads the checkpoint from disk; the other
    # ranks receive the remaining states via the broadcast further below.
    state_dict = (
        torch.load(file, map_location=torch.device("cpu")) if gpc.get_local_rank(ParallelMode.MODEL) == 0 else None
    )
    # model states
    model_state = state_dict.pop("model") if state_dict is not None else dict()
    # pipeline
    if is_using_pp():
        # Each pipeline stage only loads its own partition of the weights.
        model_state = partition_pipeline_parallel_state_dict(model, model_state)
    try:
        model.load_state_dict(model_state, strict=strict)
    except RuntimeError as e:
        # Gather per-rank load errors onto rank 0 so the raised message
        # covers every rank's mismatches, not just the local ones.
        error_msgs = str(e)
        if error_msgs.startswith("Error(s) in loading state_dict for "):
            error_msgs = error_msgs.split("\n\t")[1:]
            dst_rank = gpc.get_ranks_in_group(ParallelMode.MODEL)[0]
            all_error_msgs = [None for _ in range(gpc.get_world_size(ParallelMode.MODEL))]
            dist.gather_object(error_msgs, all_error_msgs, dst=dst_rank, group=gpc.get_cpu_group(ParallelMode.MODEL))
            if gpc.get_global_rank() == 0:
                all_error_msgs = list(chain.from_iterable(all_error_msgs))
                raise RuntimeError(
                    "Error(s) in loading state_dict for {}:\n\t{}".format(
                        model.__class__.__name__, "\n\t".join(all_error_msgs)
                    )
                )
        else:
            raise e
    # broadcast the rest states
    state_dict = broadcast_state_dict(state_dict, ParallelMode.MODEL)
    # # optimizer states
    # if optimizer is not None and 'optimizer' in state_dict:
    #     optimizer.load_state_dict(state_dict['optimizer'])
    # # lr scheduler states
    # if lr_scheduler is not None and 'lr_scheduler' in state_dict:
    #     lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
    # last epoch
    last_epoch = state_dict.pop("epoch", -1)
    return last_epoch
def num_or_str(x):
    """Convert a string to an int or float when possible; otherwise strip it.

    Ex: num_or_str('42') ==> 42; num_or_str(' 42x ') ==> '42x'
    """
    # Try the narrower conversion first so '42' stays an int, not a float.
    for convert in (int, float):
        try:
            return convert(x)
        except ValueError:
            pass
    return str(x).strip()
def mac_timezone():
    """Determine system timezone"""
    # systemsetup prints e.g. "Time Zone: Europe/Berlin"; the slice drops the
    # first 11 characters — presumably the "Time Zone: " label. TODO confirm
    # the prefix length against the actual systemsetup output.
    output = cmdmod['cmd.run']("/usr/sbin/systemsetup -gettimezone")
    return {'mac_timezone': output[11:]}
import os
def count_files(directory, filters, extension, show_files=False, **kwargs):
    """Count the files in the first level of a directory.

    Parameters
    ----------
    directory : str
        path of directory to be checked
    filters : str
        substring that must be present in the file name
    extension : str
        secondary filter (case-insensitive substring, e.g. '.txt')
    show_files : bool, optional
        if set to True, also return the list of matching file names
    start_time : int, optional (keyword)
        seconds since the epoch; if set, only count files whose modification
        time is newer than start_time

    Returns
    -------
    int or (int, list)
        the count, plus the matching file names when show_files is True
    """
    # Explicit optional kwarg instead of the original NameError-based hack.
    start_time = kwargs.get("start_time")
    count = 0
    file_list = []
    for dirpath, subdirs, files in os.walk(directory):
        for name in files:
            # Only files that exist directly under `directory` are counted
            # ("first level"), even though os.walk descends into subdirs.
            if not os.path.isfile(os.path.join(directory, name)):
                continue
            if filters not in name or extension.lower() not in name.lower():
                continue
            if start_time is not None:
                # Skip files not modified after the reference time.
                if os.path.getmtime(os.path.join(dirpath, name)) <= start_time:
                    continue
            file_list.append(name)
            count += 1
    if show_files:
        return count, file_list
    return count
from .storage import create_results_archive
import sys
def run_emcee_seeded(light_curve, transit_params, spot_parameters, n_steps,
                     n_walkers, output_path, burnin=0.7,
                     n_extra_spots=1, skip_priors=False):
    """
    Fit for transit depth and spot parameters given initial guess informed by
    results from `peak_finder`
    Parameters
    ----------
    light_curve : `friedrich.lightcurve.TransitLightCurve`
        Light curve to fit
    transit_params : `~batman.TransitParams`
        Transit light curve parameters
    spot_parameters : list
        List of all spot parameters in [amp, t0, sig, amp, t0, sig, ...] order
    n_steps : int
        Number of MCMC steps to take
    n_walkers : int
        Number of MCMC walkers to initialize (must be even, more than twice the
        number of free params in fit)
    output_path : str
        Path to HDF5 archive output for storing results
    burnin : float
        Fraction of total number of steps to save to output (will truncate
        the first `burnin` of the light curve)
    n_extra_spots : int
        Add `n_extra_spots` extra spots to the fit to soak up spots not
        predicted by `peak_finder`
    skip_priors : bool
        Should a prior be applied to the depth parameter?
    Returns
    -------
    sampler : `emcee.EnsembleSampler`
        Sampler object returned by `emcee`
    """
    times = light_curve.times.jd
    fluxes = light_curve.fluxes
    errors = light_curve.errors
    lower_t_bound, upper_t_bound = get_in_transit_bounds(times, transit_params)
    # Spot amplitudes are every third entry of the flat parameter vector.
    amps = spot_parameters[::3]
    # NOTE(review): init_depth is computed but never used below — possibly
    # intended for a depth prior; confirm before removing.
    init_depth = transit_params.rp**2
    # Seed parameters for each extra spot: small amplitude, mid-series time,
    # width proportional to the in-transit duration.
    extra_spot_params = [0.1*np.min(amps), np.mean(times),
                         0.05*(upper_t_bound-lower_t_bound)]
    fit_params = np.concatenate([spot_parameters,
                                 n_extra_spots*extra_spot_params])
    ndim, nwalkers = len(fit_params), n_walkers
    # Initialize walkers as tiny Gaussian perturbations of the seed vector,
    # rejecting draws that fall outside the prior support.
    pos = []
    while len(pos) < nwalkers:
        realization = fit_params + 1e-5*np.random.randn(ndim)
        if not np.isinf(lnprior(realization, fluxes, lower_t_bound,
                                upper_t_bound, transit_params, skip_priors)):
            pos.append(realization)
    print('Begin MCMC...')
    # MPI fan-out: worker ranks block in wait() serving likelihood
    # evaluations and exit when the master closes the pool.
    pool = MPIPool(loadbalance=True)
    if not pool.is_master():
        pool.wait()
        sys.exit(0)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
                                    args=(times, fluxes, errors, lower_t_bound,
                                          upper_t_bound, transit_params,
                                          skip_priors),
                                    pool=pool)
    sampler.run_mcmc(pos, n_steps)
    print('Finished MCMC...')
    pool.close()
    # Persist everything after the burn-in fraction to the HDF5 archive.
    burnin_len = int(burnin*n_steps)
    create_results_archive(output_path, light_curve, sampler, burnin_len, ndim)
    return sampler
def lookup_loc_carriers(model_run):
    """
    loc_carriers, used in system_wide balance, are linked to loc_tech_carriers
    e.g. `X1::power` will be linked to `X1::chp::power` and `X1::battery::power`
    in a comma delimited string, e.g. `X1::chp::power,X1::battery::power`
    """
    # Producers and consumers together form the candidate set; this does not
    # change inside the loop, so build it once.
    all_loc_tech_carriers = (model_run.sets["loc_tech_carriers_prod"]
                             + model_run.sets["loc_tech_carriers_con"])
    data = []
    for loc_carrier in model_run.sets["loc_carriers"]:
        # A loc_tech_carrier "loc::tech::carrier" belongs to "loc::carrier".
        matches = {
            ltc for ltc in all_loc_tech_carriers
            if loc_carrier == "{0}::{2}".format(*ltc.split("::"))
        }
        data.append(",".join(matches))
    return dict(dims=["loc_carriers"], data=data)
def filldown(table, *fields, **kwargs):
    """
    Replace missing values with non-missing values from the row above.

    With no `fields` given, every field is filled down; otherwise only the
    named fields are filled, e.g.::

        >>> from petl import filldown
        >>> table2 = filldown(table1)                 # fill all fields
        >>> table3 = filldown(table1, 'bar')          # fill only 'bar'
        >>> table4 = filldown(table1, 'bar', 'baz')   # fill 'bar' and 'baz'

    A `None` (or otherwise missing) value in a filled field is replaced by
    the nearest non-missing value above it in the same column.

    .. versionadded:: 0.11
    """
    view = FillDownView(table, fields, **kwargs)
    return view
def realworld_bring_peg(fully_observable=True,
                        time_limit=_TIME_LIMIT,
                        random=None,
                        log_output=None,
                        environment_kwargs=None,
                        safety_spec=None,
                        delay_spec=None,
                        noise_spec=None,
                        perturb_spec=None,
                        dimensionality_spec=None,
                        multiobj_spec=None,
                        combined_challenge=None):
    """Return the manipulator bring task with the peg prop.

    All arguments are forwarded unchanged to ``gen_task``.
    """
    # A "bring peg" task uses the peg prop (True) without insertion (False).
    return gen_task(True, False, fully_observable, time_limit, random,
                    log_output, environment_kwargs, safety_spec, delay_spec,
                    noise_spec, perturb_spec, dimensionality_spec,
                    multiobj_spec, combined_challenge)
def schedule_gemm(cfg, s, A, B, C, batched=False, schedule_transforms=True):
    """Schedule GEMM, single and batched

    Parameters
    ----------
    cfg : Config
        Schedule configuration
    s : tvm.te.schedule.Schedule
        Operator schedule
    A : tvm.te.Tensor
        2D/3D Tensor, shape [n, k]/[b, n, k]
    B : tvm.te.Tensor
        2D/3D Tensor, shape [k, m]/[b, k, m]
    C : tvm.te.Tensor
        2D/3D Tensor, shape [n, m]/[b, n, m]
    batched : bool
        Whether the GEMM is batched
    schedule_transforms : bool
        Whether to also schedule the A/B layout-transform stages here.

    Returns
    -------
    trans_inter, inter_trans
        The transpose-interleave stage of A and the interleave-transpose
        stage of B, so callers can inspect or schedule them further.
    """
    # Fixed per-thread tile (4x4) and warp shape (2x2); work-group sizes
    # and the K unroll factor are tuning knobs taken from cfg.
    block_size_x = 4
    block_size_y = 4
    warp_size_x = 2
    warp_size_y = 2
    work_group_x = cfg["work_group_x"].val
    work_group_y = cfg["work_group_y"].val
    k_unroll = cfg["unroll_k_factor"].val
    # For batched tensors the leading axis is the batch, so the row/col
    # axes shift by one.
    if not batched:
        y_index, x_index = (0, 1)
    else:
        y_index, x_index = (1, 2)
    # Rearrange A and B into interleaved layouts for coalesced access
    # (semantics of the helpers are defined elsewhere in this file).
    trans_inter, A_transposed_interleaved = transpose_interleave(
        s, A, cfg["A_interleave"].val, y_index, x_index, [C], batched=batched
    )
    inter_trans, B_interleaved_transposed = interleave_transpose(
        s, B, cfg["B_interleave"].val, y_index, x_index, [C], batched=batched
    )
    if schedule_transforms:
        # Schedule A
        y, x = s[trans_inter].op.axis
        y, x, yi, xi = s[trans_inter].tile(y, x, 1, 8)
        s[trans_inter].unroll(yi)
        s[trans_inter].unroll(xi)
        tile_and_bind(s, trans_inter, y, x, 1, 4)
        # Schedule B
        y, x = s[inter_trans].op.axis
        xo, xi = s[inter_trans].split(x, 4)
        s[inter_trans].vectorize(xi)
        tile_and_bind(s, inter_trans, y, xo, 4, 4)
    # Schedule C
    # Stage the transformed operands through local memory and accumulate
    # into a local write cache before the final store.
    CR_A = s.cache_read(A_transposed_interleaved, "local", [C])
    CR_B = s.cache_read(B_interleaved_transposed, "local", [C])
    CW_C = s.cache_write(C, "local")
    if not batched:
        y, x = s[C].op.axis
    else:
        z, y, x = s[C].op.axis
    y, x, yt, xt = s[C].tile(y, x, block_size_y, block_size_x)
    s[C].unroll(yt)
    s[C].vectorize(xt)
    # Tile the global work space to generate 'square' warps -> 2x2 for warp size of 4
    y, x, wy, wx = s[C].tile(y, x, warp_size_y, warp_size_x)
    x = s[C].fuse(x, wy, wx)
    if not batched:
        yo, xo, yi, xi = tile_and_bind(s, C, y, x, work_group_y, work_group_x)
    else:
        # For batched GEMM bind batch to z axis
        zo, yo, xo, zi, yi, xi = tile_and_bind3d(s, C, z, y, x, 1, work_group_y, work_group_x)
    s[CW_C].compute_at(s[C], xi)
    if not batched:
        y, x = s[CW_C].op.axis
    else:
        _, y, x = s[CW_C].op.axis
    y, x, yt, xt = s[CW_C].tile(y, x, block_size_y, block_size_x)
    k = s[CW_C].op.reduce_axis[0]
    # Bring the reduction axis outermost, then unroll a k_unroll-deep
    # chunk of it together with the per-thread tile.
    s[CW_C].reorder(k, yt, xt)
    ko, ki = s[CW_C].split(k, k_unroll)
    s[CW_C].unroll(ki)
    s[CW_C].unroll(yt)
    s[CW_C].unroll(xt)
    # Feed the A read-cache inside the unrolled k loop.
    if not batched:
        i, j = s[CR_A].op.axis
    else:
        _, i, j = s[CR_A].op.axis
    s[CR_A].reorder(j, i)
    s[CR_A].compute_at(s[CW_C], ki)
    s[CR_A].unroll(j)
    s[CR_A].vectorize(i)
    # Feed the B read-cache inside the unrolled k loop.
    if not batched:
        i, j = s[CR_B].op.axis
    else:
        _, i, j = s[CR_B].op.axis
    s[CR_B].compute_at(s[CW_C], ki)
    s[CR_B].unroll(i)
    s[CR_B].vectorize(j)
    return trans_inter, inter_trans
import math
def distance(x1: float, y1: float, x2: float, y2: float) -> float:
    """Return the Euclidean distance between two points in the plane.

    :param x1: x-coordinate of the first point
    :param y1: y-coordinate of the first point
    :param x2: x-coordinate of the second point
    :param y2: y-coordinate of the second point
    :return: non-negative distance between (x1, y1) and (x2, y2)
    """
    # math.hypot is the stdlib form of sqrt(dx**2 + dy**2) and avoids
    # intermediate overflow/underflow for extreme coordinate values.
    return math.hypot(x2 - x1, y2 - y1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.