hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace2a2a463714d9bcfcfa8117452b0fe4023d5e0 | 5,624 | py | Python | ojos_ca/usecase/interactor/django/sys.py | ojos/python-ca | dba9e9c61fd997c8c2ed60a6bd6f076c5f216265 | [
"MIT"
] | null | null | null | ojos_ca/usecase/interactor/django/sys.py | ojos/python-ca | dba9e9c61fd997c8c2ed60a6bd6f076c5f216265 | [
"MIT"
] | null | null | null | ojos_ca/usecase/interactor/django/sys.py | ojos/python-ca | dba9e9c61fd997c8c2ed60a6bd6f076c5f216265 | [
"MIT"
] | null | null | null | from typing import Any
from django.http.request import HttpRequest
from ojos_ca.domain.value_object.exception import InvalidValueException
from ojos_ca.interface.repo.django import SeqRepo, SysVarRepo
from ojos_ca.usecase.interactor.core import RepoInteractor
from ojos_ca.usecase.interactor.exception import (
BadRequestException, ConflictException, NotFoundException
)
from .core import RequestInteractor
class SeqRequestInteractor(RepoInteractor, RequestInteractor):
    """Base interactor for sequence endpoints.

    The default `exec` simply echoes the requested seq_id; subclasses override
    it with the verb-specific behavior.
    """

    def __init__(self, repo: SeqRepo, *args, **kwargs):
        super().__init__(repo, *args, **kwargs)

    def exec(self, request: HttpRequest, seq_id: str = None, *args, **kwargs) -> Any:
        """Fallback handler: return the seq_id unchanged."""
        return {'content': {'seq_id': seq_id}}
class GetSeqInteractor(SeqRequestInteractor):
    """GET handler: list all sequences, or fetch a single one by its key."""

    def exec(self, request: HttpRequest, seq_id: str = None, *args, **kwargs) -> Any:
        serialize = self._repo.serializer.entity_to_dict
        if seq_id is not None:
            entity = self._repo.find_by_key(seq_id)
            if entity is None:
                raise NotFoundException(name='seq_id', value=seq_id)
            return {'content': serialize(entity)}
        # No key supplied: return every sequence in the repository.
        return {'content': [serialize(entity) for entity in self._repo.find_all()]}
class PostSeqInteractor(SeqRequestInteractor):
    """POST handler: create a new sequence from the JSON request body."""

    def exec(self, request: HttpRequest, seq_id: str = None, *args, **kwargs) -> Any:
        serializer = self._repo.serializer
        try:
            payload = serializer.json_to_dict(request.body)
            entity = serializer.dict_to_entity(**payload)
        except InvalidValueException as e:
            raise BadRequestException(message=e.message)

        # Refuse to overwrite an existing sequence with the same id.
        if self._repo.find_by_key(entity.seq_id) is not None:
            raise ConflictException(name='seq_id', value=entity.seq_id)

        entity, _, _ = self._repo.update_or_create(entity)
        return {'content': serializer.entity_to_dict(entity)}
class PutSeqInteractor(SeqRequestInteractor):
    """PUT handler: update the count of an existing sequence."""

    def exec(self, request: HttpRequest, seq_id: str = None, *args, **kwargs) -> Any:
        entity = self._repo.find_by_key(seq_id)
        if entity is None:
            raise NotFoundException(name='seq_id', value=seq_id)

        try:
            body = self._repo.serializer.json_to_dict(request.body)
            new_count = body.get('count', entity.count)
        except InvalidValueException as e:
            raise BadRequestException(message=e.message)

        # Persist only when the count actually changed.
        if entity.count != new_count:
            entity.count = new_count
            entity, _, _ = self._repo.update_or_create(entity)

        return {'content': self._repo.serializer.entity_to_dict(entity)}
class DeleteSeqInteractor(SeqRequestInteractor):
    """DELETE handler: remove a sequence by key, answering 204 on success."""

    def exec(self, request: HttpRequest, seq_id: str = None, *args, **kwargs) -> Any:
        if self._repo.find_by_key(seq_id) is None:
            raise NotFoundException(name='seq_id', value=seq_id)
        self._repo.delete(seq_id)
        return {'message': 'No Content', 'status_code': 204}
class SysVarRequestInteractor(RepoInteractor, RequestInteractor):
    """Base interactor for system-variable endpoints.

    The default `exec` simply echoes the requested key; subclasses override
    it with the verb-specific behavior.
    """

    def __init__(self, repo: SysVarRepo, *args, **kwargs):
        super().__init__(repo, *args, **kwargs)

    def exec(self, request: HttpRequest, key: str = None, *args, **kwargs) -> Any:
        """Fallback handler: return the key unchanged."""
        return {'content': {'key': key}}
class GetSysVarViewInteractor(SysVarRequestInteractor):
    """GET handler: list all system variables, or fetch a single one by key."""

    def exec(self, request: HttpRequest, key: str = None, *args, **kwargs) -> Any:
        if key is None:
            content = [self._repo.serializer.entity_to_dict(sysvar) for sysvar in self._repo.find_all()]
        else:
            # Fixed: the local was previously named `seq`, a leftover from the
            # Seq interactors this class was copied from.
            sysvar = self._repo.find_by_key(key)
            if sysvar is None:
                raise NotFoundException(name='key', value=key)
            content = self._repo.serializer.entity_to_dict(sysvar)

        return {'content': content}
class PostSysVarViewInteractor(SysVarRequestInteractor):
    """POST handler: create a new system variable from the JSON request body."""

    def exec(self, request: HttpRequest, key: str = None, *args, **kwargs) -> Any:
        serializer = self._repo.serializer
        try:
            payload = serializer.json_to_dict(request.body)
            entity = serializer.dict_to_entity(**payload)
        except InvalidValueException as e:
            raise BadRequestException(message=e.message)

        # Refuse to overwrite an existing variable with the same key.
        if self._repo.find_by_key(entity.key) is not None:
            raise ConflictException(name='key', value=entity.key)

        entity, _, _ = self._repo.update_or_create(entity)
        return {'content': serializer.entity_to_dict(entity)}
class PutSysVarViewInteractor(SysVarRequestInteractor):
    """PUT handler: update fields of an existing system variable."""

    def exec(self, request: HttpRequest, key: str = None, *args, **kwargs) -> Any:
        sysvar = self._repo.find_by_key(key)
        if sysvar is None:
            raise NotFoundException(name='key', value=key)

        try:
            # Fixed: json_to_dict was previously called *outside* the
            # try-block, so a malformed body raised InvalidValueException
            # uncaught (a 500) instead of the 400 Bad Request the POST
            # handlers produce.
            _sysvar = self._repo.serializer.json_to_dict(request.body)
            sysvar.raw_data = _sysvar.get('raw_data', sysvar.raw_data)
            sysvar.module = _sysvar.get('module', sysvar.module)
            sysvar.note = _sysvar.get('note', sysvar.note)
        except InvalidValueException as e:
            raise BadRequestException(message=e.message)

        sysvar, _, _ = self._repo.update_or_create(sysvar)
        content = self._repo.serializer.entity_to_dict(sysvar)

        return {'content': content}
class DeleteSysVarViewInteractor(SysVarRequestInteractor):
    """DELETE handler: remove a system variable by key, answering 204."""

    def exec(self, request: HttpRequest, key: str = None, *args, **kwargs) -> Any:
        if self._repo.find_by_key(key) is None:
            raise NotFoundException(name='key', value=key)
        self._repo.delete(key)
        return {'message': 'No Content', 'status_code': 204}
| 39.055556 | 109 | 0.672119 |
ace2a2b4ee0755707991ed99d34fa900b4045f9e | 497 | py | Python | python-dev/a212_cipher_and_rsa/rsa_decrypt.py | tacslouai/python | 87bdb6623440c4e6810c2a77df9f2b912ba16d5a | [
"Unlicense"
] | null | null | null | python-dev/a212_cipher_and_rsa/rsa_decrypt.py | tacslouai/python | 87bdb6623440c4e6810c2a77df9f2b912ba16d5a | [
"Unlicense"
] | null | null | null | python-dev/a212_cipher_and_rsa/rsa_decrypt.py | tacslouai/python | 87bdb6623440c4e6810c2a77df9f2b912ba16d5a | [
"Unlicense"
] | null | null | null | # a212_rsa_decrypt.py
import rsa as rsa
# Interactive RSA decryption helper: asks for the private key, modulus and the
# pasted ciphertext list, then prints the decrypted message.
sender = input("who sent you this message?")
print("If they sent you the message, then", sender, "should have used your public key to encrypt")

key = int(input("Enter your Private Key: "))
mod_value = int(input("Enter your Modulus: "))
encrypted_msg = input("Paste in the message they sent you with no brackets and hit enter: ")

# Break apart the list that is cut/copied over on ", ". Stray surrounding
# whitespace or a pasted "[...]" wrapper is now tolerated instead of
# corrupting the first and last chunks.
msg = encrypted_msg.strip().strip("[]").split(", ")

print(rsa.decrypt(key, mod_value, msg))
| 41.416667 | 96 | 0.726358 |
ace2a40df18eac977ba17d3ae42e5118cd4daf2a | 525 | py | Python | constants.py | sanitgupta/pacPlanning | 17ac2e3a51c5d4e23d81d36044963a8e61c0b368 | [
"MIT"
] | 1 | 2020-09-14T16:00:19.000Z | 2020-09-14T16:00:19.000Z | constants.py | sanitgupta/pacPlanning | 17ac2e3a51c5d4e23d81d36044963a8e61c0b368 | [
"MIT"
] | null | null | null | constants.py | sanitgupta/pacPlanning | 17ac2e3a51c5d4e23d81d36044963a8e61c0b368 | [
"MIT"
] | null | null | null | import numpy as np
# Vmax = 10000
# Rmax = 100

# Upper bound on iterations before the main loop is presumably abandoned —
# confirm against the solver code that reads it.
MAX_ITERATION_LIMIT = 100000
# NOTE(review): single-letter module constant; its role is not visible here —
# verify against the code that imports it.
c = 10000
# Presumably progress is reported every this many iterations.
PRINT_ITERATION = 10000
# Window length / tolerance pair apparently used by a convergence test.
converge_iterations = 1000
epsilon_convergence = 1e-3
# Per-MDP epsilon values keyed by input file name (expressions kept verbatim).
eps_values = {'mdp-01.txt':5*0.51794, 'mdp-02.txt':1.9667, 'mdp-03.txt':5*0.860558,'mdp-03-trunc.txt':5*0.860558, 'mdp-04.txt':1.47829, 'mdp-06.txt':0.1, 'mdp-riverswim.txt':5*0.153096, 'mdp-riverf.txt':5*0.153096, 'mdp-CasinoLand.txt':5*1.235594, 'mdp-SixArms.txt':5*0.82568}
#seeds = [10,20,30,40,50,60,70,80,90,100]
# Random seeds for experiment runs; alternative seed sets kept commented out.
seeds = [20]
#seeds = np.arange(0, 10, 5) | 40.384615 | 276 | 0.691429 |
ace2a71aa1b45cf482d01b473940acaecebe7f74 | 6,692 | py | Python | geo_ez/utility_functions.py | avryhof/geo_ez | cfddf9108e3690499e15a459b87b8c992772cc94 | [
"MIT"
] | null | null | null | geo_ez/utility_functions.py | avryhof/geo_ez | cfddf9108e3690499e15a459b87b8c992772cc94 | [
"MIT"
] | null | null | null | geo_ez/utility_functions.py | avryhof/geo_ez | cfddf9108e3690499e15a459b87b8c992772cc94 | [
"MIT"
] | null | null | null | import math
import bleach
import csv
import datetime
from django.db.models.expressions import RawSQL
from django.utils.timezone import make_aware
from geo_ez.data_functions import to_dict
from geo_ez.models import PostalCode
from geo_ez.us_census_class import USCensus
from geo_ez.usps_class import USPS
def csv_to_dicts(csv_file, **kwargs):
    """Read a CSV file into a list of dicts, one per data row.

    :param csv_file: Path to the CSV file.
    :keyword encoding: Text encoding used to open the file (default "utf8").
    :keyword has_comments: When True, skip lines starting with "#".
    Any remaining keyword arguments are forwarded to ``csv.DictReader``.
    :return: list of dicts produced by ``to_dict`` from each row.
    """
    encoding = kwargs.pop("encoding", "utf8")
    has_comments = kwargs.pop("has_comments", False)

    # Fixed: the file was previously opened without a context manager (leaked
    # on exception) behind a Python-2-era try/except TypeError fallback.
    with open(csv_file, "r", encoding=encoding) as df:
        if has_comments:
            reader = csv.DictReader(filter(lambda row: row[0] != "#", df), **kwargs)
        else:
            reader = csv.DictReader(df, **kwargs)
        return [to_dict(row) for row in reader]
def geocode(address_dict, **kwargs):
    """Geocode a street address with the US Census geocoder.

    Unless ``normalized=True`` is passed, the address is first standardized
    through the US Postal Service before being sent to the Census service.

    :param address_dict: dict with address1/address2/city/state/zip_code keys.
    :return: Whatever ``USCensus.geocode`` returns for the resolved address.
    """
    fields = ("address1", "address2", "city", "state", "zip_code")
    search_address = {field: address_dict.get(field) for field in fields}

    if kwargs.get("normalized", False):
        valid_address = search_address
    else:
        # First, normalize the address with the US Postal Service.
        valid_address = USPS().address(**search_address)

    return USCensus().geocode(query=valid_address)
def miles_to_km(miles):
    """Convert a distance in statute miles to kilometers."""
    kilometers_per_mile = 1.60934
    return miles * kilometers_per_mile
def km_to_miles(km):
    """Convert a distance in kilometers to statute miles."""
    miles_per_kilometer = 0.621371
    return km * miles_per_kilometer
def deg2rad(deg):
    """Convert an angle in degrees to radians."""
    radians_per_degree = math.pi / 180
    return deg * radians_per_degree
def get_distance(lat1, lon1, lat2, lon2, **kwargs):
    """Great-circle distance between two latitude/longitude points (haversine).

    :keyword use_miles: Return miles when True (default); kilometers otherwise.
    :return: Distance as a float, in the requested unit.
    """
    use_miles = kwargs.get("use_miles", True)

    lat1, lon1 = float(lat1), float(lon1)
    lat2, lon2 = float(lat2), float(lon2)

    earth_radius_km = 6371
    delta_lat = deg2rad(lat2 - lat1)
    delta_lon = deg2rad(lon2 - lon1)

    # Haversine formula.
    a = math.sin(delta_lat / 2) * math.sin(delta_lat / 2) + math.cos(deg2rad(lat1)) * math.cos(deg2rad(lat2)) * math.sin(
        delta_lon / 2
    ) * math.sin(delta_lon / 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    distance_km = earth_radius_km * c

    return km_to_miles(distance_km) if use_miles else distance_km
def get_postal_code_by_coords(latitude, longitude, max_radius=None):
    """
    Finds the nearest postal code to the provided coordinates.

    The search radius grows one unit at a time until at least one postal code
    is found.

    :param latitude:
    :param longitude:
    :param max_radius: Optional cap on the search radius. The original loop
        grew the radius forever when the table was empty; passing a cap makes
        the search terminate with None instead. Default None preserves the
        old unbounded behavior.
    :return: The nearest PostalCode, or None.
    """
    radius = 1
    while max_radius is None or radius <= max_radius:
        postal_codes = postal_codes_within_radius(latitude, longitude, radius=radius)
        if len(postal_codes) > 0:
            # Results are ordered nearest-first, so the first hit is closest.
            return postal_codes[0]
        radius += 1

    return None
def points_within_radius(gismodel, latitude, longitude, **kwargs):
    """Queryset of `gismodel` rows annotated with great-circle distance.

    The model must expose `latitude` and `longitude` columns; the raw-SQL
    formula below references them directly.

    :param gismodel: Django model class with latitude/longitude fields.
    :param latitude: Center latitude, in degrees.
    :param longitude: Center longitude, in degrees.
    :keyword radius: Optional maximum distance; when given, results are
        filtered to distance < radius.
    :keyword use_miles: When True (default) distances use earth radius 3959
        (miles); otherwise 6371 (kilometers).
    :return: Queryset ordered nearest-first, annotated with `distance`.
    """
    radius = kwargs.get("radius", False)
    use_miles = kwargs.get("use_miles", True)

    if radius:
        radius = float(radius)

    distance_unit = float(3959 if use_miles else 6371)

    # Great circle distance formula; least/greatest clamp the cosine term to
    # [-1, 1] so acos never sees out-of-domain values from float drift.
    gcd_formula = (
        "%s * acos(least(greatest(cos(radians(%s)) * cos(radians(latitude)) * cos(radians(longitude) - "
        "radians(%s)) + sin(radians(%s)) * sin(radians(latitude)), -1), 1))"
    )
    distance_raw_sql = RawSQL(gcd_formula, (distance_unit, latitude, longitude, latitude))
    qs = gismodel.objects.all().annotate(distance=distance_raw_sql).order_by("distance")
    if radius:
        qs = qs.filter(distance__lt=radius)

    return qs
def postal_codes_within_radius(latitude, longitude, **kwargs):
    """PostalCode queryset ordered by distance; see points_within_radius for kwargs."""
    return points_within_radius(PostalCode, latitude, longitude, **kwargs)
def import_postal_codes_csv(data_file_path, **kwargs):
    """Import a delimited postal-code file (GeoNames-style columns) into PostalCode.

    Rows already present (matched on postal_code + name/place_name) are
    updated in place and saved immediately; unseen rows are collected and
    bulk-inserted in one query at the end.

    :param data_file_path: Path to the delimited data file.
    :keyword delimiter: Field separator, defaults to a tab.
    """
    delimiter = kwargs.get("delimiter", "\t")

    insert_list = []
    # Fixed: mode "rU" was removed in Python 3.11 (open() raises ValueError);
    # newline="" is what the csv module recommends, and the context manager
    # guarantees the file is closed even if a row raises.
    with open(data_file_path, "r", encoding="utf8", newline="") as data_file:
        for row in csv.reader(data_file, delimiter=delimiter):
            # Skip empty lines and rows without an accuracy value (column 11).
            if len(row) == 0 or not row[11]:
                continue

            try:
                postal_code = PostalCode.objects.get(postal_code=row[1], name=row[2], place_name=row[2])
            except PostalCode.DoesNotExist:
                insert_list.append(
                    PostalCode(
                        country_code=row[0],
                        postal_code=row[1],
                        name=row[2],
                        place_name=row[2],
                        admin_name1=row[3],
                        admin_code1=row[4],
                        admin_name2=row[5],
                        admin_code2=row[6],
                        admin_name3=row[7],
                        admin_code3=row[8],
                        latitude=row[9],
                        longitude=row[10],
                        accuracy=row[11],
                        updated=make_aware(datetime.datetime.now()),
                    )
                )
            else:
                postal_code.country_code = row[0]
                postal_code.postal_code = row[1]
                postal_code.name = row[2]
                postal_code.place_name = row[2]
                postal_code.admin_name1 = row[3]
                postal_code.admin_code1 = row[4]
                postal_code.admin_name2 = row[5]
                postal_code.admin_code2 = row[6]
                postal_code.admin_name3 = row[7]
                postal_code.admin_code3 = row[8]
                postal_code.latitude = row[9]
                postal_code.longitude = row[10]
                postal_code.accuracy = row[11]
                postal_code.updated = make_aware(datetime.datetime.now())
                postal_code.save()

    PostalCode.objects.bulk_create(insert_list)
def zip_codes_in_radius(**kwargs):
    """Return zip codes within a radius of a starting zip code.

    :keyword zip_code: The center zip code.
    :keyword radius: Search radius, in the requested distance units.
    :keyword distance_units: "miles" (default) or a metric synonym; controls
        the unit of the returned distances.
    :return: list of dicts (zip_code, distance, city, state), nearest-first.
    """
    zip_code = kwargs.get("zip_code", None)
    radius = kwargs.get("radius", False)
    distance_units = bleach.clean(kwargs.get("distance_units", "miles"))

    # Fixed: this flag was previously computed but never used — the query was
    # hard-coded with use_miles=True, so metric requests still got miles
    # (and non-imperial units left `use_miles` unassigned).
    use_miles = distance_units.lower() in [
        "mi", "miles", "imperial", "empirical", "us", "united states", "usa"
    ]

    starting_zip_code = PostalCode.objects.get(postal_code=zip_code)
    zipcodes_in_radius = points_within_radius(
        PostalCode, starting_zip_code.latitude, starting_zip_code.longitude,
        radius=radius, use_miles=use_miles
    )

    return [
        {
            "zip_code": match.postal_code,
            "distance": round(match.distance, 3),
            "city": match.place_name,
            "state": match.admin_code1,
        }
        for match in zipcodes_in_radius
    ]
| 28.844828 | 113 | 0.598625 |
ace2a89e882a009b711fddd3289dfd2bbed08a51 | 325 | py | Python | tests/test_query_base.py | tellor-io/pytelliot | c5da9168f8dcf8ac5728c208f169b6f7ddba85d7 | [
"MIT"
] | 2 | 2021-09-01T14:19:26.000Z | 2021-09-07T15:17:14.000Z | tests/test_query_base.py | tellor-io/pytelliot | c5da9168f8dcf8ac5728c208f169b6f7ddba85d7 | [
"MIT"
] | 77 | 2021-09-03T04:01:20.000Z | 2021-11-09T14:45:52.000Z | tests/test_query_base.py | tellor-io/pytelliot | c5da9168f8dcf8ac5728c208f169b6f7ddba85d7 | [
"MIT"
] | 3 | 2021-09-02T10:51:15.000Z | 2021-09-07T15:16:59.000Z | from dataclasses import dataclass
from telliot_core.queries.query import OracleQuery
def test_main():
    """Smoke test: a dataclass-based OracleQuery serializes its state."""

    @dataclass
    class MyQuery(OracleQuery):
        text: str
        val: int = 3

    query = MyQuery("asdf")
    state = query.get_state()
    print(state)

    expected = {"type": "MyQuery", "text": "asdf", "val": 3}
    assert state == expected
| 20.3125 | 65 | 0.627692 |
ace2a92407063a73473717dc3a9a7b026bf96c64 | 138,555 | py | Python | jax/_src/api.py | mreineck/jax | f6d329b2d9b5f83c6a59e5739aa1ca8d4d1ffa1c | [
"Apache-2.0"
] | null | null | null | jax/_src/api.py | mreineck/jax | f6d329b2d9b5f83c6a59e5739aa1ca8d4d1ffa1c | [
"Apache-2.0"
] | null | null | null | jax/_src/api.py | mreineck/jax | f6d329b2d9b5f83c6a59e5739aa1ca8d4d1ffa1c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX user-facing transformations and utilities.
The transformations here mostly wrap internal transformations, providing
convenience flags to control behavior and handling Python containers of
arguments and outputs. The Python containers handled are pytrees (see
tree_util.py), which include nested tuples/lists/dicts, where the leaves are
arrays.
"""
# flake8: noqa: F401
import collections
import functools
from functools import partial
import inspect
import itertools as it
import sys
import threading
import weakref
import types
from typing import (Any, Callable, Iterable, NamedTuple, Mapping, Optional,
Sequence, Tuple, TypeVar, Union, overload)
from warnings import warn
import numpy as np
from contextlib import contextmanager, ExitStack
import jax
from jax import core
from jax import linear_util as lu
from jax._src import dtypes
from jax.core import eval_jaxpr
from jax._src.api_util import (
flatten_fun, apply_flat_fun, flatten_fun_nokwargs, flatten_fun_nokwargs2,
argnums_partial, argnums_partial_except, flatten_axes, donation_vector,
rebase_donate_argnums, _ensure_index, _ensure_index_tuple,
shaped_abstractify, _ensure_str_tuple, argnames_partial_except)
from jax._src import traceback_util
from jax._src.traceback_util import api_boundary
from jax.tree_util import (tree_map, tree_flatten, tree_unflatten,
tree_structure, tree_transpose, tree_leaves,
tree_multimap, treedef_is_leaf, treedef_children,
Partial, PyTreeDef)
from jax._src.util import (unzip2, curry, safe_map, safe_zip, prod, split_list,
extend_name_stack, wrap_name, cache, wraps,
HashableFunction)
from jax._src import device_array
from jax._src import dispatch
from jax._src.lib import jax_jit
from jax._src.lib import version
from jax._src.lib import xla_bridge as xb
from jax._src.lib import xla_client as xc
from jax._src.lib import pmap_lib
# Unused imports to be exported
from jax._src.lib.xla_bridge import (device_count, local_device_count, devices,
local_devices, process_index,
process_count, host_id, host_ids,
host_count, default_backend)
from jax.core import ShapedArray, raise_to_shaped
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.interpreters import pxla
from jax.interpreters import ad
from jax.interpreters import batching
from jax.interpreters import masking
from jax.interpreters import invertible_ad as iad
from jax.interpreters.invertible_ad import custom_ivjp
from jax.custom_batching import custom_vmap
from jax.custom_derivatives import (closure_convert, custom_gradient, custom_jvp,
custom_vjp, linear_call)
from jax.custom_transpose import custom_transpose
from jax.ad_checkpoint import checkpoint_policies
from jax._src.config import (flags, config, bool_env,
disable_jit as _disable_jit,
debug_nans as config_debug_nans,
debug_infs as config_debug_infs,
_thread_local_state as config_thread_local_state)
# Hide this file's frames from user-facing tracebacks.
traceback_util.register_exclusion(__file__)

# Canonicalizing dtype lookup used throughout this module.
_dtype = partial(dtypes.dtype, canonicalize=True)

AxisName = Any

# These TypeVars are used below to express the fact that function types
# (i.e. call signatures) are invariant under the jit, vmap, and pmap
# transformations.
# Note that the function type annotations will generally not strictly hold
# in JIT internals, as Tracer values are passed through the function.
# Should this raise any type errors for the tracing code in future, we can disable
# type checking in parts of the tracing code, or remove these annotations.
F = TypeVar("F", bound=Callable)
T = TypeVar("T")
U = TypeVar("U")

# Shadow the builtins with the error-checking variants from util; the
# unchecked originals remain available under the unsafe_* names.
map, unsafe_map = safe_map, map
zip, unsafe_zip = safe_zip, zip
FLAGS = flags.FLAGS

# Escape hatches for the native fast paths: each flag defaults to True but can
# be flipped off via the corresponding environment variable.
flags.DEFINE_bool(
    "experimental_cpp_jit", bool_env("JAX_CPP_JIT", True),
    "A flag enabling the C++ jax.jit fast path."
    "Set this to `False` only if it crashes otherwise and report "
    "the error to the jax-team.")
flags.DEFINE_bool(
    "experimental_cpp_pmap", bool_env("JAX_CPP_PMAP", True),
    "A flag enabling the C++ jax.pmap fast path. Until the default "
    "is switched to True, the feature is not supported and possibly broken "
    "(e.g. it may use unreleased code from jaxlib.")
def _nan_check_posthook(fun, args, kwargs, output):
  """Hook function called by the C++ jit/pmap to perform NaN checking."""
  leaves = tree_leaves(output)

  # Collect the underlying device buffers from both single-device arrays
  # (device_buffer) and sharded arrays (device_buffers).
  buffers = []
  for da_or_sda in leaves:
    if hasattr(da_or_sda, "device_buffer"):
      buffers.append(da_or_sda.device_buffer)
    elif hasattr(da_or_sda, "device_buffers"):
      buffers.extend(da_or_sda.device_buffers)

  try:
    dispatch.check_special(xla.xla_call_p, buffers)
  except FloatingPointError:
    # compiled_fun can only raise in this case
    assert config.jax_debug_nans or config.jax_debug_infs
    print("Invalid nan value encountered in the output of a C++-jit/pmap "
          "function. Calling the de-optimized version.")
    # Re-run through the Python cache-miss path so the failure surfaces with
    # full debugging context.
    fun._cache_miss(*args, **kwargs)[0]  # probably won't return
def _update_debug_special_global(_):
  """Install or remove the NaN/Inf-checking post-hook on the global jit state."""
  enabled = config._read("jax_debug_nans") or config._read("jax_debug_infs")
  jax_jit.global_state().post_hook = _nan_check_posthook if enabled else None
def _update_debug_special_thread_local(_):
  """Install or remove the NaN/Inf-checking post-hook on the thread-local jit state."""
  enabled = (getattr(config_thread_local_state, "jax_debug_nans", False) or
             getattr(config_thread_local_state, "jax_debug_infs", False))
  jax_jit.thread_local_state().post_hook = _nan_check_posthook if enabled else None
# Re-sync the post-hooks whenever either debug option changes, at both global
# and thread-local scope.
config_debug_nans._add_hooks(_update_debug_special_global,
                             _update_debug_special_thread_local)
config_debug_infs._add_hooks(_update_debug_special_global,
                             _update_debug_special_thread_local)

# Re-export of dtypes.float0 at module level.
float0 = dtypes.float0
def _check_callable(fun):
  """Raise TypeError unless `fun` is a plain callable usable with transformations."""
  # Staticmethods are rejected explicitly: even where Python makes them
  # callable (3.10+), the C++ JIT cannot take weak references to them.
  if isinstance(fun, staticmethod):
    raise TypeError(f"staticmethod arguments are not supported, got {fun}")
  if not callable(fun):
    raise TypeError(f"Expected a callable value, got {fun}")
  # Generator functions trace incorrectly, so reject them up front.
  if _isgeneratorfunction(fun):
    raise TypeError(f"Expected a function, got a generator function: {fun}")
def _isgeneratorfunction(fun):
# re-implemented here because of https://bugs.python.org/issue33261
while inspect.ismethod(fun):
fun = fun.__func__
while isinstance(fun, functools.partial):
fun = fun.func
return inspect.isfunction(fun) and bool(fun.__code__.co_flags & inspect.CO_GENERATOR)
_POSITIONAL_OR_KEYWORD = inspect.Parameter.POSITIONAL_OR_KEYWORD
def _infer_argnums_and_argnames(
    fun: Callable,
    argnums: Union[int, Iterable[int], None],
    argnames: Union[str, Iterable[str], None],
) -> Tuple[Tuple[int, ...], Tuple[str, ...]]:
  """Infer missing argnums and argnames for a function with inspect.

  If both are None the result is a pair of empty tuples; if both are given
  they are merely normalized to tuples (no inference). When exactly one is
  given, the other is derived from fun's signature by matching
  POSITIONAL_OR_KEYWORD parameters against the provided one.
  """
  if argnums is None and argnames is None:
    argnums = ()
    argnames = ()
  elif argnums is not None and argnames is not None:
    argnums = _ensure_index_tuple(argnums)
    argnames = _ensure_str_tuple(argnames)
  else:
    try:
      signature = inspect.signature(fun)
    except ValueError:
      # In rare cases, inspect can fail, e.g., on some builtin Python functions.
      # In these cases, don't infer any parameters.
      parameters: Mapping[str, inspect.Parameter] = {}
    else:
      parameters = signature.parameters
    if argnums is None:
      assert argnames is not None
      argnames = _ensure_str_tuple(argnames)
      # Indices of positional-or-keyword parameters whose name was listed.
      argnums = tuple(
          i for i, (k, param) in enumerate(parameters.items())
          if param.kind == _POSITIONAL_OR_KEYWORD and k in argnames
      )
    else:
      assert argnames is None
      argnums = _ensure_index_tuple(argnums)
      # Names of positional-or-keyword parameters whose index was listed.
      argnames = tuple(
          k for i, (k, param) in enumerate(parameters.items())
          if param.kind == _POSITIONAL_OR_KEYWORD and i in argnums
      )
  return argnums, argnames
def jit(
    fun: F,
    *,
    static_argnums: Union[int, Iterable[int], None] = None,
    static_argnames: Union[str, Iterable[str], None] = None,
    device: Optional[xc.Device] = None,
    backend: Optional[str] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    inline: bool = False,
) -> F:
  """Sets up ``fun`` for just-in-time compilation with XLA.

  Args:
    fun: A pure function to be jitted (side effects may only run once). Its
      arguments and return value should be arrays, scalars, or (nested)
      standard Python containers thereof — except for positions marked
      static, which may be any hashable value with a defined equality; static
      values participate in the compilation cache key.
    static_argnums: Optional int or collection of ints naming positional
      arguments to treat as compile-time constants. Operations depending only
      on static arguments are constant-folded during Python tracing, and a
      call with different static values triggers recompilation, so static
      values must be hashable and immutable. Arguments that are not arrays or
      containers thereof must be marked static. If only one of
      ``static_argnums`` / ``static_argnames`` is provided, the other is
      inferred via ``inspect.signature(fun)``; if both are provided no
      inference is performed and only the listed parameters are static.
    static_argnames: Optional string or collection of strings naming keyword
      arguments to treat as static, with the same semantics as
      ``static_argnums``.
    device: This is an experimental feature and the API is likely to change.
      Optional Device to run the jitted function on (see
      :py:func:`jax.devices`); defaults to XLA's DeviceAssignment logic,
      usually ``jax.devices()[0]``.
    backend: This is an experimental feature and the API is likely to change.
      Optional string naming the XLA backend: ``'cpu'``, ``'gpu'``, or
      ``'tpu'``.
    donate_argnums: Arguments whose buffers are "donated" to the computation.
      Only donate arguments you no longer need: XLA may recycle donated
      buffers (e.g. to store an output), and JAX raises an error if a donated
      buffer is reused. By default, no arguments are donated.
    inline: Whether this function should be inlined into enclosing jaxprs
      rather than represented as an application of the xla_call primitive
      with its own subjaxpr. Default False.

  Returns:
    A wrapped version of ``fun``, set up for just-in-time compilation.

  For example, ``selu`` below can be compiled into a single fused kernel:

  >>> import jax
  >>>
  >>> @jax.jit
  ... def selu(x, alpha=1.67, lmbda=1.05):
  ...   return lmbda * jax.numpy.where(x > 0, x, alpha * jax.numpy.exp(x) - alpha)
  >>>
  >>> key = jax.random.PRNGKey(0)
  >>> x = jax.random.normal(key, (10,))
  >>> print(selu(x))  # doctest: +SKIP
  [-0.54485  0.27744 -0.29255 -0.91421 -0.62452 -0.24748
   -0.85743 -0.78232  0.76827  0.59566 ]
  """
  # Prefer the C++ fast path unless it has been disabled by flag.
  impl = _cpp_jit if FLAGS.experimental_cpp_jit else _python_jit
  return impl(fun, static_argnums, static_argnames, device, backend,
              donate_argnums, inline)
def _prepare_jit(fun, static_argnums, static_argnames, donate_argnums,
                 args, kwargs):
  """Flatten a call's arguments and wrap `fun` for tracing.

  Returns the wrapped function, the input pytree structure, the flat list of
  dynamic arguments, and the per-argument donation mask.
  """
  # Every donated position must refer to an actual positional argument of
  # this call.
  if max(donate_argnums, default=-1) >= len(args):
    raise ValueError(
        f"jitted function has donate_argnums={donate_argnums} but "
        f"was called with only {len(args)} positional arguments.")

  wrapped = lu.wrap_init(fun)
  # Peel off static positional and keyword arguments: they become part of the
  # wrapped function rather than traced inputs.
  wrapped, dyn_args = argnums_partial_except(wrapped, static_argnums, args,
                                             allow_invalid=True)
  wrapped, dyn_kwargs = argnames_partial_except(wrapped, static_argnames, kwargs)
  flat_args, in_tree = tree_flatten((dyn_args, dyn_kwargs))
  donated_invars = (donation_vector(donate_argnums, dyn_args, dyn_kwargs)
                    if donate_argnums else (False,) * len(flat_args))
  return wrapped, in_tree, flat_args, donated_invars
def _python_jit(
    fun: F,
    static_argnums: Union[int, Iterable[int], None] = None,
    static_argnames: Union[str, Iterable[str], None] = None,
    device: Optional[xc.Device] = None,
    backend: Optional[str] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    inline: bool = False,
) -> F:
  # The Python implementation of `jax.jit`, being slowly replaced by _cpp_jit.
  _check_callable(fun)
  # Fill in whichever of static_argnums/static_argnames was omitted by
  # inspecting fun's signature, then normalize both to tuples.
  static_argnums, static_argnames = _infer_argnums_and_argnames(
      fun, static_argnums, static_argnames)
  static_argnums = _ensure_index_tuple(static_argnums)
  donate_argnums = _ensure_index_tuple(donate_argnums)
  # Donation indices are given relative to the full argument list; rebase
  # them onto the dynamic (non-static) arguments actually traced.
  donate_argnums = rebase_donate_argnums(donate_argnums, static_argnums)

  @wraps(fun)
  @api_boundary
  def f_jitted(*args, **kwargs):
    # With jit globally disabled (e.g. for debugging), call fun directly.
    if config.jax_disable_jit:
      return fun(*args, **kwargs)

    closed_fun, in_tree, args_flat, donated_invars = _prepare_jit(
        fun, static_argnums, static_argnames, donate_argnums, args, kwargs)
    for arg in args_flat:
      _check_arg(arg)
    flat_fun, out_tree = flatten_fun(closed_fun, in_tree)
    # Dispatch the flattened function and args to the xla_call primitive.
    out_flat = xla.xla_call(
        flat_fun, *args_flat,
        device=device, backend=backend, name=flat_fun.__name__,
        donated_invars=donated_invars, inline=inline)
    return tree_unflatten(out_tree(), out_flat)

  # Attach the ahead-of-time lowering entry point (defined elsewhere in this
  # module) as f_jitted.lower.
  f_jitted.lower = _jit_lower(fun, static_argnums, static_argnames, device,
                              backend, donate_argnums, inline)

  return f_jitted
class _BackendAndDeviceInfo(NamedTuple):
  """Container pairing a default device with a committed_to_device flag."""
  default_device: xc.Device
  committed_to_device: bool
class _FastpathData(NamedTuple):
  """Bundle handed back to the C++ fast path by the Python cache-miss handler."""
  xla_executable: xla.XlaExecutable
  out_pytree_def: Any
  sticky_device: xc.Device
  avals: Iterable[Any]
  lazy_exprs: Iterable[Any]
  kept_var_bitvec: Iterable[bool]
_cpp_jit_cache = jax_jit.CompiledFunctionCache()
def _cpp_jit(
    fun: F,
    static_argnums: Union[int, Iterable[int], None] = None,
    static_argnames: Union[str, Iterable[str], None] = None,
    device: Optional[xc.Device] = None,
    backend: Optional[str] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    inline: bool = False,
) -> F:
  # An implementation of `jit` that tries to do as much as possible in C++.
  # The goal of this function is to speed up the time it takes to process the
  # arguments, find the correct C++ executable, start the transfer of arguments
  # and schedule the computation.
  # As long as it does not support all features of the Python implementation
  # the C++ code will fallback to `_python_jit` when it faces some unsupported
  # feature.
  _check_callable(fun)
  # Canonicalize static-argument specifications into index/name tuples.
  static_argnums, static_argnames = _infer_argnums_and_argnames(
      fun, static_argnums, static_argnames)
  static_argnums = _ensure_index_tuple(static_argnums)
  donate_argnums = _ensure_index_tuple(donate_argnums)
  # Donation indices refer to the full argument list; rebase them so they
  # index into the dynamic (non-static) arguments only.
  donate_argnums = rebase_donate_argnums(donate_argnums, static_argnums)
  if device is not None and backend is not None:
    raise ValueError("can't specify both a device and a backend for jit, "
                     f"got device={device} and backend={backend}.")
  @api_boundary
  def cache_miss(*args, **kwargs):
    ### This first part is basically the same code as in _python_jit.
    # An alternative would be for cache_miss to accept from C++ the arguments
    # (dyn_args, donated_invars, args_flat, in_tree), since otherwise we have
    # work/code that is redundant between C++ and Python. We can try that later.
    closed_fun, in_tree, args_flat, donated_invars = _prepare_jit(
        fun, static_argnums, static_argnames, donate_argnums, args, kwargs)
    for arg in args_flat:
      _check_arg(arg)
    flat_fun, out_tree = flatten_fun(closed_fun, in_tree)
    out_flat = xla.xla_call(
        flat_fun, *args_flat,
        device=device, backend=backend, name=flat_fun.__name__,
        donated_invars=donated_invars, inline=inline)
    out_pytree_def = out_tree()
    out = tree_unflatten(out_pytree_def, out_flat)
    ### Decide whether we can support the C++ fast path
    # High level note: The Python tracing mechanism is complex; in particular
    # to know whether `jax.jit(f)(x)` will execute or trace, it's not enough to
    # inspect the argument x, we actually do need to execute it and look at the
    # outputs that could be tracers (if f is capturing `Tracer` by closure).
    execute: Optional[functools.partial] = (
        dispatch._xla_callable.most_recent_entry())
    use_fastpath = (
        # This is if we have already executed this code-path (most-recent entry
        # has been reset to None). Thus, we do not support the fast-path.
        execute is not None and
        execute.func is dispatch._execute_compiled and  # not trivial, not pmap
        # Not supported: ShardedDeviceArray
        all(device_array.type_is_device_array(x) for x in out_flat))
    ### If we can use the fastpath, we return required info to the caller.
    if use_fastpath:
      _, xla_executable, _, result_handlers, kept_var_idx = execute.args
      sticky_device = None
      avals = []
      lazy_exprs = [None] * len(result_handlers)
      # Each result handler carries (aval, sticky_device); collect the avals
      # so C++ can rebuild output buffers without calling back into Python.
      for result_handler in result_handlers:
        aval, sticky_device = result_handler.args
        avals.append(aval)
      assert len(avals) == len(out_flat)
      # Bit-vector marking which flattened arguments the executable kept.
      kept_var_bitvec = [i in kept_var_idx for i in range(len(args_flat))]
      fastpath_data = _FastpathData(xla_executable, out_pytree_def,
                                    sticky_device, avals, lazy_exprs,
                                    kept_var_bitvec)
    else:
      fastpath_data = None
    return out, fastpath_data
  def get_device_info():
    """Backends do not exist before __main__ is being executed."""
    committed_to_device = device is not None or backend is not None
    if device is not None:
      default_device = device
    else:
      backend_ = xb.get_backend(backend)
      default_device = backend_.get_default_device_assignment(1)[0]
    return _BackendAndDeviceInfo(default_device, committed_to_device)
  cpp_jitted_f = jax_jit.jit(fun, cache_miss, get_device_info,
                             static_argnums=static_argnums,
                             static_argnames=static_argnames,
                             donate_argnums=donate_argnums,
                             cache=_cpp_jit_cache)
  f_jitted = wraps(fun)(cpp_jitted_f)
  # Attach the ahead-of-time lowering entry point, mirroring _python_jit.
  f_jitted.lower = _jit_lower(fun, static_argnums, static_argnames, device,
                              backend, donate_argnums, inline)
  return f_jitted
class Lowered:
  """A compiler-ready specialization of a function to argument types/values.

  Wraps a staged-out computation together with the pytree and donation
  metadata needed to compile it later and interpret its results. Provides a
  uniform query surface over JAX's various lowering paths (``jit``, ``pmap``,
  etc.).
  """
  __slots__ = ['in_tree', 'out_tree', 'donate_argnums', '_lowering',
               '_no_kwargs']

  in_tree: PyTreeDef
  out_tree: PyTreeDef
  donate_argnums: Tuple[int]
  _lowering: Union[dispatch.XlaComputation,
                   pxla.MeshComputation,
                   pxla.PmapComputation]
  _no_kwargs: bool

  def __init__(self, lowering, in_tree, out_tree, donate_argnums,
               no_kwargs=False):
    self._no_kwargs = no_kwargs
    self._lowering = lowering
    self.donate_argnums = donate_argnums
    self.out_tree = out_tree
    self.in_tree = in_tree

  def compile(self) -> 'Compiled':
    """Compile this lowering, producing an executable ``Compiled``."""
    executable = self._lowering.compile()
    return Compiled(executable, self.in_tree, self.out_tree,
                    self.donate_argnums, self._no_kwargs)

  def compiler_ir(self, dialect: Optional[str] = None):
    """Return the lowered IR in the requested ``dialect`` (default: MHLO)."""
    if dialect is None or dialect == "mhlo":
      return self._lowering.mhlo()
    if dialect == "hlo":
      return self._lowering.hlo()
    raise ValueError(f"Unknown dialect {dialect}")

  # TODO(frostig): remove this in favor of `compiler_ir`
  def _xla_computation(self):
    return self._lowering.hlo()
class Compiled:
  """Compiled representation of a function specialized to types/values.
  A compiled computation is associated with an executable and the
  remaining information needed to execute it. It also provides a
  common API for querying properties of compiled computations across
  JAX's various compilation paths and backends.
  """
  __slots__ = ['in_tree', 'out_tree', 'donate_argnums', '_executable',
               '_no_kwargs']
  # Pytree structures of the flattened inputs and outputs, recorded at
  # lowering time and re-checked on every call.
  in_tree: PyTreeDef
  out_tree: PyTreeDef
  donate_argnums: Tuple[int]
  _executable: Union[dispatch.XlaCompiledComputation,
                     pxla.MeshExecutable,
                     pxla.PmapExecutable]
  # True when the producing transformation flattened positional arguments
  # only; keyword arguments are then rejected in __call__.
  _no_kwargs: bool
  def __init__(self, executable, in_tree, out_tree, donate_argnums,
               no_kwargs=False):
    self._executable = executable
    self.in_tree = in_tree
    self.out_tree = out_tree
    self.donate_argnums = donate_argnums
    self._no_kwargs = no_kwargs
  def compiler_ir(self):
    """Post-compilation IR.
    Compilation typically involves code transformation and
    optimization. This method exists to reflect the compiler's
    representation of the program after such passes, whenever
    possible.
    """
    return self._executable.xla_executable().hlo_modules()
  def runtime_executable(self):
    # The runtime (xla_client-level) executable backing this computation.
    return self._executable.xla_executable()
  def _xla_executable(self):
    # TODO(frostig): finalize API. For now, return the underlying
    # executable directly via this method.
    return self._executable.xla_executable()
  def __call__(self, *args, **kwargs):
    # Flatten the call arguments exactly as at lowering time, verify the
    # pytree structure matches, then invoke the executable on flat buffers.
    if self._no_kwargs:
      if kwargs:
        kws = ', '.join(kwargs.keys())
        raise NotImplementedError(
            'function was compiled by a transformation that does not support '
            f'keyword arguments, but called with keyword arguments: {kws}')
      args_flat, in_tree = tree_flatten(args)
    else:
      args_flat, in_tree = tree_flatten((args, kwargs))
    if in_tree != self.in_tree:
      # TODO(frostig): provide more info about the source function
      # and transformation
      raise TypeError(
          f'function compiled for {self.in_tree}, called with {in_tree}')
    try:
      out_flat = self._executable.call(*args_flat)
    except TypeError as e:
      # We can't transform ahead-of-time compiled calls, since we've
      # lowered and compiled for a fixed function signature, and JAX
      # transformations change signatures. We interpret a Tracer
      # argument as an indication of a transformation attempt. We
      # could check this before the executable call, but we'd rather
      # avoid isinstance checks on the call path. Seeing a TypeError
      # might mean that arguments have JAX-invalid types, which in
      # turn might mean some are Tracers.
      for arg in args_flat:
        if isinstance(arg, core.Tracer):
          raise TypeError(
              'Cannot apply JAX transformations to a function lowered and '
              'compiled for a particular signature. Detected argument of '
              f'Tracer type {type(arg)}.')
      # for/else: no Tracer argument found, so the TypeError is not a
      # transformation attempt — re-raise the original exception.
      else:
        raise
    return tree_unflatten(self.out_tree, out_flat)
def _jit_lower(fun, static_argnums, static_argnames, device, backend,
               donate_argnums, inline):
  """Make a ``lower`` method for jitted functions.

  Returns a closure that stages ``fun`` out for concrete (or shape/dtype
  duck-typed) arguments and wraps the result in a ``Lowered``.
  """
  # If the function we returned from ``jit`` were a class instance,
  # this might naturally be a method, with ``fun`` as a ``self`` and
  # all the other arguments stored as attributes.
  # NOTE(review): `inline` is accepted but unused below — confirm whether
  # lowering is meant to thread it through to the callable.

  def arg_spec(x):
    # like xla.arg_spec but duck-types on x.shape and x.dtype
    aval = shaped_abstractify(x)
    try:
      return aval, x._device
    except Exception:
      # Not every argument carries a `_device` attribute; treat those as
      # uncommitted. Narrowed from a bare `except:` so that
      # KeyboardInterrupt/SystemExit still propagate.
      return aval, None

  @api_boundary
  def lower(*args, **kwargs) -> Lowered:
    """Lower this function for the given arguments.

    A lowered function is staged out of Python and translated to a
    compiler's input language, possibly in a backend-dependent
    manner. It is ready for compilation but not yet compiled.

    Returns:
      A ``Lowered`` instance representing the lowering.
    """
    closed_fun, in_tree, args_flat, donated_invars = _prepare_jit(
        fun, static_argnums, static_argnames, donate_argnums, args, kwargs)
    flat_fun, out_tree = flatten_fun(closed_fun, in_tree)
    name = flat_fun.__name__
    arg_specs = unsafe_map(arg_spec, args_flat)
    computation = dispatch.lower_xla_callable(
        flat_fun, device, backend, name, donated_invars, *arg_specs)
    return Lowered(computation, in_tree, out_tree(), donate_argnums)

  return lower
@contextmanager
def disable_jit():
  """Context manager that disables :py:func:`jit` behavior under its dynamic context.
  For debugging it is useful to have a mechanism that disables :py:func:`jit`
  everywhere in a dynamic context.
  Values that have a data dependence on the arguments to a jitted function are
  traced and abstracted. For example, an abstract value may be a
  :py:class:`ShapedArray` instance, representing the set of all possible arrays
  with a given shape and dtype, but not representing one concrete array with
  specific values. You might notice those if you use a benign side-effecting
  operation in a jitted function, like a print:

  >>> import jax
  >>>
  >>> @jax.jit
  ... def f(x):
  ...   y = x * 2
  ...   print("Value of y is", y)
  ...   return y + 3
  ...
  >>> print(f(jax.numpy.array([1, 2, 3])))
  Value of y is Traced<ShapedArray(int32[3])>with<DynamicJaxprTrace(level=0/1)>
  [5 7 9]

  Here ``y`` has been abstracted by :py:func:`jit` to a :py:class:`ShapedArray`,
  which represents an array with a fixed shape and type but an arbitrary value.
  The value of ``y`` is also traced. If we want to see a concrete value while
  debugging, and avoid the tracer too, we can use the :py:func:`disable_jit`
  context manager:

  >>> import jax
  >>>
  >>> with jax.disable_jit():
  ...   print(f(jax.numpy.array([1, 2, 3])))
  ...
  Value of y is [2 4 6]
  [5 7 9]
  """
  # `_disable_jit` is defined elsewhere in this module; entering it with True
  # disables jit for the dynamic extent of this block.
  with _disable_jit(True):
    yield
def xla_computation(fun: Callable,
                    static_argnums: Union[int, Iterable[int]] = (),
                    axis_env: Optional[Sequence[Tuple[AxisName, int]]] = None,
                    in_parts=None, out_parts=None,
                    backend: Optional[str] = None,
                    tuple_args: bool = False,
                    instantiate_const_outputs: Optional[bool] = None,
                    return_shape: bool = False,
                    donate_argnums: Union[int, Iterable[int]] = ()) -> Callable:
  """Creates a function that produces its XLA computation given example args.

  Args:
    fun: Function from which to form XLA computations.
    static_argnums: See the :py:func:`jax.jit` docstring.
    axis_env: Optional, a sequence of pairs where the first element is an axis
      name and the second element is a positive integer representing the size of
      the mapped axis with that name. This parameter is useful when lowering
      functions that involve parallel communication collectives, and it
      specifies the axis name/size environment that would be set up by
      applications of :py:func:`jax.pmap`. See the examples below.
    in_parts: Optional, how each argument to ``fun`` should be partitioned or
      replicated. This is used to specify partitioned XLA computations, see
      ``sharded_jit`` for more info.
    out_parts: Optional, how each output of ``fun`` should be partitioned or
      replicated. This is used to specify partitioned XLA computations, see
      ``sharded_jit`` for more info.
    backend: This is an experimental feature and the API is likely to change.
      Optional, a string representing the XLA backend: ``'cpu'``, ``'gpu'``, or
      ``'tpu'``.
    tuple_args: Optional bool, defaults to ``False``. If ``True``, the resulting
      XLA computation will have a single tuple argument that is unpacked into
      the specified function arguments. If `None`, tupling will be enabled when
      there are more than 100 arguments, since some platforms have limits on
      argument arity.
    instantiate_const_outputs: Deprecated argument, does nothing.
    return_shape: Optional boolean, defaults to ``False``. If ``True``, the
      wrapped function returns a pair where the first element is the XLA
      computation and the second element is a pytree with the same structure as
      the output of ``fun`` and where the leaves are objects with ``shape``,
      ``dtype``, and ``named_shape`` attributes representing the corresponding
      types of the output leaves.
    donate_argnums: Specify which arguments are "donated" to the computation.
      It is safe to donate arguments if you no longer need them once the
      computation has finished. In some cases XLA can make use of donated
      buffers to reduce the amount of memory needed to perform a computation,
      for example recycling one of your input buffers to store a result. You
      should not reuse buffers that you donate to a computation, JAX will raise
      an error if you try to.

  Returns:
    A wrapped version of ``fun`` that when applied to example arguments returns
    a built XLA Computation (see xla_client.py), from which representations of
    the unoptimized XLA HLO computation can be extracted using methods like
    ``as_hlo_text``, ``as_serialized_hlo_module_proto``, and
    ``as_hlo_dot_graph``. If the argument ``return_shape`` is ``True``, then the
    wrapped function returns a pair where the first element is the XLA
    Computation and the second element is a pytree representing the structure,
    shapes, dtypes, and named shapes of the output of ``fun``.

  Concrete example arguments are not always necessary. For those arguments not
  indicated by ``static_argnums``, any object with ``shape`` and ``dtype``
  attributes is acceptable (excepting namedtuples, which are treated as Python
  containers).

  For example:

  >>> import jax
  >>>
  >>> def f(x): return jax.numpy.sin(jax.numpy.cos(x))
  >>> c = jax.xla_computation(f)(3.)
  >>> print(c.as_hlo_text())  # doctest: +SKIP
  HloModule xla_computation_f.6
  <BLANKLINE>
  ENTRY xla_computation_f.6 {
    constant.2 = pred[] constant(false)
    parameter.1 = f32[] parameter(0)
    cosine.3 = f32[] cosine(parameter.1)
    sine.4 = f32[] sine(cosine.3)
    ROOT tuple.5 = (f32[]) tuple(sine.4)
  }
  <BLANKLINE>
  <BLANKLINE>

  Alternatively, the assignment to ``c`` above could be written:

  >>> import types
  >>> scalar = types.SimpleNamespace(shape=(), dtype=np.dtype(np.float32))
  >>> c = jax.xla_computation(f)(scalar)

  Here's an example that involves a parallel collective and axis name:

  >>> def f(x): return x - jax.lax.psum(x, 'i')
  >>> c = jax.xla_computation(f, axis_env=[('i', 4)])(2)
  >>> print(c.as_hlo_text())  # doctest: +SKIP
  HloModule jaxpr_computation.9
  primitive_computation.3 {
    parameter.4 = s32[] parameter(0)
    parameter.5 = s32[] parameter(1)
    ROOT add.6 = s32[] add(parameter.4, parameter.5)
  }
  ENTRY jaxpr_computation.9 {
    tuple.1 = () tuple()
    parameter.2 = s32[] parameter(0)
    all-reduce.7 = s32[] all-reduce(parameter.2), replica_groups={{0,1,2,3}}, to_apply=primitive_computation.3
    ROOT subtract.8 = s32[] subtract(parameter.2, all-reduce.7)
  }
  <BLANKLINE>
  <BLANKLINE>

  Notice the ``replica_groups`` that were generated. Here's an example that
  generates more interesting ``replica_groups``:

  >>> from jax import lax
  >>> def g(x):
  ...   rowsum = lax.psum(x, 'i')
  ...   colsum = lax.psum(x, 'j')
  ...   allsum = lax.psum(x, ('i', 'j'))
  ...   return rowsum, colsum, allsum
  ...
  >>> axis_env = [('i', 4), ('j', 2)]
  >>> c = xla_computation(g, axis_env=axis_env)(5.)
  >>> print(c.as_hlo_text())  # doctest: +SKIP
  HloModule jaxpr_computation__1.19
  [removed uninteresting text here]
  ENTRY jaxpr_computation__1.19 {
    tuple.1 = () tuple()
    parameter.2 = f32[] parameter(0)
    all-reduce.7 = f32[] all-reduce(parameter.2), replica_groups={{0,2,4,6},{1,3,5,7}}, to_apply=primitive_computation__1.3
    all-reduce.12 = f32[] all-reduce(parameter.2), replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=primitive_computation__1.8
    all-reduce.17 = f32[] all-reduce(parameter.2), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=primitive_computation__1.13
    ROOT tuple.18 = (f32[], f32[], f32[]) tuple(all-reduce.7, all-reduce.12, all-reduce.17)
  }
  """
  del instantiate_const_outputs  # Unused
  _check_callable(fun)
  # Canonicalize static/donated argument indices; donation indices are
  # rebased to refer to dynamic (non-static) arguments only.
  static_argnums = _ensure_index_tuple(static_argnums)
  donate_argnums = _ensure_index_tuple(donate_argnums)
  donate_argnums = rebase_donate_argnums(donate_argnums, static_argnums)
  fun_name = getattr(fun, "__name__", "unknown")
  backend = backend if backend is not None else xb.get_backend().platform
  def make_axis_env(nreps):
    # Combine the jaxpr's own replica count with the user-specified axis
    # environment (total replicas = nreps * product of axis sizes).
    if axis_env is None:
      return xla.AxisEnv(nreps, (), ())
    else:
      nreps = nreps * prod(size for name, size in axis_env)
      names, sizes = unzip2(axis_env)
      return xla.AxisEnv(nreps, names, sizes)
  @wraps(fun)
  @api_boundary
  def computation_maker(*args, **kwargs):
    # Validate that every static/donated index refers to a supplied argument.
    if max(static_argnums + donate_argnums, default=-1) >= len(args):
      raise ValueError(f"jitted function has static_argnums={static_argnums},"
                       f" donate_argnums={donate_argnums} but "
                       f"was called with only {len(args)} positional arguments.")
    f = lu.wrap_init(fun)
    if static_argnums:
      f, dyn_args = argnums_partial_except(f, static_argnums, args, allow_invalid=False)
    else:
      dyn_args = args
    args_flat, in_tree = tree_flatten((dyn_args, kwargs))
    if donate_argnums:
      donated_invars = donation_vector(donate_argnums, dyn_args, kwargs)
    else:
      donated_invars = (False,) * len(args_flat)
    if in_parts is None:
      in_parts_flat = None
    else:
      in_parts_flat = tuple(flatten_axes(
          "xla_computation in_parts", in_tree.children()[0], in_parts))
    jaxtree_fun, out_tree = flatten_fun(f, in_tree)
    avals = map(shaped_abstractify, args_flat)
    # Trace and lower inside the extended axis environment so collectives
    # over the user-provided axes resolve.
    with ExitStack() as stack:
      for axis_name, size in axis_env or []:
        stack.enter_context(core.extend_axis_env(axis_name, size, None))
      jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(jaxtree_fun, avals)
      jaxpr = dispatch.apply_outfeed_rewriter(jaxpr)
      axis_env_ = make_axis_env(dispatch.jaxpr_replicas(jaxpr))
      if out_parts is None:
        out_parts_flat = None
      else:
        out_parts_flat = tuple(flatten_axes(
            "xla_computation out_parts", out_tree(), out_parts))
      c = xc.XlaBuilder(f"xla_computation_{fun_name}")
      xla_consts = map(partial(xla.pyval_to_ir_constant, c), consts)
      should_tuple = tuple_args if tuple_args is not None else (len(avals) > 100)
      xla_args, donated_invars = xla._xla_callable_args(
          c, avals, should_tuple, partitions=in_parts_flat, donated_invars=donated_invars)
      ctx = xla.TranslationContext(
          c, backend, axis_env_,
          extend_name_stack(wrap_name(fun_name, "xla_computation")))
      out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)
      build_out_tuple = partial(xc.ops.Tuple, c, out_nodes)
      if out_parts is not None:
        out_tuple = xla.with_sharding(c, out_parts_flat, build_out_tuple)
      else:
        out_tuple = build_out_tuple()
      # NOTE(review): this passes the raw `tuple_args` rather than the
      # computed `should_tuple`; confirm intended when tuple_args=None with
      # more than 100 arguments.
      if any(donated_invars):
        donated_invars = xla.set_up_aliases(c, xla_args, c.GetShape(out_tuple),
                                            donated_invars, tuple_args)
      if any(donated_invars):
        shapes = [str(c.GetShape(a)) for a, d in zip(xla_args, donated_invars) if d]
        warn(f"Some donated buffers were not usable: {', '.join(shapes)}")
      built = c.build(out_tuple)
    out_shapes_flat = [
        ShapeDtypeStruct(a.shape, a.dtype, a.named_shape) for a in out_avals]
    out_shape = tree_unflatten(out_tree(), out_shapes_flat)
    for out_aval in out_avals:
      if not isinstance(out_aval, xla.ShapedArray):
        raise RuntimeError("As we want to propagate the weak_type, we need "
                           "to get a ShapedArray, otherwise this "
                           "information is lost")
    if return_shape:
      return built, out_shape
    else:
      return built
  return computation_maker
def grad(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
         has_aux: bool = False, holomorphic: bool = False,
         allow_int: bool = False,
         reduce_axes: Sequence[AxisName] = ()) -> Callable:
  """Creates a function that evaluates the gradient of ``fun``.

  Args:
    fun: Function to be differentiated. Its arguments at positions specified by
      ``argnums`` should be arrays, scalars, or standard Python containers.
      Argument arrays in the positions specified by ``argnums`` must be of
      inexact (i.e., floating-point or complex) type. It
      should return a scalar (which includes arrays with shape ``()`` but not
      arrays with shape ``(1,)`` etc.)
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default 0).
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
      first element is considered the output of the mathematical function to be
      differentiated and the second element is auxiliary data. Default False.
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. If True, inputs and outputs must be complex. Default False.
    allow_int: Optional, bool. Whether to allow differentiating with
      respect to integer valued inputs. The gradient of an integer input will
      have a trivial vector-space dtype (float0). Default False.
    reduce_axes: Optional, tuple of axis names. If an axis is listed here, and
      ``fun`` implicitly broadcasts a value over that axis, the backward pass
      will perform a ``psum`` of the corresponding gradient. Otherwise, the
      gradient will be per-example over named axes. For example, if ``'batch'``
      is a named batch axis, ``grad(f, reduce_axes=('batch',))`` will create a
      function that computes the total gradient while ``grad(f)`` will create
      one that computes the per-example gradient.

  Returns:
    A function with the same arguments as ``fun``, that evaluates the gradient
    of ``fun``. If ``argnums`` is an integer then the gradient has the same
    shape and type as the positional argument indicated by that integer. If
    argnums is a tuple of integers, the gradient is a tuple of values with the
    same shapes and types as the corresponding arguments. If ``has_aux`` is True
    then a pair of (gradient, auxiliary_data) is returned.

  For example:

  >>> import jax
  >>>
  >>> grad_tanh = jax.grad(jax.numpy.tanh)
  >>> print(grad_tanh(0.2))
  0.961043
  """
  docstr = ("Gradient of {fun} with respect to positional argument(s) "
            "{argnums}. Takes the same arguments as {fun} but returns the "
            "gradient, which has the same shape as the arguments at "
            "positions {argnums}.")
  # Everything is delegated to value_and_grad; we just drop (or reorder) the
  # value from its result.
  value_and_grad_f = value_and_grad(fun, argnums, has_aux=has_aux,
                                    holomorphic=holomorphic,
                                    allow_int=allow_int,
                                    reduce_axes=reduce_axes)
  if has_aux:
    @wraps(fun, docstr=docstr, argnums=argnums)
    @api_boundary
    def gradient_fn(*args, **kwargs):
      (_, aux), g = value_and_grad_f(*args, **kwargs)
      return g, aux
  else:
    @wraps(fun, docstr=docstr, argnums=argnums)
    @api_boundary
    def gradient_fn(*args, **kwargs):
      _, g = value_and_grad_f(*args, **kwargs)
      return g
  return gradient_fn
def value_and_grad(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
                   has_aux: bool = False, holomorphic: bool = False,
                   allow_int: bool = False, reduce_axes: Sequence[AxisName] = ()
                   ) -> Callable[..., Tuple[Any, Any]]:
  """Create a function that evaluates both ``fun`` and the gradient of ``fun``.

  Args:
    fun: Function to be differentiated. Its arguments at positions specified by
      ``argnums`` should be arrays, scalars, or standard Python containers. It
      should return a scalar (which includes arrays with shape ``()`` but not
      arrays with shape ``(1,)`` etc.)
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default 0).
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
      first element is considered the output of the mathematical function to be
      differentiated and the second element is auxiliary data. Default False.
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. If True, inputs and outputs must be complex. Default False.
    allow_int: Optional, bool. Whether to allow differentiating with
      respect to integer valued inputs. The gradient of an integer input will
      have a trivial vector-space dtype (float0). Default False.
    reduce_axes: Optional, tuple of axis names. If an axis is listed here, and
      ``fun`` implicitly broadcasts a value over that axis, the backward pass
      will perform a ``psum`` of the corresponding gradient. Otherwise, the
      gradient will be per-example over named axes. For example, if ``'batch'``
      is a named batch axis, ``value_and_grad(f, reduce_axes=('batch',))`` will
      create a function that computes the total gradient while
      ``value_and_grad(f)`` will create one that computes the per-example
      gradient.

  Returns:
    A function with the same arguments as ``fun`` that evaluates both ``fun``
    and the gradient of ``fun`` and returns them as a pair (a two-element
    tuple). If ``argnums`` is an integer then the gradient has the same shape
    and type as the positional argument indicated by that integer. If argnums is
    a sequence of integers, the gradient is a tuple of values with the same
    shapes and types as the corresponding arguments. If ``has_aux`` is True
    then a tuple of ((value, auxiliary_data), gradient) is returned.
  """
  docstr = ("Value and gradient of {fun} with respect to positional "
            "argument(s) {argnums}. Takes the same arguments as {fun} but "
            "returns a two-element tuple where the first element is the value "
            "of {fun} and the second element is the gradient, which has the "
            "same shape as the arguments at positions {argnums}.")
  _check_callable(fun)
  argnums = core.concrete_or_error(_ensure_index, argnums)
  reduce_axes = _ensure_str_tuple(reduce_axes)

  @wraps(fun, docstr=docstr, argnums=argnums)
  @api_boundary
  def value_and_grad_f(*args, **kwargs):
    # Validate that every differentiated position was actually supplied.
    largest = argnums if isinstance(argnums, int) else max(argnums)
    if largest >= len(args):
      raise TypeError(f"differentiating with respect to argnums={argnums} requires at least "
                      f"{largest + 1} positional arguments to be passed by the caller, "
                      f"but got only {len(args)} positional arguments.")
    wrapped = lu.wrap_init(fun, kwargs)
    partial_fun, dyn_args = argnums_partial(wrapped, argnums, args,
                                            require_static_args_hashable=False)
    for leaf in tree_leaves(dyn_args):
      _check_input_dtype_grad(holomorphic, allow_int, leaf)
    aux = None
    if has_aux:
      ans, vjp_fn, aux = _vjp(
          partial_fun, *dyn_args, has_aux=True, reduce_axes=reduce_axes)
    else:
      ans, vjp_fn = _vjp(partial_fun, *dyn_args, reduce_axes=reduce_axes)
    # The output must be a scalar of an appropriate dtype before we pull
    # back a unit cotangent through the VJP.
    _check_scalar(ans)
    tree_map(partial(_check_output_dtype_grad, holomorphic), ans)
    grads = vjp_fn(jax.lax._one(ans))
    if isinstance(argnums, int):
      grads = grads[0]
    return ((ans, aux), grads) if has_aux else (ans, grads)

  return value_and_grad_f
def _check_scalar(x):
msg = "Gradient only defined for scalar-output functions. Output {}.".format
try:
aval = core.get_aval(x)
except TypeError as e:
raise TypeError(msg(f"was {x}")) from e
else:
if isinstance(aval, ShapedArray):
if aval.shape != ():
raise TypeError(msg(f"had shape: {aval.shape}"))
else:
raise TypeError(msg(f"had abstract value {aval}"))
def _check_input_dtype_revderiv(name, holomorphic, allow_int, x):
  """Validate the dtype of one input leaf for reverse-mode differentiation."""
  _check_arg(x)
  aval = core.get_aval(x)
  if holomorphic and not dtypes.issubdtype(aval.dtype, np.complexfloating):
    raise TypeError(f"{name} with holomorphic=True requires inputs with complex dtype, "
                    f"but got {aval.dtype.name}.")
  int_like = (dtypes.issubdtype(aval.dtype, np.integer) or
              dtypes.issubdtype(aval.dtype, np.bool_))
  if int_like:
    # Integer/boolean inputs are only differentiable (with a trivial float0
    # cotangent) when the caller opted in via allow_int.
    if not allow_int:
      raise TypeError(f"{name} requires real- or complex-valued inputs (input dtype "
                      f"that is a sub-dtype of np.inexact), but got {aval.dtype.name}. "
                      "If you want to use Boolean- or integer-valued inputs, use vjp "
                      "or set allow_int to True.")
  elif not dtypes.issubdtype(aval.dtype, np.inexact):
    raise TypeError(f"{name} requires numerical-valued inputs (input dtype that is a "
                    f"sub-dtype of np.bool_ or np.number), but got {aval.dtype.name}.")
# Input-dtype validator specialized with the "grad" error-message name.
_check_input_dtype_grad = partial(_check_input_dtype_revderiv, "grad")
def _check_output_dtype_revderiv(name, holomorphic, x):
aval = core.get_aval(x)
if holomorphic:
if not dtypes.issubdtype(aval.dtype, np.complexfloating):
raise TypeError(f"{name} with holomorphic=True requires outputs with complex dtype, "
f"but got {aval.dtype.name}.")
elif dtypes.issubdtype(aval.dtype, np.complexfloating):
raise TypeError(f"{name} requires real-valued outputs (output dtype that is "
f"a sub-dtype of np.floating), but got {aval.dtype.name}. "
"For holomorphic differentiation, pass holomorphic=True. "
"For differentiation of non-holomorphic functions involving complex "
"outputs, use jax.vjp directly.")
elif not dtypes.issubdtype(aval.dtype, np.floating):
raise TypeError(f"{name} requires real-valued outputs (output dtype that is "
f"a sub-dtype of np.floating), but got {aval.dtype.name}. "
"For differentiation of functions with integer outputs, use "
"jax.vjp directly.")
# Output-dtype validator specialized with the "grad" error-message name.
_check_output_dtype_grad = partial(_check_output_dtype_revderiv, "grad")
def jacfwd(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
           has_aux: bool = False, holomorphic: bool = False) -> Callable:
  """Jacobian of ``fun`` evaluated column-by-column using forward-mode AD.

  Args:
    fun: Function whose Jacobian is to be computed.
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default ``0``).
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
      first element is considered the output of the mathematical function to be
      differentiated and the second element is auxiliary data. Default False.
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. Default False.

  Returns:
    A function with the same arguments as ``fun``, that evaluates the Jacobian of
    ``fun`` using forward-mode automatic differentiation. If ``has_aux`` is True
    then a pair of (jacobian, auxiliary_data) is returned.

  >>> import jax
  >>> import jax.numpy as jnp
  >>>
  >>> def f(x):
  ...   return jnp.asarray(
  ...     [x[0], 5*x[2], 4*x[1]**2 - 2*x[2], x[2] * jnp.sin(x[0])])
  ...
  >>> print(jax.jacfwd(f)(jnp.array([1., 2., 3.])))
  [[ 1.       0.       0.     ]
   [ 0.       0.       5.     ]
   [ 0.      16.      -2.     ]
   [ 1.6209   0.       0.84147]]
  """
  _check_callable(fun)
  argnums = _ensure_index(argnums)
  def jacfun(*args, **kwargs):
    f = lu.wrap_init(fun, kwargs)
    f_partial, dyn_args = argnums_partial(f, argnums, args,
                                          require_static_args_hashable=False)
    tree_map(partial(_check_input_dtype_jacfwd, holomorphic), dyn_args)
    # One JVP per standard-basis tangent vector, batched with vmap: each
    # batched JVP produces one column of the Jacobian (out_axes=-1).
    if not has_aux:
      pushfwd = partial(_jvp, f_partial, dyn_args)
      y, jac = vmap(pushfwd, out_axes=(None, -1))(_std_basis(dyn_args))
    else:
      pushfwd = partial(_jvp, f_partial, dyn_args, has_aux=True)
      y, jac, aux = vmap(pushfwd, out_axes=(None, -1, None))(_std_basis(dyn_args))
    tree_map(partial(_check_output_dtype_jacfwd, holomorphic), y)
    # Reassemble the flat Jacobian columns into the input pytree structure.
    example_args = dyn_args[0] if isinstance(argnums, int) else dyn_args
    jac_tree = tree_map(partial(_jacfwd_unravel, example_args), y, jac)
    if not has_aux:
      return jac_tree
    else:
      return jac_tree, aux
  return jacfun
def _check_input_dtype_jacfwd(holomorphic: bool, x: Any) -> None:
  """Validate the dtype of one input leaf for forward-mode Jacobians."""
  _check_arg(x)
  aval = core.get_aval(x)
  if not holomorphic:
    # Forward mode over the reals: only floating-point inputs are allowed.
    if dtypes.issubdtype(aval.dtype, np.floating):
      return
    raise TypeError("jacfwd requires real-valued inputs (input dtype that is "
                    f"a sub-dtype of np.floating), but got {aval.dtype.name}. "
                    "For holomorphic differentiation, pass holomorphic=True. "
                    "For differentiation of non-holomorphic functions involving "
                    "complex inputs or integer inputs, use jax.jvp directly.")
  if not dtypes.issubdtype(aval.dtype, np.complexfloating):
    raise TypeError("jacfwd with holomorphic=True requires inputs with complex "
                    f"dtype, but got {aval.dtype.name}.")
def _check_output_dtype_jacfwd(holomorphic, x):
aval = core.get_aval(x)
if holomorphic:
if not dtypes.issubdtype(aval.dtype, np.complexfloating):
raise TypeError("jacfwd with holomorphic=True requires outputs with complex dtype, "
f"but got {aval.dtype.name}.")
def jacrev(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
           has_aux: bool = False, holomorphic: bool = False, allow_int: bool = False) -> Callable:
  """Jacobian of ``fun`` evaluated row-by-row using reverse-mode AD.

  Args:
    fun: Function whose Jacobian is to be computed.
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default ``0``).
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
      first element is considered the output of the mathematical function to be
      differentiated and the second element is auxiliary data. Default False.
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. Default False.
    allow_int: Optional, bool. Whether to allow differentiating with
      respect to integer valued inputs. The gradient of an integer input will
      have a trivial vector-space dtype (float0). Default False.

  Returns:
    A function with the same arguments as ``fun``, that evaluates the Jacobian of
    ``fun`` using reverse-mode automatic differentiation. If ``has_aux`` is True
    then a pair of (jacobian, auxiliary_data) is returned.

  >>> import jax
  >>> import jax.numpy as jnp
  >>>
  >>> def f(x):
  ...   return jnp.asarray(
  ...     [x[0], 5*x[2], 4*x[1]**2 - 2*x[2], x[2] * jnp.sin(x[0])])
  ...
  >>> print(jax.jacrev(f)(jnp.array([1., 2., 3.])))
  [[ 1.       0.       0.     ]
   [ 0.       0.       5.     ]
   [ 0.      16.      -2.     ]
   [ 1.6209   0.       0.84147]]
  """
  _check_callable(fun)
  # Normalize argnums (e.g. numpy integer -> int, list -> tuple) so the
  # `isinstance(argnums, int)` checks below are reliable; this mirrors the
  # normalization performed in `jacfwd`.
  argnums = _ensure_index(argnums)
  def jacfun(*args, **kwargs):
    f = lu.wrap_init(fun, kwargs)
    f_partial, dyn_args = argnums_partial(f, argnums, args,
                                          require_static_args_hashable=False)
    tree_map(partial(_check_input_dtype_jacrev, holomorphic, allow_int), dyn_args)
    if not has_aux:
      y, pullback = _vjp(f_partial, *dyn_args)
    else:
      y, pullback, aux = _vjp(f_partial, *dyn_args, has_aux=True)
    tree_map(partial(_check_output_dtype_jacrev, holomorphic), y)
    # One VJP per standard-basis cotangent of the output, batched with vmap:
    # this materializes the Jacobian one row at a time.
    jac = vmap(pullback)(_std_basis(y))
    jac = jac[0] if isinstance(argnums, int) else jac
    example_args = dyn_args[0] if isinstance(argnums, int) else dyn_args
    # Reassemble flat rows into pytrees, then transpose so the result is
    # structured outputs-over-inputs.
    jac_tree = tree_map(partial(_jacrev_unravel, y), example_args, jac)
    jac_tree = tree_transpose(tree_structure(example_args), tree_structure(y), jac_tree)
    if not has_aux:
      return jac_tree
    else:
      return jac_tree, aux
  return jacfun
# `jax.jacobian` is an alias for the reverse-mode implementation.
jacobian = jacrev

# Specialize the shared reverse-mode dtype checks with the "jacrev" name so
# error messages identify the transformation that rejected the value.
_check_input_dtype_jacrev = partial(_check_input_dtype_revderiv, "jacrev")
_check_output_dtype_jacrev = partial(_check_output_dtype_revderiv, "jacrev")
def hessian(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
            holomorphic: bool = False) -> Callable:
  """Hessian of ``fun`` as a dense array.

  Args:
    fun: Function whose Hessian is to be computed.  Its arguments at positions
      specified by ``argnums`` should be arrays, scalars, or standard Python
      containers thereof. It should return arrays, scalars, or standard Python
      containers thereof.
    argnums: Optional, integer or sequence of integers. Specifies which
      positional argument(s) to differentiate with respect to (default ``0``).
    holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be
      holomorphic. Default False.

  Returns:
    A function with the same arguments as ``fun``, that evaluates the Hessian of
    ``fun``.

  >>> import jax
  >>>
  >>> g = lambda x: x[0]**3 - 2*x[0]*x[1] - x[1]**6
  >>> print(jax.hessian(g)(jax.numpy.array([1., 2.])))
  [[   6.   -2.]
   [  -2. -480.]]

  :py:func:`hessian` is a generalization of the usual definition of the Hessian
  that supports nested Python containers (i.e. pytrees) as inputs and outputs.
  The tree structure of ``jax.hessian(fun)(x)`` is given by forming a tree
  product of the structure of ``fun(x)`` with a tree product of two copies of
  the structure of ``x``. A tree product of two tree structures is formed by
  replacing each leaf of the first tree with a copy of the second. For example:

  >>> import jax.numpy as jnp
  >>> f = lambda dct: {"c": jnp.power(dct["a"], dct["b"])}
  >>> print(jax.hessian(f)({"a": jnp.arange(2.) + 1., "b": jnp.arange(2.) + 2.}))
  {'c': {'a': {'a': DeviceArray([[[ 2., 0.], [ 0., 0.]],
                                 [[ 0., 0.], [ 0., 12.]]], dtype=float32),
               'b': DeviceArray([[[ 1.      ,  0.      ], [ 0.      ,  0.      ]],
                                 [[ 0.      ,  0.      ], [ 0.      , 12.317766]]], dtype=float32)},
         'b': {'a': DeviceArray([[[ 1.      ,  0.      ], [ 0.      ,  0.      ]],
                                 [[ 0.      ,  0.      ], [ 0.      , 12.317766]]], dtype=float32),
               'b': DeviceArray([[[0.       , 0.       ], [0.       , 0.       ]],
                                 [[0.       , 0.       ], [0.       , 3.843624 ]]], dtype=float32)}}}

  Thus each leaf in the tree structure of ``jax.hessian(fun)(x)`` corresponds to
  a leaf of ``fun(x)`` and a pair of leaves of ``x``. For each leaf in
  ``jax.hessian(fun)(x)``, if the corresponding array leaf of ``fun(x)`` has
  shape ``(out_1, out_2, ...)`` and the corresponding array leaves of ``x`` have
  shape ``(in_1_1, in_1_2, ...)`` and ``(in_2_1, in_2_2, ...)`` respectively,
  then the Hessian leaf has shape ``(out_1, out_2, ..., in_1_1, in_1_2, ...,
  in_2_1, in_2_2, ...)``. In other words, the Python tree structure represents
  the block structure of the Hessian, with blocks determined by the input and
  output pytrees.

  In particular, an array is produced (with no pytrees involved) when the
  function input ``x`` and output ``fun(x)`` are each a single array, as in the
  ``g`` example above. If ``fun(x)`` has shape ``(out1, out2, ...)`` and ``x``
  has shape ``(in1, in2, ...)`` then ``jax.hessian(fun)(x)`` has shape
  ``(out1, out2, ..., in1, in2, ..., in1, in2, ...)``. To flatten pytrees into
  1D vectors, consider using :py:func:`jax.flatten_util.flatten_pytree`.
  """
  # Forward-over-reverse: differentiate the reverse-mode gradient with
  # forward mode, which yields the dense second-derivative array.
  gradient_fun = jacrev(fun, argnums, holomorphic)
  return jacfwd(gradient_fun, argnums, holomorphic)
def _std_basis(pytree):
  """Build the standard basis for a pytree's flattened vector space.

  Returns a pytree matching ``pytree`` whose leaves stack the rows of an
  identity matrix of size equal to the total number of elements.
  """
  leaves, _ = tree_flatten(pytree)
  total = sum(np.size(leaf) for leaf in leaves)
  dtype = dtypes.result_type(*leaves)
  eye = jax.numpy.eye(total, dtype=dtype)
  # Axis 1 carries the per-leaf components; axis 0 enumerates basis vectors.
  return _unravel_array_into_pytree(pytree, 1, None, eye)
def _jacfwd_unravel(input_pytree, output_pytree_leaf, arr):
  # Forward-mode Jacobians carry the input components on the last axis.
  return _unravel_array_into_pytree(input_pytree, -1, output_pytree_leaf, arr)
def _jacrev_unravel(output_pytree, input_pytree_leaf, arr):
  # Reverse-mode Jacobians carry the output components on the first axis.
  return _unravel_array_into_pytree(output_pytree, 0, input_pytree_leaf, arr)
def _possible_downcast(x, example):
  """Cast ``x`` toward the dtype/weak-type of ``example``.

  If ``x`` is complex but ``example`` is not, the imaginary part is dropped.
  When ``example`` is None the element type is left unchanged.
  """
  # NOTE: the short-circuit matters — `_dtype(example)` is only evaluated
  # when x is complex, as in the original formulation.
  if (dtypes.issubdtype(x.dtype, np.complexfloating)
      and not dtypes.issubdtype(_dtype(example), np.complexfloating)):
    x = x.real
  if example is None:
    dtype = weak_type = None
  else:
    dtype = _dtype(example)
    weak_type = dtypes.is_weakly_typed(example)
  return jax._src.lax.lax._convert_element_type(x, dtype, weak_type)
def _unravel_array_into_pytree(pytree, axis, example, arr):
  """Unravel an array into a PyTree with a given structure.

  Args:
    pytree: The pytree that provides the structure.
    axis: The parameter axis is either -1, 0, or 1.  It controls the
      resulting shapes.
    example: If specified, cast the components to the matching dtype/weak_type,
      or else use the pytree leaf type if example is None.
    arr: The array to be unraveled.
  """
  leaves, treedef = tree_flatten(pytree)
  axis = axis % arr.ndim
  leading, trailing = arr.shape[:axis], arr.shape[axis + 1:]
  shapes = [leading + np.shape(leaf) + trailing for leaf in leaves]
  # Split points are the running totals of leaf sizes (all but the last leaf).
  split_points = np.cumsum([np.size(leaf) for leaf in leaves[:-1]])
  parts = _split(arr, split_points, axis)
  reshaped = [_possible_downcast(np.reshape(part, shape),
                                 leaf if example is None else example)
              for part, shape, leaf in zip(parts, shapes, leaves)]
  return tree_unflatten(treedef, reshaped)
def _split(x, indices, axis):
if isinstance(x, np.ndarray):
return np.split(x, indices, axis)
else:
return x.split(indices, axis)
def vmap(fun: F, in_axes=0, out_axes=0, axis_name=None, axis_size=None) -> F:
  """Vectorizing map. Creates a function which maps ``fun`` over argument axes.

  Args:
    fun: Function to be mapped over additional axes.
    in_axes: An integer, None, or (nested) standard Python container
      (tuple/list/dict) thereof specifying which input array axes to map over.

      If each positional argument to ``fun`` is an array, then ``in_axes`` can
      be an integer, a None, or a tuple of integers and Nones with length equal
      to the number of positional arguments to ``fun``. An integer or ``None``
      indicates which array axis to map over for all arguments (with ``None``
      indicating not to map any axis), and a tuple indicates which axis to map
      for each corresponding positional argument. Axis integers must be in the
      range ``[-ndim, ndim)`` for each array, where ``ndim`` is the number of
      dimensions (axes) of the corresponding input array.

      If the positional arguments to ``fun`` are container types, the
      corresponding element of ``in_axes`` can itself be a matching container,
      so that distinct array axes can be mapped for different container
      elements. ``in_axes`` must be a container tree prefix of the positional
      argument tuple passed to ``fun``.

      Either ``axis_size`` must be provided explicitly, or at least one
      positional argument must have ``in_axes`` not None. The sizes of the
      mapped input axes for all mapped positional arguments must all be equal.

      Arguments passed as keywords are always mapped over their leading axis
      (i.e. axis index 0).

      See below for examples.
    out_axes: An integer, None, or (nested) standard Python container
      (tuple/list/dict) thereof indicating where the mapped axis should appear
      in the output. All outputs with a mapped axis must have a non-None
      ``out_axes`` specification. Axis integers must be in the range ``[-ndim,
      ndim)`` for each output array, where ``ndim`` is the number of dimensions
      (axes) of the array returned by the :func:`vmap`-ed function, which is one
      more than the number of dimensions (axes) of the corresponding array
      returned by ``fun``.
    axis_name: Optional, a hashable Python object used to identify the mapped
      axis so that parallel collectives can be applied.
    axis_size: Optional, an integer indicating the size of the axis to be
      mapped. If not provided, the mapped axis size is inferred from arguments.

  Returns:
    Batched/vectorized version of ``fun`` with arguments that correspond to
    those of ``fun``, but with extra array axes at positions indicated by
    ``in_axes``, and a return value that corresponds to that of ``fun``, but
    with extra array axes at positions indicated by ``out_axes``.

  For example, we can implement a matrix-matrix product using a vector dot
  product:

  >>> import jax.numpy as jnp
  >>>
  >>> vv = lambda x, y: jnp.vdot(x, y)  #  ([a], [a]) -> []
  >>> mv = vmap(vv, (0, None), 0)      #  ([b,a], [a]) -> [b]      (b is the mapped axis)
  >>> mm = vmap(mv, (None, 1), 1)      #  ([b,a], [a,c]) -> [b,c]  (c is the mapped axis)

  Here we use ``[a,b]`` to indicate an array with shape (a,b). Here are some
  variants:

  >>> mv1 = vmap(vv, (0, 0), 0)   #  ([b,a], [b,a]) -> [b]        (b is the mapped axis)
  >>> mv2 = vmap(vv, (0, 1), 0)   #  ([b,a], [a,b]) -> [b]        (b is the mapped axis)
  >>> mm2 = vmap(mv2, (1, 1), 0)  #  ([b,c,a], [a,c,b]) -> [c,b]  (c is the mapped axis)

  Here's an example of using container types in ``in_axes`` to specify which
  axes of the container elements to map over:

  >>> A, B, C, D = 2, 3, 4, 5
  >>> x = jnp.ones((A, B))
  >>> y = jnp.ones((B, C))
  >>> z = jnp.ones((C, D))
  >>> def foo(tree_arg):
  ...   x, (y, z) = tree_arg
  ...   return jnp.dot(x, jnp.dot(y, z))
  >>> tree = (x, (y, z))
  >>> print(foo(tree))
  [[12. 12. 12. 12. 12.]
   [12. 12. 12. 12. 12.]]
  >>> from jax import vmap
  >>> K = 6  # batch size
  >>> x = jnp.ones((K, A, B))  # batch axis in different locations
  >>> y = jnp.ones((B, K, C))
  >>> z = jnp.ones((C, D, K))
  >>> tree = (x, (y, z))
  >>> vfoo = vmap(foo, in_axes=((0, (1, 2)),))
  >>> print(vfoo(tree).shape)
  (6, 2, 5)

  Here's another example using container types in ``in_axes``, this time a
  dictionary, to specify the elements of the container to map over:

  >>> dct = {'a': 0., 'b': jnp.arange(5.)}
  >>> x = 1.
  >>> def foo(dct, x):
  ...  return dct['a'] + dct['b'] + x
  >>> out = vmap(foo, in_axes=({'a': None, 'b': 0}, None))(dct, x)
  >>> print(out)
  [1. 2. 3. 4. 5.]

  The results of a vectorized function can be mapped or unmapped. For example,
  the function below returns a pair with the first element mapped and the second
  unmapped. Only for unmapped results we can specify ``out_axes`` to be ``None``
  (to keep it unmapped).

  >>> print(vmap(lambda x, y: (x + y, y * 2.), in_axes=(0, None), out_axes=(0, None))(jnp.arange(2.), 4.))
  (DeviceArray([4., 5.], dtype=float32), 8.0)

  If the ``out_axes`` is specified for an unmapped result, the result is
  broadcast across the mapped axis:

  >>> print(vmap(lambda x, y: (x + y, y * 2.), in_axes=(0, None), out_axes=0)(jnp.arange(2.), 4.))
  (DeviceArray([4., 5.], dtype=float32), DeviceArray([8., 8.], dtype=float32, weak_type=True))

  If the ``out_axes`` is specified for a mapped result, the result is transposed
  accordingly.

  Finally, here's an example using ``axis_name`` together with collectives:

  >>> xs = jnp.arange(3. * 4.).reshape(3, 4)
  >>> print(vmap(lambda x: lax.psum(x, 'i'), axis_name='i')(xs))
  [[12. 15. 18. 21.]
   [12. 15. 18. 21.]
   [12. 15. 18. 21.]]

  See the :py:func:`jax.pmap` docstring for more examples involving collectives.
  """
  _check_callable(fun)
  # Prepend a note to the wrapped function's docstring so vmap-ed functions
  # remain self-describing.
  docstr = ("Vectorized version of {fun}. Takes similar arguments as {fun} "
            "but with additional array axes over which {fun} is mapped.")
  if fun.__doc__:
    docstr += "\n\nOriginal documentation:\n\n"
    docstr += fun.__doc__

  axis_name = core.no_axis_name if axis_name is None else axis_name

  if isinstance(in_axes, list):
    # To be a tree prefix of the positional args tuple, in_axes can never be a
    # list: if in_axes is not a leaf, it must be a tuple of trees. However,
    # in cases like these users expect tuples and lists to be treated
    # essentially interchangeably, so we canonicalize lists to tuples here
    # rather than raising an error. https://github.com/google/jax/issues/2367
    in_axes = tuple(in_axes)

  # Axis specs must be ints (or one of batching's extended spec types) at
  # every leaf; reject anything else up front with a clear error.
  if not all(type(l) is int or type(l) in batching.spec_types
             for l in tree_leaves(in_axes)):
    raise TypeError("vmap in_axes must be an int, None, or (nested) container "
                    f"with those types as leaves, but got {in_axes}.")
  if not all(type(l) is int or type(l) in batching.spec_types
             for l in tree_leaves(out_axes)):
    raise TypeError("vmap out_axes must be an int, None, or (nested) container "
                    f"with those types as leaves, but got {out_axes}.")

  @wraps(fun, docstr=docstr)
  @api_boundary
  def batched_fun(*args, **kwargs):
    args_flat, in_tree = tree_flatten((args, kwargs), is_leaf=batching.is_vmappable)
    f = lu.wrap_init(fun)
    flat_fun, out_tree = batching.flatten_fun_for_vmap(f, in_tree)
    # kwargs are always mapped over their leading axis, hence the (in_axes, 0)
    # pair with kws=True.
    in_axes_flat = flatten_axes("vmap in_axes", in_tree, (in_axes, 0), kws=True)
    axis_size_ = (axis_size if axis_size is not None else
                  _mapped_axis_size(in_tree, args_flat, in_axes_flat, "vmap",
                                    kws=True))
    # out_tree() is only valid after flat_fun has run, so out_axes flattening
    # is deferred via a lambda.
    out_flat = batching.batch(
        flat_fun, axis_name, axis_size_, in_axes_flat,
        lambda: flatten_axes("vmap out_axes", out_tree(), out_axes)
    ).call_wrapped(*args_flat)
    return tree_unflatten(out_tree(), out_flat)

  return batched_fun
def _mapped_axis_size(tree, vals, dims, name, *, kws=False):
  """Infer the common size of the mapped axis from the flattened arguments.

  Args:
    tree: the pytree structure of the (args, kwargs) the user passed.
    vals: flattened argument leaves.
    dims: per-leaf mapped axis index, or None for unmapped leaves.
    name: transformation name ("vmap"/"pmap"), used in error messages.
    kws: whether `tree` includes a kwargs subtree (affects error formatting).

  Raises:
    ValueError: if no argument is mapped, if a mapped leaf's rank is too
      small for its axis, or if the mapped axes disagree in size.
  """
  if not vals:
    args, kwargs = tree_unflatten(tree, vals)
    raise ValueError(
        f"{name} wrapped function must be passed at least one argument "
        f"containing an array, got empty *args={args} and **kwargs={kwargs}"
    )

  def _get_axis_size(name: str, shape: Tuple[int, ...], axis: int):
    # IndexError: axis out of range; TypeError: leaf has no shape tuple.
    try:
      return shape[axis]
    except (IndexError, TypeError) as e:
      min_rank = axis + 1 if axis >= 0 else -axis
      raise ValueError(f"{name} was requested to map its argument along axis {axis}, "
                       f"which implies that its rank should be at least {min_rank}, "
                       f"but is only {len(shape)} (its shape is {shape})") from e

  mapped_axis_sizes = {_get_axis_size(name, np.shape(x), d)
                       for x, d in zip(vals, dims)
                       if d is not None}
  # Exactly one distinct size means everything agrees; the tuple-unpack raises
  # ValueError both for zero sizes (nothing mapped) and for more than one.
  try:
    size, = mapped_axis_sizes
    return size
  except ValueError as e:
    if not mapped_axis_sizes:
      raise ValueError(f"{name} must have at least one non-None value in in_axes") from e
    msg = f"{name} got inconsistent sizes for array axes to be mapped:\n" + "{}"
    # we switch the error message based on whether args is a tuple of arrays,
    # in which case we can produce an error message based on argument indices,
    # or if it has nested containers.
    if kws:
      # if keyword arguments are included in the tree, we adapt the error
      # message to be only about the positional arguments
      tree, leaf = treedef_children(tree)
      assert treedef_is_leaf(leaf)
    # TODO(mattjj,phawkins): add a way to inspect pytree kind more directly
    if tree == tree_flatten((core.unit,) * tree.num_leaves)[1]:
      lines1 = [f"arg {i} has shape {np.shape(x)} and axis {d} is to be mapped"
                for i, (x, d) in enumerate(zip(vals, dims))]
      sizes = collections.defaultdict(list)
      for i, (x, d) in enumerate(zip(vals, dims)):
        if d is not None:
          sizes[x.shape[d]].append(i)
      lines2 = ["{} {} {} {} to be mapped of size {}".format(
                    "args" if len(idxs) > 1 else "arg",
                    ", ".join(map(str, idxs)),
                    "have" if len(idxs) > 1 else "has",
                    "axes" if len(idxs) > 1 else "an axis",
                    size)
                for size, idxs in sizes.items()]
      raise ValueError(msg.format("\n".join(lines1 + ["so"] + lines2))) from None
    else:
      sizes = [x.shape[d] if d is not None else None for x, d in zip(vals, dims)]
      sizes = tree_unflatten(tree, sizes)
      raise ValueError(msg.format(f"the tree of axis sizes is:\n{sizes}")) from None
def pmap(
    fun: F,
    axis_name: Optional[AxisName] = None,
    *,
    in_axes=0,
    out_axes=0,
    static_broadcasted_argnums: Union[int, Iterable[int]] = (),
    devices: Optional[Sequence[xc.Device]] = None,
    backend: Optional[str] = None,
    axis_size: Optional[int] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    global_arg_shapes: Optional[Tuple[Tuple[int, ...], ...]] = None,
) -> F:
  """Parallel map with support for collective operations.

  The purpose of :py:func:`pmap` is to express single-program multiple-data
  (SPMD) programs. Applying :py:func:`pmap` to a function will compile the
  function with XLA (similarly to :py:func:`jit`), then execute it in parallel
  on XLA devices, such as multiple GPUs or multiple TPU cores. Semantically it
  is comparable to :py:func:`vmap` because both transformations map a function
  over array axes, but where :py:func:`vmap` vectorizes functions by pushing the
  mapped axis down into primitive operations, :py:func:`pmap` instead replicates
  the function and executes each replica on its own XLA device in parallel.

  The mapped axis size must be less than or equal to the number of local XLA
  devices available, as returned by :py:func:`jax.local_device_count()` (unless
  ``devices`` is specified, see below). For nested :py:func:`pmap` calls, the
  product of the mapped axis sizes must be less than or equal to the number of
  XLA devices.

  .. note::
    :py:func:`pmap` compiles ``fun``, so while it can be combined with
    :py:func:`jit`, it's usually unnecessary.

  **Multi-process platforms:** On multi-process platforms such as TPU pods,
  :py:func:`pmap` is designed to be used in SPMD Python programs, where every
  process is running the same Python code such that all processes run the same
  pmapped function in the same order. Each process should still call the pmapped
  function with mapped axis size equal to the number of *local* devices (unless
  ``devices`` is specified, see below), and an array of the same leading axis
  size will be returned as usual. However, any collective operations in ``fun``
  will be computed over *all* participating devices, including those on other
  processes, via device-to-device communication. Conceptually, this can be
  thought of as running a pmap over a single array sharded across processes,
  where each process "sees" only its local shard of the input and output. The
  SPMD model requires that the same multi-process pmaps must be run in the same
  order on all devices, but they can be interspersed with arbitrary operations
  running in a single process.

  Args:
    fun: Function to be mapped over argument axes. Its arguments and return
      value should be arrays, scalars, or (nested) standard Python containers
      (tuple/list/dict) thereof. Positional arguments indicated by
      ``static_broadcasted_argnums`` can be anything at all, provided they are
      hashable and have an equality operation defined.
    axis_name: Optional, a hashable Python object used to identify the mapped
      axis so that parallel collectives can be applied.
    in_axes: A non-negative integer, None, or nested Python container thereof
      that specifies which axes of positional arguments to map over. Arguments
      passed as keywords are always mapped over their leading axis (i.e. axis
      index 0). See :py:func:`vmap` for details.
    out_axes: A non-negative integer, None, or nested Python container thereof
      indicating where the mapped axis should appear in the output. All outputs
      with a mapped axis must have a non-None ``out_axes`` specification
      (see :py:func:`vmap`).
    static_broadcasted_argnums: An int or collection of ints specifying which
      positional arguments to treat as static (compile-time constant).
      Operations that only depend on static arguments will be constant-folded.
      Calling the pmapped function with different values for these constants
      will trigger recompilation. If the pmapped function is called with fewer
      positional arguments than indicated by ``static_argnums`` then an error is
      raised. Each of the static arguments will be broadcasted to all devices.
      Arguments that are not arrays or containers thereof must be marked as
      static. Defaults to ().

      Static arguments must be hashable, meaning both ``__hash__`` and
      ``__eq__`` are implemented, and should be immutable.
    devices: This is an experimental feature and the API is likely to change.
      Optional, a sequence of Devices to map over. (Available devices can be
      retrieved via jax.devices()). Must be given identically for each process
      in multi-process settings (and will therefore include devices across
      processes). If specified, the size of the mapped axis must be equal to
      the number of devices in the sequence local to the given process. Nested
      :py:func:`pmap` s with ``devices`` specified in either the inner or outer
      :py:func:`pmap` are not yet supported.
    backend: This is an experimental feature and the API is likely to change.
      Optional, a string representing the XLA backend. 'cpu', 'gpu', or 'tpu'.
    axis_size: Optional; the size of the mapped axis.
    donate_argnums: Specify which arguments are "donated" to the computation.
      It is safe to donate arguments if you no longer need them once the
      computation has finished. In some cases XLA can make use of donated
      buffers to reduce the amount of memory needed to perform a computation,
      for example recycling one of your input buffers to store a result. You
      should not reuse buffers that you donate to a computation, JAX will raise
      an error if you try to.
    global_arg_shapes: Optional, must be set when using pmap(sharded_jit) and
      the partitioned values span multiple processes. The global cross-process
      per-replica shape of each argument, i.e. does not include the leading
      pmapped dimension. Can be None for replicated arguments. This API is
      likely to change in the future.

  Returns:
    A parallelized version of ``fun`` with arguments that correspond to those of
    ``fun`` but with extra array axes at positions indicated by ``in_axes`` and
    with output that has an additional leading array axis (with the same size).

  For example, assuming 8 XLA devices are available, :py:func:`pmap` can be used
  as a map along a leading array axis:

  >>> import jax.numpy as jnp
  >>>
  >>> out = pmap(lambda x: x ** 2)(jnp.arange(8))  # doctest: +SKIP
  >>> print(out)  # doctest: +SKIP
  [0, 1, 4, 9, 16, 25, 36, 49]

  When the leading dimension is smaller than the number of available devices JAX
  will simply run on a subset of devices:

  >>> x = jnp.arange(3 * 2 * 2.).reshape((3, 2, 2))
  >>> y = jnp.arange(3 * 2 * 2.).reshape((3, 2, 2)) ** 2
  >>> out = pmap(jnp.dot)(x, y)  # doctest: +SKIP
  >>> print(out)  # doctest: +SKIP
  [[[    4.     9.]
    [   12.    29.]]
   [[  244.   345.]
    [  348.   493.]]
   [[ 1412.  1737.]
    [ 1740.  2141.]]]

  If your leading dimension is larger than the number of available devices you
  will get an error:

  >>> pmap(lambda x: x ** 2)(jnp.arange(9))  # doctest: +SKIP
  ValueError: ... requires 9 replicas, but only 8 XLA devices are available

  As with :py:func:`vmap`, using ``None`` in ``in_axes`` indicates that an
  argument doesn't have an extra axis and should be broadcasted, rather than
  mapped, across the replicas:

  >>> x, y = jnp.arange(2.), 4.
  >>> out = pmap(lambda x, y: (x + y, y * 2.), in_axes=(0, None))(x, y)  # doctest: +SKIP
  >>> print(out)  # doctest: +SKIP
  ([4., 5.], [8., 8.])

  Note that :py:func:`pmap` always returns values mapped over their leading axis,
  equivalent to using ``out_axes=0`` in :py:func:`vmap`.

  In addition to expressing pure maps, :py:func:`pmap` can also be used to express
  parallel single-program multiple-data (SPMD) programs that communicate via
  collective operations. For example:

  >>> f = lambda x: x / jax.lax.psum(x, axis_name='i')
  >>> out = pmap(f, axis_name='i')(jnp.arange(4.))  # doctest: +SKIP
  >>> print(out)  # doctest: +SKIP
  [ 0.          0.16666667  0.33333334  0.5       ]
  >>> print(out.sum())  # doctest: +SKIP
  1.0

  In this example, ``axis_name`` is a string, but it can be any Python object
  with ``__hash__`` and ``__eq__`` defined.

  The argument ``axis_name`` to :py:func:`pmap` names the mapped axis so that
  collective operations, like :func:`jax.lax.psum`, can refer to it. Axis names
  are important particularly in the case of nested :py:func:`pmap` functions,
  where collective operations can operate over distinct axes:

  >>> from functools import partial
  >>> import jax
  >>>
  >>> @partial(pmap, axis_name='rows')
  ... @partial(pmap, axis_name='cols')
  ... def normalize(x):
  ...   row_normed = x / jax.lax.psum(x, 'rows')
  ...   col_normed = x / jax.lax.psum(x, 'cols')
  ...   doubly_normed = x / jax.lax.psum(x, ('rows', 'cols'))
  ...   return row_normed, col_normed, doubly_normed
  >>>
  >>> x = jnp.arange(8.).reshape((4, 2))
  >>> row_normed, col_normed, doubly_normed = normalize(x)  # doctest: +SKIP
  >>> print(row_normed.sum(0))  # doctest: +SKIP
  [ 1.  1.]
  >>> print(col_normed.sum(1))  # doctest: +SKIP
  [ 1.  1.  1.  1.]
  >>> print(doubly_normed.sum((0, 1)))  # doctest: +SKIP
  1.0

  On multi-process platforms, collective operations operate over all devices,
  including those on other processes. For example, assuming the following code
  runs on two processes with 4 XLA devices each:

  >>> f = lambda x: x + jax.lax.psum(x, axis_name='i')
  >>> data = jnp.arange(4) if jax.process_index() == 0 else jnp.arange(4, 8)
  >>> out = pmap(f, axis_name='i')(data)  # doctest: +SKIP
  >>> print(out)  # doctest: +SKIP
  [28 29 30 31] # on process 0
  [32 33 34 35] # on process 1

  Each process passes in a different length-4 array, corresponding to its 4
  local devices, and the psum operates over all 8 values. Conceptually, the two
  length-4 arrays can be thought of as a sharded length-8 array (in this example
  equivalent to jnp.arange(8)) that is mapped over, with the length-8 mapped
  axis given name 'i'. The pmap call on each process then returns the
  corresponding length-4 output shard.

  The ``devices`` argument can be used to specify exactly which devices are used
  to run the parallel computation. For example, again assuming a single process
  with 8 devices, the following code defines two parallel computations, one
  which runs on the first six devices and one on the remaining two:

  >>> from functools import partial
  >>> @partial(pmap, axis_name='i', devices=jax.devices()[:6])
  ... def f1(x):
  ...   return x / jax.lax.psum(x, axis_name='i')
  >>>
  >>> @partial(pmap, axis_name='i', devices=jax.devices()[-2:])
  ... def f2(x):
  ...   return jax.lax.psum(x ** 2, axis_name='i')
  >>>
  >>> print(f1(jnp.arange(6.)))  # doctest: +SKIP
  [0.         0.06666667 0.13333333 0.2        0.26666667 0.33333333]
  >>> print(f2(jnp.array([2., 3.])))  # doctest: +SKIP
  [ 13.  13.]
  """
  # Select the implementation: the experimental C++ fast path when the flag is
  # set, otherwise the pure-Python path. Both share validation code.
  pmap_impl = _cpp_pmap if FLAGS.experimental_cpp_pmap else _python_pmap

  return pmap_impl(
      fun,
      axis_name,
      in_axes=in_axes,
      out_axes=out_axes,
      static_broadcasted_argnums=static_broadcasted_argnums,
      devices=devices,
      backend=backend,
      axis_size=axis_size,
      donate_argnums=donate_argnums,
      global_arg_shapes=global_arg_shapes)
class PmapCallInfo(NamedTuple):
  """Everything _prepare_pmap computes before handing off to pxla.xla_pmap."""
  flat_fun: lu.WrappedFun                  # fun, flattened to leaf lists
  in_tree: PyTreeDef                       # structure of (dyn_args, kwargs)
  out_tree: PyTreeDef                      # thunk-backed output structure
  flat_args: Sequence[Any]                 # flattened argument leaves
  donated_invars: Sequence[bool]           # per-leaf donation flags
  in_axes_flat: Sequence[Optional[int]]    # per-leaf mapped axis (None = broadcast)
  local_axis_size: int                     # mapped axis size on this process
  global_arg_shapes_flat: Sequence[Optional[Tuple[int, ...]]]  # per-leaf global shapes
  out_axes_thunk: HashableFunction         # lazily-flattened out_axes
def _prepare_pmap(fun, in_axes, out_axes, static_broadcasted_tuple,
                  donate_tuple, global_arg_shapes, args, kwargs):
  """Flatten and validate a pmap call's arguments.

  Partials out static arguments, flattens the remaining (args, kwargs) pytree,
  flattens the axis/shape specs against it, and infers the local mapped axis
  size. Returns a PmapCallInfo bundle consumed by pxla.xla_pmap.

  Raises:
    ValueError: if a static argnum exceeds the number of positional args, or
      if mapped axis sizes are inconsistent (via _mapped_axis_size).
    NotImplementedError: if any out_axes leaf is None.
  """
  f = lu.wrap_init(fun)
  if static_broadcasted_tuple:
    if max(static_broadcasted_tuple) >= len(args):
      raise ValueError(
          f"pmapped function has static_broadcasted_argnums={static_broadcasted_tuple}"
          f" but was called with only {len(args)} positional "
          f"argument{'s' if len(args) > 1 else ''}. "
          "All static broadcasted arguments must be passed positionally.")
    dyn_argnums = [i for i in range(len(args))
                   if i not in static_broadcasted_tuple]
    f, dyn_args = argnums_partial(f, dyn_argnums, args)

    # Restrict per-argument specs to the dynamic (non-static) arguments.
    if isinstance(in_axes, tuple):
      dyn_in_axes = tuple(in_axes[i] for i in dyn_argnums)
    else:
      dyn_in_axes = in_axes
    if isinstance(global_arg_shapes, tuple):
      dyn_global_arg_shapes = tuple(global_arg_shapes[i] for i in dyn_argnums)
    else:
      dyn_global_arg_shapes = global_arg_shapes
  else:
    dyn_args, dyn_in_axes = args, in_axes
    dyn_global_arg_shapes = global_arg_shapes
  args, in_tree = tree_flatten((dyn_args, kwargs))

  if donate_tuple:
    donated_invars = donation_vector(donate_tuple, dyn_args, kwargs)
  else:
    donated_invars = (False,) * len(args)
  in_axes_flat = tuple(flatten_axes("pmap in_axes", in_tree, (dyn_in_axes, 0)))
  global_arg_shapes_flat = tuple(flatten_axes(
      "pmap global_arg_shapes", in_tree, (dyn_global_arg_shapes, None),
      kws=True))
  local_axis_size = _mapped_axis_size(
      in_tree, args, in_axes_flat, "pmap", kws=True)

  for arg in args:
    _check_arg(arg)

  flat_fun, out_tree = flatten_fun(f, in_tree)

  # Fix: tree_flatten returns a (leaves, treedef) pair, so iterating it never
  # inspects the actual leaves and the check below could never fire; use
  # tree_leaves to examine the out_axes leaves themselves.
  if any(out_axis is None for out_axis in tree_leaves(out_axes)):
    raise NotImplementedError("None out_axes in pmap are not supported yet")
  # NOTE: We don't put out_tree() in the closure, because it's (1) non-hashable,
  # (2) depends deterministically on flat_fun (at least that's the assumption
  # that we make).
  if out_axes == 0:
    # TODO(apaszke,mattjj): flatten_axes assumes that the output pytree is
    # functorial (i.e. it can hold leaves of any type), but some user code
    # breaks this assumption. This is a stop-gap solution to keep the old
    # out_axes == 0 path working as we look for a better solution.
    out_axes_thunk = HashableFunction(
        lambda: (0,) * out_tree().num_leaves,
        closure=out_axes)
  else:
    # out_axes_thunk closes over the out_axes, they are flattened here to make
    # them hashable.
    out_axes_leaves, out_axes_treedef = tree_flatten(out_axes)
    out_axes_thunk = HashableFunction(
        lambda: tuple(flatten_axes("pmap out_axes", out_tree(),
                                   tree_unflatten(out_axes_treedef,
                                                  list(out_axes_leaves)))),
        closure=(tuple(out_axes_leaves), out_axes_treedef))

  return PmapCallInfo(flat_fun=flat_fun,
                      in_tree=in_tree,
                      out_tree=out_tree,
                      flat_args=args,
                      donated_invars=donated_invars,
                      in_axes_flat=in_axes_flat,
                      local_axis_size=local_axis_size,
                      global_arg_shapes_flat=global_arg_shapes_flat,
                      out_axes_thunk=out_axes_thunk)
def _get_f_mapped(
    *,
    fun: F,
    axis_name: Optional[AxisName],
    in_axes=0,
    out_axes=0,
    static_broadcasted_tuple: Tuple[int],
    devices: Optional[Sequence[xc.Device]],
    backend: Optional[str],
    axis_size: Optional[int],
    donate_tuple: Tuple[int],
    global_arg_shapes: Optional[Tuple[Tuple[int, ...], ...]],
):
  """Build a callable that prepares and dispatches one pmap execution.

  The returned function performs argument flattening via _prepare_pmap, runs
  pxla.xla_pmap, and returns the (out_tree thunk, flat outputs) pair.
  """
  def f_pmapped(*args, **kwargs):
    info = _prepare_pmap(
        fun, in_axes, out_axes, static_broadcasted_tuple, donate_tuple,
        global_arg_shapes, args, kwargs)
    out_flat = pxla.xla_pmap(
        info.flat_fun, *info.flat_args, backend=backend, axis_name=axis_name,
        axis_size=info.local_axis_size, global_axis_size=axis_size,
        devices=None if devices is None else tuple(devices),
        in_axes=info.in_axes_flat, out_axes_thunk=info.out_axes_thunk,
        name=info.flat_fun.__name__, donated_invars=info.donated_invars,
        global_arg_shapes=info.global_arg_shapes_flat)
    return info.out_tree, out_flat

  return f_pmapped
def _shared_code_pmap(fun, axis_name, static_broadcasted_argnums,
                      donate_argnums, in_axes, out_axes):
  """Validation and canonicalization shared by the Python and C++ pmap paths."""
  # axis_size is an optional integer representing the global axis size. The
  # aggregate size (across all processes) size of the mapped axis must match
  # the given value.
  _check_callable(fun)
  if axis_name is None:
    axis_name = core._TempAxisName(fun)
  static_broadcasted_tuple = _ensure_index_tuple(static_broadcasted_argnums)
  donate_tuple = rebase_donate_argnums(
      _ensure_index_tuple(donate_argnums), static_broadcasted_tuple)

  # Both axis specs must have plain-int leaves (None is a leaf-free subtree).
  for spec_name, spec in (("in_axes", in_axes), ("out_axes", out_axes)):
    if any(type(l) is not int for l in tree_leaves(spec)):
      raise TypeError(f"pmap {spec_name} must be an int, None, or (nested) container "
                      f"with those types as leaves, but got {spec}.")

  return axis_name, static_broadcasted_tuple, donate_tuple
def _python_pmap(
    fun: F,
    axis_name: Optional[AxisName] = None,
    *,
    in_axes=0,
    out_axes=0,
    static_broadcasted_argnums: Union[int, Iterable[int]] = (),
    devices: Optional[Sequence[xc.Device]] = None,
    backend: Optional[str] = None,
    axis_size: Optional[int] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    global_arg_shapes: Optional[Tuple[Tuple[int, ...], ...]] = None,
) -> F:
  """The Python only implementation."""
  axis_name, static_broadcasted_tuple, donate_tuple = _shared_code_pmap(
      fun, axis_name, static_broadcasted_argnums, donate_argnums, in_axes,
      out_axes)

  @wraps(fun)
  @api_boundary
  def f_pmapped(*args, **kwargs):
    # Build the mapped callable per call; _prepare_pmap (inside) consumes the
    # concrete arguments.
    mapped = _get_f_mapped(
        fun=fun,
        axis_name=axis_name,
        in_axes=in_axes,
        out_axes=out_axes,
        static_broadcasted_tuple=static_broadcasted_tuple,
        devices=devices,
        backend=backend,
        axis_size=axis_size,
        global_arg_shapes=global_arg_shapes,
        donate_tuple=donate_tuple)
    out_tree, out_flat = mapped(*args, **kwargs)
    return tree_unflatten(out_tree(), out_flat)

  # Expose ahead-of-time lowering alongside the callable.
  f_pmapped.lower = _pmap_lower(
      fun, axis_name, in_axes, out_axes, static_broadcasted_tuple, devices,
      backend, axis_size, global_arg_shapes, donate_tuple)

  return f_pmapped
class _PmapFastpathData(NamedTuple):
  """Everything the C++ pmap fast path needs to replay a cached execution."""
  version: int  # For forward and backward compatibility
  xla_executable: xla.XlaExecutable
  in_handler: Any
  out_handler: Any
  out_pytree_def: Any
  # Data needed to handle the inputs.
  input_sharding_specs: Sequence[pxla.ShardingSpec]
  input_devices: Sequence[xc.Device]
  input_indices: Sequence[pxla.Index]
  # Data needed to build the ShardedDeviceArray from C++.
  out_sharding_specs: Sequence[pxla.ShardingSpec]
  out_indices: Sequence[pxla.Index]
  out_avals: Sequence[Any]
def _cpp_pmap(
    fun: F,
    axis_name: Optional[AxisName] = None,
    *,
    in_axes=0,
    out_axes=0,
    static_broadcasted_argnums: Union[int, Iterable[int]] = (),
    devices: Optional[Sequence[xc.Device]] = None,
    backend: Optional[str] = None,
    axis_size: Optional[int] = None,
    donate_argnums: Union[int, Iterable[int]] = (),
    global_arg_shapes: Optional[Tuple[Tuple[int, ...], ...]] = None,
) -> F:
  """pmap implementation dispatched through the C++ ``pmap_lib`` fast path."""
  # Normalize/validate arguments shared with the Python pmap implementation.
  axis_name, static_broadcasted_tuple, donate_tuple = _shared_code_pmap(
      fun, axis_name, static_broadcasted_argnums, donate_argnums, in_axes,
      out_axes)
  del static_broadcasted_argnums, donate_argnums  # use the normalized tuples

  @api_boundary
  def cache_miss(*args, **kwargs):
    # Invoked by the C++ dispatcher when it has no cached executable for
    # these arguments: run the ordinary Python pmap path once, then decide
    # whether the result can be replayed by the C++ fast path next time.
    f_pmapped_ = _get_f_mapped(
        fun=fun,
        axis_name=axis_name,
        in_axes=in_axes,
        out_axes=out_axes,
        static_broadcasted_tuple=static_broadcasted_tuple,
        devices=devices,
        backend=backend,
        axis_size=axis_size,
        global_arg_shapes=global_arg_shapes,
        donate_tuple=donate_tuple)
    out_tree, out_flat = f_pmapped_(*args, **kwargs)
    out_pytree_def = out_tree()
    out = tree_unflatten(out_pytree_def, out_flat)

    ### Decide whether we can support the C++ fast path
    execute: Optional[functools.partial] = None
    execute = pxla.parallel_callable.most_recent_entry()
    use_fastpath = (
        execute is not None and
        # We don't support JAX extension backends. In particular, some
        # extensions do not return a partial with a `func` attribute.
        getattr(execute[0], "func", None) is pxla.execute_replicated and
        # No tracers in the outputs. Checking for ShardedDeviceArray should be
        # sufficient, but we use the more general `DeviceArray`.
        all(isinstance(x, device_array.DeviceArray) for x in out_flat))

    ### If we can use the fastpath, we return required info to the caller.
    if use_fastpath:
      xla_executable, backend_, in_handler, out_handler = execute[0].args
      fastpath_data = _PmapFastpathData(
          version=1,
          xla_executable=xla_executable,
          in_handler=in_handler,
          out_handler=out_handler,
          out_pytree_def=out_pytree_def,
          input_sharding_specs=in_handler.sharding_specs,
          input_devices=in_handler.local_devices,
          input_indices=in_handler.input_indices,
          out_sharding_specs=out_handler.out_specs,
          out_indices=out_handler.out_indices,
          out_avals=out_handler.unmapped_local_out_avals,
      )
    else:
      fastpath_data = None

    return out, fastpath_data

  cpp_mapped_f = pmap_lib.pmap(fun, cache_miss,
                               static_broadcasted_tuple, pxla._shard_arg)

  f_pmapped = wraps(fun)(cpp_mapped_f)
  # Expose ahead-of-time lowering alongside the eager entry point.
  f_pmapped.lower = _pmap_lower(
      fun, axis_name, in_axes, out_axes, static_broadcasted_tuple, devices,
      backend, axis_size, global_arg_shapes, donate_tuple)
  return f_pmapped
def _pmap_lower(fun, axis_name, in_axes, out_axes, static_broadcasted_tuple,
                devices, backend, axis_size, global_arg_shapes, donate_tuple):
  """Make a ``lower`` method for pmapped functions."""
  # If the function we returned from ``pmap`` were a class instance,
  # this might naturally be a method, with ``fun`` as a ``self`` and
  # all the other arguments stored as attributes.

  @api_boundary
  def lower(*args, **kwargs) -> Lowered:
    """Lower a parallel-mapped form of this function for the given arguments.

    A parallel-mapped and lowered function is staged out of Python and
    translated to a compiler's input language, possibly in a
    backend-dependent manner. It is ready for compilation but is not yet
    compiled. It represents a function intended for SPMD execution on
    multiple devices.

    Returns:
      A ``Lowered`` instance representing the post-map lowering.
    """
    # Flatten arguments and resolve in/out axes exactly as an eager pmap
    # call would, but stop after lowering rather than compiling/executing.
    p = _prepare_pmap(
        fun, in_axes, out_axes, static_broadcasted_tuple, donate_tuple,
        global_arg_shapes, args, kwargs)
    abstract_args = map(xla.abstractify, p.flat_args)
    computation = pxla.lower_parallel_callable(
        p.flat_fun, backend, axis_name,
        axis_size=p.local_axis_size, global_axis_size=axis_size,
        devices=None if devices is None else tuple(devices),
        name=p.flat_fun.__name__,
        in_axes=p.in_axes_flat,
        out_axes_thunk=p.out_axes_thunk,
        donated_invars=p.donated_invars,
        global_arg_shapes=p.global_arg_shapes_flat,
        avals=abstract_args)
    return Lowered(computation, p.in_tree, p.out_tree(), donate_tuple)

  return lower
def mask(fun: Callable, in_shapes, out_shape=None) -> Callable:
  """Wrap ``fun`` to run on padded arguments under a logical-shape environment.

  ``in_shapes`` (and optionally ``out_shape``) are pytrees of shape specs that
  may contain shape polynomials. The returned callable takes ``(args,
  logical_env)`` where ``logical_env`` maps spec variable names to logical
  sizes; when ``out_shape`` is ``None`` it also returns the logical output
  shapes alongside the (padded) outputs.
  """
  _check_callable(fun)
  unique_ids = masking.UniqueIds()

  # Parse the input specs once at wrap time, remapping spec variables to ids
  # unique to this mask() application.
  in_specs, in_shapes_tree = tree_flatten(in_shapes)
  in_specs = map(masking.parse_spec, in_specs)
  in_specs = map(partial(masking.remap_ids, unique_ids), in_specs)

  if out_shape is not None:
    out_specs, out_spec_tree = tree_flatten(out_shape)
    out_specs = map(masking.parse_spec, out_specs)
    out_specs = map(partial(masking.remap_ids, unique_ids), out_specs)

  def wrapped_fun(args, logical_env):
    args_flat, in_tree = tree_flatten(args)
    if in_tree != in_shapes_tree:
      raise TypeError(f"Tree mismatch: Input {in_tree} and shape spec {in_shapes_tree}.")
    # Rekey the user-facing environment by the internal unique ids.
    logical_env = {unique_ids[name] : val for name, val in logical_env.items()}
    in_shapes = map(masking.finalize_spec, in_specs, map(np.shape, args_flat))
    padded_env = masking.bind_shapes(in_shapes, [x.shape for x in args_flat])
    f = lu.wrap_init(fun)
    flat_fun, out_tree_thunk = flatten_fun_nokwargs(f, in_tree)
    outs, out_shapes = masking.mask_fun(
        flat_fun, logical_env, padded_env, args_flat, in_shapes)
    out_tree = out_tree_thunk()

    if out_shape is None:
      # No output spec provided: report the logical output shapes too.
      def logical_shape(poly_shape, padded_val):
        shape = masking.eval_poly_shape(poly_shape, logical_env)
        return ShapeDtypeStruct(shape, core.get_aval(padded_val).dtype)
      out_logicals = map(logical_shape, out_shapes, outs)
      return tree_unflatten(out_tree, outs), tree_unflatten(out_tree, out_logicals)
    else:
      # Check outputs against the user-provided spec, both in logical form
      # and after substituting padded sizes.
      masking.check_shapes(out_specs, out_spec_tree, list(out_shapes), out_tree)
      def padded_spec(shape_spec):
        return tuple(dim if dim is masking._monomorphic_dim else
                     masking.eval_poly(dim, padded_env) for dim in shape_spec)
      masking.check_shapes(map(padded_spec, out_specs), out_spec_tree,
                           map(np.shape, outs), out_tree, "Padded output")
      return tree_unflatten(out_tree, outs)
  return wrapped_fun
@curry
def shapecheck(in_shapes, out_shape, fun: Callable):
  """Statically check ``fun``'s polymorphic output shapes against ``out_shape``.

  Abstractly evaluates ``fun`` at the symbolic input shape specs
  ``in_shapes`` (as float32 avals) and raises if the resulting output shapes
  do not match ``out_shape``. Returns ``fun`` unchanged, so this can be used
  as a decorator via ``@shapecheck(in_shapes, out_shape)``.
  """
  _check_callable(fun)
  in_shapes, in_tree = tree_flatten(in_shapes)
  in_shapes = map(masking.parse_spec, in_shapes)
  out_specs, out_spec_tree = tree_flatten(out_shape)
  out_specs = map(masking.parse_spec, out_specs)
  flat_fun, out_tree_thunk = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
  # dtype is irrelevant to the shape check; float32 is used as a placeholder.
  avals = map(partial(ShapedArray, dtype=np.float32), in_shapes)
  out_shapes = [o.shape for o in pe.abstract_eval_fun(flat_fun.call_wrapped, *avals)]
  masking.check_shapes(map(tuple, out_specs), out_spec_tree,
                       map(tuple, out_shapes), out_tree_thunk())
  return fun
def jvp(
    fun: Callable, primals, tangents, has_aux: bool = False
) -> Tuple[Any, ...]:
  """Computes a (forward-mode) Jacobian-vector product of ``fun``.

  Args:
    fun: Function to be differentiated. Its arguments should be arrays, scalars,
      or standard Python containers of arrays or scalars. It should return an
      array, scalar, or standard Python container of arrays or scalars.
    primals: The primal values at which the Jacobian of ``fun`` should be
      evaluated. Should be either a tuple or a list of arguments,
      and its length should be equal to the number of positional parameters of
      ``fun``.
    tangents: The tangent vector for which the Jacobian-vector product should be
      evaluated. Should be either a tuple or a list of tangents, with the same
      tree structure and array shapes as ``primals``.
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
      first element is considered the output of the mathematical function to be
      differentiated and the second element is auxiliary data. Default False.

  Returns:
    If ``has_aux`` is ``False``, returns a ``(primals_out, tangents_out)`` pair,
    where ``primals_out`` is ``fun(*primals)``,
    and ``tangents_out`` is the Jacobian-vector product of
    ``function`` evaluated at ``primals`` with ``tangents``. The
    ``tangents_out`` value has the same Python tree structure and shapes as
    ``primals_out``. If ``has_aux`` is ``True``, returns a
    ``(primals_out, tangents_out, aux)`` tuple where ``aux``
    is the auxiliary data returned by ``fun``.

  For example:

  >>> import jax
  >>>
  >>> y, v = jax.jvp(jax.numpy.sin, (0.1,), (0.2,))
  >>> print(y)
  0.09983342
  >>> print(v)
  0.19900084
  """
  _check_callable(fun)
  # Argument validation and the actual transformation live in _jvp, which
  # operates on an lu.WrappedFun.
  return _jvp(lu.wrap_init(fun), primals, tangents, has_aux=has_aux)
def _jvp(fun: lu.WrappedFun, primals, tangents, has_aux=False):
  """Variant of jvp() that takes an lu.WrappedFun.

  Args:
    fun: wrapped function to differentiate.
    primals: tuple/list of primal values, one per positional parameter.
    tangents: tuple/list of tangents matching ``primals`` in tree structure,
      shapes, and (tangent-mapped) dtypes.
    has_aux: whether ``fun`` returns an ``(out, aux)`` pair.

  Returns:
    ``(primals_out, tangents_out)``, plus ``aux`` if ``has_aux``.

  Raises:
    TypeError: if primals/tangents are not sequences, or trees/dtypes differ.
    ValueError: if corresponding primal and tangent shapes differ.
  """
  if (not isinstance(primals, (tuple, list)) or
      not isinstance(tangents, (tuple, list))):
    raise TypeError("primal and tangent arguments to jax.jvp must be tuples or lists; "
                    f"found {type(primals).__name__} and {type(tangents).__name__}.")

  ps_flat, tree_def = tree_flatten(primals)
  ts_flat, tree_def_2 = tree_flatten(tangents)
  if tree_def != tree_def_2:
    raise TypeError("primal and tangent arguments to jax.jvp must have the same tree "
                    f"structure; primals have tree structure {tree_def} whereas tangents have "
                    f"tree structure {tree_def_2}.")
  for p, t in safe_zip(ps_flat, ts_flat):
    # int/bool primals take float0 tangents; everything else must carry its
    # exact mapped tangent dtype.
    if core.primal_dtype_to_tangent_dtype(_dtype(p)) != _dtype(t):
      # NOTE: a space was missing after "float0." in the concatenated
      # message, producing "float0.Got ..."; fixed.
      raise TypeError("primal and tangent arguments to jax.jvp do not match; "
                      "dtypes must be equal, or in case of int/bool primal dtype "
                      "the tangent dtype must be float0. "
                      f"Got primal dtype {_dtype(p)} and so expected tangent dtype "
                      f"{core.primal_dtype_to_tangent_dtype(_dtype(p))}, but got "
                      f"tangent dtype {_dtype(t)} instead.")
    if np.shape(p) != np.shape(t):
      # Same missing-space fix: "shapes;Got" -> "shapes; Got".
      raise ValueError("jvp called with different primal and tangent shapes; "
                       f"Got primal shape {np.shape(p)} and tangent shape as {np.shape(t)}")

  if not has_aux:
    flat_fun, out_tree = flatten_fun_nokwargs(fun, tree_def)
    out_primals, out_tangents = ad.jvp(flat_fun).call_wrapped(ps_flat, ts_flat)
    out_tree = out_tree()
    return (tree_unflatten(out_tree, out_primals),
            tree_unflatten(out_tree, out_tangents))
  else:
    flat_fun, out_aux_trees = flatten_fun_nokwargs2(fun, tree_def)
    jvp_fun, aux = ad.jvp(flat_fun, has_aux=True)
    out_primals, out_tangents = jvp_fun.call_wrapped(ps_flat, ts_flat)
    out_tree, aux_tree = out_aux_trees()
    return (tree_unflatten(out_tree, out_primals),
            tree_unflatten(out_tree, out_tangents),
            tree_unflatten(aux_tree, aux()))
def linearize(fun: Callable, *primals) -> Tuple[Any, Callable]:
  """Produces a linear approximation to ``fun`` using :py:func:`jvp` and partial eval.

  Args:
    fun: Function to be differentiated. Its arguments should be arrays, scalars,
      or standard Python containers of arrays or scalars. It should return an
      array, scalar, or standard python container of arrays or scalars.
    primals: The primal values at which the Jacobian of ``fun`` should be
      evaluated. Should be a tuple of arrays, scalar, or standard Python
      container thereof. The length of the tuple is equal to the number of
      positional parameters of ``fun``.

  Returns:
    A pair where the first element is the value of ``f(*primals)`` and the
    second element is a function that evaluates the (forward-mode)
    Jacobian-vector product of ``fun`` evaluated at ``primals`` without re-doing
    the linearization work.

  In terms of values computed, :py:func:`linearize` behaves much like a curried
  :py:func:`jvp`, where these two code blocks compute the same values::

    y, out_tangent = jax.jvp(f, (x,), (in_tangent,))

    y, f_jvp = jax.linearize(f, x)
    out_tangent = f_jvp(in_tangent)

  However, the difference is that :py:func:`linearize` uses partial evaluation
  so that the function ``f`` is not re-linearized on calls to ``f_jvp``. In
  general that means the memory usage scales with the size of the computation,
  much like in reverse-mode. (Indeed, :py:func:`linearize` has a similar
  signature to :py:func:`vjp`!)

  This function is mainly useful if you want to apply ``f_jvp`` multiple times,
  i.e. to evaluate a pushforward for many different input tangent vectors at the
  same linearization point. Moreover if all the input tangent vectors are known
  at once, it can be more efficient to vectorize using :py:func:`vmap`, as in::

    pushfwd = partial(jvp, f, (x,))
    y, out_tangents = vmap(pushfwd, out_axes=(None, 0))((in_tangents,))

  By using :py:func:`vmap` and :py:func:`jvp` together like this we avoid the stored-linearization
  memory cost that scales with the depth of the computation, which is incurred
  by both :py:func:`linearize` and :py:func:`vjp`.

  Here's a more complete example of using :py:func:`linearize`:

  >>> import jax
  >>> import jax.numpy as jnp
  >>>
  >>> def f(x): return 3. * jnp.sin(x) + jnp.cos(x / 2.)
  ...
  >>> jax.jvp(f, (2.,), (3.,))
  (DeviceArray(3.26819, dtype=float32, weak_type=True), DeviceArray(-5.00753, dtype=float32, weak_type=True))
  >>> y, f_jvp = jax.linearize(f, 2.)
  >>> print(y)
  3.2681944
  >>> print(f_jvp(3.))
  -5.007528
  >>> print(f_jvp(4.))
  -6.676704
  """
  _check_callable(fun)
  f = lu.wrap_init(fun)
  # Empty dict: linearize does not accept keyword arguments.
  primals_flat, in_tree = tree_flatten((primals, {}))
  jaxtree_fun, out_tree = flatten_fun(f, in_tree)
  out_primals, out_pvals, jaxpr, consts = ad.linearize(jaxtree_fun, *primals_flat)
  out_tree = out_tree()
  out_primal_py = tree_unflatten(out_tree, out_primals)
  primal_avals = list(map(core.get_aval, primals_flat))
  # Ensure that lifted_jvp is a PyTree
  lifted_jvp = Partial(partial(_lift_linearized, jaxpr, primal_avals,
                               (in_tree, out_tree), out_pvals), consts)
  return out_primal_py, lifted_jvp
def _lift_linearized(jaxpr, primal_avals, io_tree, out_pvals, consts, *py_args):
  """Wrap a linearized ``jaxpr`` as a pytree-in/pytree-out tangent function."""
  def fun(*tangents):
    tangent_avals = list(map(core.get_aval, tangents))
    for primal_aval, tangent_aval in zip(primal_avals, tangent_avals):
      # Each tangent must live in the tangent space of its paired primal.
      if not core.typecompat(primal_aval.at_least_vspace(), tangent_aval):
        raise ValueError("linearized function called on tangent values inconsistent with "
                         "the original primal values: "
                         f"got {tangent_aval} for primal aval {primal_aval}")
    tangents_out = eval_jaxpr(jaxpr, consts, *tangents)
    # Partial eval may already know some outputs; merge those known partial
    # values with the freshly evaluated tangents.
    return tuple(map(lambda out_pv, tan_out: out_pv.merge_with_known(tan_out),
                     out_pvals, tangents_out))
  return apply_flat_fun(fun, io_tree, *py_args)
def _vjp_pullback_wrapper(cotangent_dtypes, cotangent_shapes,
                          io_tree, fun, py_args):
  """Validate cotangent inputs, then apply the flat VJP function ``fun``.

  Args:
    cotangent_dtypes: expected tangent dtype for each flat primal output.
    cotangent_shapes: expected shape for each flat primal output.
    io_tree: pair of pytree defs: the pullback's expected input tree (the
      original function's output tree) and its output tree (the original
      function's input tree).
    fun: flat VJP function mapping output cotangents to input cotangents.
    py_args: the cotangent pytree supplied by the caller.

  Returns:
    The input-cotangent pytree produced by ``fun``.

  Raises:
    TypeError: on tree-structure or dtype mismatch.
    ValueError: on shape mismatch.
  """
  in_tree_expected, out_tree = io_tree
  args, in_tree = tree_flatten(py_args)
  if in_tree != in_tree_expected:
    raise TypeError(f"Tree structure of cotangent input {in_tree}, does not match structure of "
                    f"primal output {in_tree_expected}.")
  for arg, ct_dtype, ct_shape in safe_zip(args, cotangent_dtypes, cotangent_shapes):
    expected_tangent_dtype = core.primal_dtype_to_tangent_dtype(_dtype(arg))
    if expected_tangent_dtype != ct_dtype:
      raise TypeError(
          f"Type of cotangent input to vjp pullback function ({ct_dtype}) is not "
          f"the expected tangent type ({expected_tangent_dtype}) of corresponding primal output "
          f"with dtype {_dtype(arg)}.")
    if np.shape(arg) != ct_shape:
      # The cotangent passed to a pullback corresponds to a primal *output*
      # of the original function (ct_shape comes from out_primal), so report
      # it as such; the message previously said "primal input", matching
      # neither the dtype message above nor the actual semantics.
      raise ValueError(
          f"Shape of cotangent input to vjp pullback function {np.shape(arg)} "
          "must be the same as the shape of corresponding primal output "
          f"{ct_shape}.")
  ans = fun(*args)
  return tree_unflatten(out_tree, ans)
# typing.Literal is only available from Python 3.8, so the precise
# has_aux-dependent overloads of vjp are only declared on 3.8+; older
# interpreters get coarser overloads with the same runtime behavior.
if sys.version_info >= (3, 8):
  from typing import Literal

  @overload  # type: ignore
  def vjp(fun: Callable[..., T],
          *primals: Any,
          has_aux: Literal[False] = False,
          reduce_axes: Sequence[AxisName] = ()) -> Tuple[T, Callable]:
    ...

  @overload
  def vjp(fun: Callable[..., Tuple[T, U]], *primals: Any,
          has_aux: Literal[True],
          reduce_axes: Sequence[AxisName] = ()) -> Tuple[T, Callable, U]:
    ...

else:

  @overload  # type: ignore
  def vjp(fun: Callable[..., T], *primals: Any) -> Tuple[T, Callable]:
    ...

  @overload
  def vjp(
      fun: Callable[..., Any], *primals: Any,
      has_aux: bool,
      reduce_axes: Sequence[AxisName] = ()
  ) -> Union[Tuple[Any, Callable], Tuple[Any, Callable, Any]]:
    ...
def vjp(  # type: ignore
    fun: Callable, *primals, has_aux: bool = False, reduce_axes=()
) -> Union[Tuple[Any, Callable], Tuple[Any, Callable, Any]]:
  """Compute a (reverse-mode) vector-Jacobian product of ``fun``.

  :py:func:`grad` is implemented as a special case of :py:func:`vjp`.

  Args:
    fun: Function to be differentiated. Its arguments should be arrays, scalars,
      or standard Python containers of arrays or scalars. It should return an
      array, scalar, or standard Python container of arrays or scalars.
    primals: A sequence of primal values at which the Jacobian of ``fun``
      should be evaluated. The length of ``primals`` should be equal to the
      number of positional parameters to ``fun``. Each primal value should be a
      tuple of arrays, scalar, or standard Python containers thereof.
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the
      first element is considered the output of the mathematical function to be
      differentiated and the second element is auxiliary data. Default False.
    reduce_axes: Optional, tuple of axis names. If an axis is listed here, and
      ``fun`` implicitly broadcasts a value over that axis, the backward pass
      will perform a ``psum`` of the corresponding gradient. Otherwise, the
      VJP will be per-example over named axes. For example, if ``'batch'``
      is a named batch axis, ``vjp(f, *args, reduce_axes=('batch',))`` will
      create a VJP function that sums over the batch while ``vjp(f, *args)``
      will create a per-example VJP.

  Returns:
    If ``has_aux`` is ``False``, returns a ``(primals_out, vjpfun)`` pair, where
    ``primals_out`` is ``fun(*primals)``.
    ``vjpfun`` is a function from a cotangent vector with the same shape as
    ``primals_out`` to a tuple of cotangent vectors with the same shape as
    ``primals``, representing the vector-Jacobian product of ``fun`` evaluated at
    ``primals``. If ``has_aux`` is ``True``, returns a
    ``(primals_out, vjpfun, aux)`` tuple where ``aux`` is the auxiliary data
    returned by ``fun``.

  >>> import jax
  >>>
  >>> def f(x, y):
  ...   return jax.numpy.sin(x), jax.numpy.cos(y)
  ...
  >>> primals, f_vjp = jax.vjp(f, 0.5, 1.0)
  >>> xbar, ybar = f_vjp((-0.7, 0.3))
  >>> print(xbar)
  -0.61430776
  >>> print(ybar)
  -0.2524413
  """
  _check_callable(fun)
  # Normalize a bare axis name to a one-element tuple.
  reduce_axes = _ensure_str_tuple(reduce_axes)
  # The actual transformation lives in _vjp, which takes an lu.WrappedFun.
  return _vjp(
      lu.wrap_init(fun), *primals, has_aux=has_aux, reduce_axes=reduce_axes)
def _vjp(fun: lu.WrappedFun, *primals, has_aux=False, reduce_axes=()):
  """Variant of vjp() that takes an lu.WrappedFun."""
  primals_flat, in_tree = tree_flatten(primals)
  for arg in primals_flat: _check_arg(arg)
  if not has_aux:
    flat_fun, out_tree = flatten_fun_nokwargs(fun, in_tree)
    out_primal, out_vjp = ad.vjp(
        flat_fun, primals_flat, reduce_axes=reduce_axes)
    out_tree = out_tree()
  else:
    flat_fun, out_aux_trees = flatten_fun_nokwargs2(fun, in_tree)
    out_primal, out_vjp, aux = ad.vjp(
        flat_fun, primals_flat, has_aux=True, reduce_axes=reduce_axes)
    out_tree, aux_tree = out_aux_trees()
  out_primal_py = tree_unflatten(out_tree, out_primal)
  # Record the primal outputs' tangent dtypes and shapes so the pullback can
  # validate the cotangents it is called with.
  ct_dtypes = [core.primal_dtype_to_tangent_dtype(_dtype(x)) for x in out_primal]
  ct_shapes = [np.shape(x) for x in out_primal]
  # Ensure that vjp_py is a PyTree so that we can pass it from the forward to the
  # backward pass in a custom VJP.
  vjp_py = Partial(partial(_vjp_pullback_wrapper,
                           ct_dtypes, ct_shapes,
                           (out_tree, in_tree)),
                   out_vjp)
  if not has_aux:
    return out_primal_py, vjp_py
  else:
    return out_primal_py, vjp_py, tree_unflatten(aux_tree, aux)
def linear_transpose(fun: Callable, *primals, reduce_axes=()) -> Callable:
  """Transpose a function that is promised to be linear.

  For linear functions, this transformation is equivalent to ``vjp``, but
  avoids the overhead of computing the forward pass.

  The outputs of the transposed function will always have the exact same dtypes
  as ``primals``, even if some values are truncated (e.g., from complex to
  float, or from float64 to float32). To avoid truncation, use dtypes in
  ``primals`` that match the full range of desired outputs from the transposed
  function. Integer dtypes are not supported.

  Args:
    fun: the linear function to be transposed.
    *primals: a positional argument tuple of arrays, scalars, or (nested)
      standard Python containers (tuples, lists, dicts, namedtuples, i.e.,
      pytrees) of those types used for evaluating the shape/dtype of
      ``fun(*primals)``. These arguments may be real scalars/ndarrays, but that
      is not required: only the ``shape`` and ``dtype`` attributes are accessed.
      See below for an example. (Note that the duck-typed objects cannot be
      namedtuples because those are treated as standard Python containers.)
    reduce_axes: Optional, tuple of axis names. If an axis is listed here, and
      ``fun`` implicitly broadcasts a value over that axis, the backward pass
      will perform a ``psum`` of the corresponding cotangent. Otherwise, the
      transposed function will be per-example over named axes. For example, if
      ``'batch'`` is a named batch axis, ``linear_transpose(f, *args,
      reduce_axes=('batch',))`` will create a transpose function that sums over
      the batch while ``linear_transpose(f, args)`` will create a per-example
      transpose.

  Returns:
    A callable that calculates the transpose of ``fun``. Valid input into this
    function must have the same shape/dtypes/structure as the result of
    ``fun(*primals)``. Output will be a tuple, with the same
    shape/dtypes/structure as ``primals``.

  >>> import jax
  >>> import types
  >>>
  >>> f = lambda x, y: 0.5 * x - 0.5 * y
  >>> scalar = types.SimpleNamespace(shape=(), dtype=np.dtype(np.float32))
  >>> f_transpose = jax.linear_transpose(f, scalar, scalar)
  >>> f_transpose(1.0)
  (DeviceArray(0.5, dtype=float32), DeviceArray(-0.5, dtype=float32))
  """
  reduce_axes = _ensure_str_tuple(reduce_axes)
  primals_flat, in_tree = tree_flatten(primals)
  flat_fun, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
  in_avals = map(shaped_abstractify, primals_flat)
  in_dtypes = map(dtypes.dtype, in_avals)

  # Trace the linear function to a jaxpr once; the transpose replays it
  # backwards, so no forward execution is needed at call time.
  in_pvals = map(pe.PartialVal.unknown, in_avals)
  jaxpr, out_pvals, consts = pe.trace_to_jaxpr(flat_fun, in_pvals,
                                               instantiate=True)
  out_avals, _ = unzip2(out_pvals)
  out_dtypes = map(dtypes.dtype, out_avals)
  if not (all(dtypes.issubdtype(d, np.inexact) for d in in_dtypes + out_dtypes)
          or all(dtypes.issubdtype(d, np.integer)
                 for d in in_dtypes + out_dtypes)):
    raise TypeError("linear_transpose only supports [float or complex] -> "
                    "[float or complex], and integer -> integer functions, "
                    f"but got {in_dtypes} -> {out_dtypes}.")

  def transposed_fun(consts, out_cotangent):
    out_cotangents, out_tree2 = tree_flatten(out_cotangent)
    if out_tree() != out_tree2:
      raise TypeError("cotangent tree does not match function output, "
                      f"expected {out_tree()} but got {out_tree2}")
    if not all(map(core.typecheck, out_avals, out_cotangents)):
      raise TypeError("cotangent type does not match function output, "
                      f"expected {out_avals} but got {out_cotangents}")
    dummies = [ad.UndefinedPrimal(a) for a in in_avals]
    # Run the backward pass over the traced jaxpr; instantiate symbolic
    # zeros so callers always receive concrete values.
    in_cotangents = map(
        ad.instantiate_zeros,
        ad.backward_pass(jaxpr, reduce_axes, consts, dummies, out_cotangents))
    return tree_unflatten(in_tree, in_cotangents)

  # Ensure that transposed_fun is a PyTree
  return Partial(transposed_fun, consts)
def make_jaxpr(fun: Callable,
               static_argnums: Union[int, Iterable[int]] = (),
               axis_env: Optional[Sequence[Tuple[AxisName, int]]] = None,
               return_shape: bool = False,
               ) -> Callable[..., core.ClosedJaxpr]:
  """Creates a function that produces its jaxpr given example args.

  Args:
    fun: The function whose ``jaxpr`` is to be computed. Its positional
      arguments and return value should be arrays, scalars, or standard Python
      containers (tuple/list/dict) thereof.
    static_argnums: See the :py:func:`jax.jit` docstring.
    axis_env: Optional, a sequence of pairs where the first element is an axis
      name and the second element is a positive integer representing the size of
      the mapped axis with that name. This parameter is useful when lowering
      functions that involve parallel communication collectives, and it
      specifies the axis name/size environment that would be set up by
      applications of :py:func:`jax.pmap`.
    return_shape: Optional boolean, defaults to ``False``. If ``True``, the
      wrapped function returns a pair where the first element is the XLA
      computation and the second element is a pytree with the same structure as
      the output of ``fun`` and where the leaves are objects with ``shape``,
      ``dtype``, and ``named_shape`` attributes representing the corresponding
      types of the output leaves.

  Returns:
    A wrapped version of ``fun`` that when applied to example arguments returns
    a ``ClosedJaxpr`` representation of ``fun`` on those arguments. If the
    argument ``return_shape`` is ``True``, then the returned function instead
    returns a pair where the first element is the ``ClosedJaxpr``
    representation of ``fun`` and the second element is a pytree representing
    the structure, shape, dtypes, and named shapes of the output of ``fun``.

  A ``jaxpr`` is JAX's intermediate representation for program traces. The
  ``jaxpr`` language is based on the simply-typed first-order lambda calculus
  with let-bindings. :py:func:`make_jaxpr` adapts a function to return its
  ``jaxpr``, which we can inspect to understand what JAX is doing internally.
  The ``jaxpr`` returned is a trace of ``fun`` abstracted to
  :py:class:`ShapedArray` level. Other levels of abstraction exist internally.

  We do not describe the semantics of the ``jaxpr`` language in detail here, but
  instead give a few examples.

  >>> import jax
  >>>
  >>> def f(x): return jax.numpy.sin(jax.numpy.cos(x))
  >>> print(f(3.0))
  -0.83602
  >>> jax.make_jaxpr(f)(3.0)
  { lambda ; a:f32[]. let b:f32[] = cos a; c:f32[] = sin b in (c,) }
  >>> jax.make_jaxpr(jax.grad(f))(3.0)
  { lambda ; a:f32[]. let
      b:f32[] = cos a
      c:f32[] = sin a
      _:f32[] = sin b
      d:f32[] = cos b
      e:f32[] = mul 1.0 d
      f:f32[] = neg e
      g:f32[] = mul f c
    in (g,) }
  """
  _check_callable(fun)
  static_argnums = _ensure_index_tuple(static_argnums)

  @wraps(fun)
  @api_boundary
  def jaxpr_maker(*args, **kwargs):
    wrapped = lu.wrap_init(fun)
    if static_argnums:
      # Close over static arguments so only dynamic ones get traced.
      dyn_argnums = [i for i in range(len(args)) if i not in static_argnums]
      wrapped, args = argnums_partial(wrapped, dyn_argnums, args)
    jax_args, in_tree = tree_flatten((args, kwargs))
    jaxtree_fun, out_tree = flatten_fun(wrapped, in_tree)
    in_avals = map(shaped_abstractify, jax_args)
    # Temporarily install the requested axis-name environment while tracing.
    with ExitStack() as stack:
      for axis_name, size in axis_env or []:
        stack.enter_context(core.extend_axis_env(axis_name, size, None))
      jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(jaxtree_fun, in_avals)
    closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
    if return_shape:
      out_shapes_flat = [
          ShapeDtypeStruct(a.shape, a.dtype, a.named_shape) for a in out_avals]
      return closed_jaxpr, tree_unflatten(out_tree(), out_shapes_flat)
    return closed_jaxpr

  jaxpr_maker.__name__ = f"make_jaxpr({jaxpr_maker.__name__})"
  return jaxpr_maker
def device_put(x, device: Optional[xc.Device] = None):
  """Transfers ``x`` to ``device``.

  Args:
    x: An array, scalar, or (nested) standard Python container thereof.
    device: The (optional) :py:class:`Device` to which ``x`` should be
      transferred. If given, then the result is committed to the device.

  If the ``device`` parameter is ``None``, then this operation behaves like the
  identity function if the operand is on any device already, otherwise it
  transfers the data to the default device, uncommitted.

  For more details on data placement see the
  :ref:`FAQ on data placement <faq-data-placement>`.

  Returns:
    A copy of ``x`` that resides on ``device``.
  """
  # Applied leaf-wise via the device_put primitive bind.
  return tree_map(lambda y: dispatch.device_put_p.bind(y, device=device), x)
def device_put_sharded(shards: Sequence[Any], devices: Sequence[xc.Device]):
  """Transfer array shards to specified devices and form ShardedDeviceArray(s).

  Args:
    shards: A sequence of arrays, scalars, or (nested) standard Python
      containers thereof representing the shards to be stacked together to form
      the output. The length of ``shards`` must equal the length of ``devices``.
    devices: A sequence of :py:class:`Device` instances representing the devices
      to which corresponding shards in ``shards`` will be transferred.

  Returns:
    A ShardedDeviceArray or (nested) Python container thereof representing the
    elements of ``shards`` stacked together, with each shard backed by physical
    device memory specified by the corresponding entry in ``devices``.

  Examples:
    Passing a list of arrays for ``shards`` results in a sharded array
    containing a stacked version of the inputs:

    >>> import jax
    >>> devices = jax.local_devices()
    >>> x = [jax.numpy.ones(5) for device in devices]
    >>> y = jax.device_put_sharded(x, devices)
    >>> np.allclose(y, jax.numpy.stack(x))
    True

    Passing a list of nested container objects with arrays at the leaves for
    ``shards`` corresponds to stacking the shards at each leaf. This requires
    all entries in the list to have the same tree structure:

    >>> x = [(i, jax.numpy.arange(i, i + 4)) for i in range(len(devices))]
    >>> y = jax.device_put_sharded(x, devices)
    >>> type(y)
    <class 'tuple'>
    >>> y0 = jax.device_put_sharded([a for a, b in x], devices)
    >>> y1 = jax.device_put_sharded([b for a, b in x], devices)
    >>> np.allclose(y[0], y0)
    True
    >>> np.allclose(y[1], y1)
    True

  See Also:
    - device_put
    - device_put_replicated
  """
  # TODO(jakevdp): provide a default for devices that considers both local
  # devices and pods
  if not isinstance(shards, Sequence):
    raise ValueError("device_put_sharded `shards` input must be a sequence; "
                     f"got {type(shards)}")
  if not len(shards) == len(devices):
    raise ValueError(f"len(shards) = {len(shards)} must equal "
                     f"len(devices) = {len(devices)}.")

  def _device_put_sharded(*xs):
    # All shards at a given leaf must share one aval (shape + dtype).
    avals = [core.raise_to_shaped(core.get_aval(x)) for x in xs]
    if not all(a1 == a2 for a1, a2 in zip(avals[:-1], avals[1:])):
      a1, a2 = next((a1, a2) for a1, a2 in zip(avals[:-1], avals[1:])
                    if a1 != a2)
      raise ValueError("the shards passed to device_put_sharded must have "
                       f"consistent shape and dtype, but got {a1} and {a2}.")
    # Stacked result has a new leading axis of length len(devices).
    stacked_aval = avals[0].update(shape=(len(devices),) + avals[0].shape)
    buffers = [buf for x, d in zip(xs, devices)
               for buf in dispatch.device_put(x, d)]
    return pxla.make_sharded_device_array(stacked_aval, None, buffers)

  # Zip across the shard list: one _device_put_sharded call per leaf.
  return tree_multimap(_device_put_sharded, *shards)
def device_put_replicated(x: Any, devices: Sequence[xc.Device]):
  """Transfer array(s) to each specified device and form ShardedDeviceArray(s).

  Args:
    x: an array, scalar, or (nested) standard Python container thereof
      representing the array to be replicated to form the output.
    devices: A sequence of :py:class:`Device` instances representing the devices
      to which ``x`` will be transferred.

  Returns:
    A ShardedDeviceArray or (nested) Python container thereof representing the
    value of ``x`` broadcasted along a new leading axis of size
    ``len(devices)``, with each slice along that new leading axis backed by
    memory on the device specified by the corresponding entry in ``devices``.

  Examples:
    Passing an array:

    >>> import jax
    >>> devices = jax.local_devices()
    >>> x = jax.numpy.array([1., 2., 3.])
    >>> y = jax.device_put_replicated(x, devices)
    >>> np.allclose(y, jax.numpy.stack([x for _ in devices]))
    True

  See Also:
    - device_put
    - device_put_sharded
  """
  if not isinstance(devices, Sequence) or not devices:
    # Fixed an unbalanced backtick in this message ("`device_put_replicated ").
    raise ValueError("`devices` argument to `device_put_replicated` must be "
                     "a non-empty sequence.")

  def _device_put_replicated(x):
    # The replicated aval: x's shaped aval with a new leading axis of
    # length len(devices) prepended.
    aval = core.unmapped_aval(len(devices), core.no_axis_name, 0,
                              core.raise_to_shaped(core.get_aval(x)))
    assert (isinstance(aval, core.ShapedArray) and
            len(xla.aval_to_xla_shapes(aval)) == 1)
    # Transfer from host once, then replicate with device-to-device copies
    # instead of re-transferring from host for every device.
    buf, = dispatch.device_put(x, devices[0])
    rest_bufs = [buf.copy_to_device(d) for d in devices[1:]]
    return pxla.make_sharded_device_array(aval, None, [buf, *rest_bufs])

  return tree_map(_device_put_replicated, x)
# TODO(mattjj): consider revising
def _device_get(x):
  """Copy one leaf back to the host; values without a ``copy`` attribute
  (and tracers) are returned unchanged."""
  if isinstance(x, core.Tracer):
    return x
  try:
    copy_method = x.copy
  except AttributeError:
    return x
  return copy_method()
def device_get(x: Any):
  """Transfer ``x`` to host.

  If ``x`` is a pytree, then the individual buffers are copied in parallel.

  Args:
    x: An array, scalar, DeviceArray or (nested) standard Python container thereof
      representing the array to be transferred to host.

  Returns:
    An array or (nested) Python container thereof representing the
    value of ``x``.

  Examples:
    Passing a DeviceArray:

    >>> import jax
    >>> x = jax.numpy.array([1., 2., 3.])
    >>> jax.device_get(x)
    array([1., 2., 3.], dtype=float32)

    Passing a scalar (has no effect):

    >>> jax.device_get(1)
    1

  See Also:
    - device_put
    - device_put_sharded
    - device_put_replicated
  """
  # First kick off asynchronous device-to-host copies for every leaf so the
  # transfers overlap, then collect results with the blocking _device_get.
  for y in tree_leaves(x):
    try:
      y.copy_to_host_async()
    except AttributeError:
      # Leaf is not a device buffer (e.g. a Python scalar); nothing to start.
      pass
  return tree_map(_device_get, x)
def _check_arg(arg):
  """Raise TypeError unless ``arg`` is a Tracer or a valid JAX type."""
  if isinstance(arg, core.Tracer):
    return
  if _valid_jaxtype(arg):
    return
  raise TypeError(f"Argument '{arg}' of type {type(arg)} is not a valid JAX type.")
# TODO(mattjj,necula): this duplicates code in core.valid_jaxtype, but one
# internal user relies on it for duck-typing. must fix downstream user!
def _valid_jaxtype(arg):
  """Return True iff ``arg`` can be abstracted to an XLA-compatible value."""
  try:
    xla.abstractify(arg)  # faster than core.get_aval
    return True
  except TypeError:
    return False
class ShapeDtypeStruct:
  """A lightweight array stand-in carrying only shape, dtype and named_shape."""
  __slots__ = ["shape", "dtype", "named_shape"]

  def __init__(self, shape, dtype, named_shape=None):
    self.shape = shape
    # Normalize whatever dtype-like the caller passed into a numpy dtype.
    self.dtype = np.dtype(dtype)
    self.named_shape = {} if named_shape is None else dict(named_shape)

  @property
  def size(self):
    return prod(self.shape)

  @property
  def ndim(self):
    return len(self.shape)

  def __len__(self):
    try:
      return self.shape[0]
    except IndexError as e:
      raise TypeError("len() of unsized object") from e  # same as numpy error

  def __repr__(self):
    ns = f", named_shape={self.named_shape}" if self.named_shape else ""
    return f"{type(self).__name__}(shape={self.shape}, dtype={self.dtype.name}{ns})"

  __str__ = __repr__

  def __eq__(self, other):
    if not isinstance(other, ShapeDtypeStruct):
      return False
    mine = (self.shape, self.dtype, self.named_shape)
    theirs = (other.shape, other.dtype, other.named_shape)
    return mine == theirs

  def __hash__(self):
    # TODO(frostig): avoid the conversion from dict by addressing
    # https://github.com/google/jax/issues/8182
    named = frozenset(self.named_shape.items())
    return hash((self.shape, self.dtype, named))
def eval_shape(fun: Callable, *args, **kwargs):
  """Compute the shape/dtype of ``fun`` without any FLOPs.

  This utility function is useful for performing shape inference. Its
  input/output behavior is defined by::

    def eval_shape(fun, *args, **kwargs):
      out = fun(*args, **kwargs)
      return jax.tree_util.tree_map(shape_dtype_struct, out)

    def shape_dtype_struct(x):
      return ShapeDtypeStruct(x.shape, x.dtype)

    class ShapeDtypeStruct:
      __slots__ = ["shape", "dtype"]
      def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = dtype

  In particular, the output is a pytree of objects that have ``shape`` and
  ``dtype`` attributes, but nothing else about them is guaranteed by the API.

  But instead of applying ``fun`` directly, which might be expensive, it uses
  JAX's abstract interpretation machinery to evaluate the shapes without doing
  any FLOPs.

  Using :py:func:`eval_shape` can also catch shape errors, and will raise same
  shape errors as evaluating ``fun(*args, **kwargs)``.

  Args:
    fun: The function whose output shape should be evaluated.
    *args: a positional argument tuple of arrays, scalars, or (nested) standard
      Python containers (tuples, lists, dicts, namedtuples, i.e. pytrees) of
      those types. Since only the ``shape`` and ``dtype`` attributes are
      accessed, only values that duck-type arrays are required, rather than real
      ndarrays. The duck-typed objects cannot be namedtuples because those are
      treated as standard Python containers. See the example below.
    **kwargs: a keyword argument dict of arrays, scalars, or (nested) standard
      Python containers (pytrees) of those types. As in ``args``, array values
      need only be duck-typed to have ``shape`` and ``dtype`` attributes.

  For example:

  >>> import jax
  >>> import jax.numpy as jnp
  >>>
  >>> f = lambda A, x: jnp.tanh(jnp.dot(A, x))
  >>> class MyArgArray(object):
  ...   def __init__(self, shape, dtype):
  ...     self.shape = shape
  ...     self.dtype = jnp.dtype(dtype)
  ...
  >>> A = MyArgArray((2000, 3000), jnp.float32)
  >>> x = MyArgArray((3000, 1000), jnp.float32)
  >>> out = jax.eval_shape(f, A, x)  # no FLOPs performed
  >>> print(out.shape)
  (2000, 1000)
  >>> print(out.dtype)
  float32
  """
  # Flatten (args, kwargs) into a list of leaves plus the pytree structure.
  args_flat, in_tree = tree_flatten((args, kwargs))
  wrapped_fun, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)
  debug_info = pe.debug_info(fun, in_tree, True, "eval_shape")
  # Trace fun on shaped abstract values only, so no array math is performed.
  out = pe.abstract_eval_fun(wrapped_fun.call_wrapped,
                             *map(shaped_abstractify, args_flat),
                             debug_info=debug_info)
  # Convert abstract values into the lightweight public result objects.
  out = [ShapeDtypeStruct(x.shape, x.dtype, x.named_shape) for x in out]
  return tree_unflatten(out_tree(), out)
def checkpoint(fun: Callable, concrete: bool = False, prevent_cse: bool = True,
               policy: Optional[Callable[..., bool]] = None,
               ) -> Callable:
  """Make ``fun`` recompute internal linearization points when differentiated.

  The :func:`jax.checkpoint` decorator, aliased to ``jax.remat``, provides a
  way to trade off computation time and memory cost in the context of automatic
  differentiation, especially with reverse-mode autodiff like :func:`jax.grad`
  and :func:`jax.vjp` but also with :func:`jax.linearize`.

  When differentiating a function in reverse-mode, by default all the
  linearization points (e.g. inputs to elementwise nonlinear primitive
  operations) are stored when evaluating the forward pass so that they can be
  reused on the backward pass. This evaluation strategy can lead to a high
  memory cost, or even to poor performance on hardware accelerators where memory
  access is much more expensive than FLOPs.

  An alternative evaluation strategy is for some of the linearization points to
  be recomputed (i.e. rematerialized) rather than stored. This approach can
  reduce memory usage at the cost of increased computation.

  This function decorator produces a new version of ``fun`` which follows
  the rematerialization strategy rather than the default store-everything
  strategy. That is, it returns a new version of ``fun`` which, when
  differentiated, doesn't store any of its intermediate linearization points.
  Instead, these linearization points are recomputed from the function's saved
  inputs.

  See the examples below.

  Args:
    fun: Function for which the autodiff evaluation strategy is to be changed
      from the default of storing all intermediate linearization points to
      recomputing them. Its arguments and return value should be arrays,
      scalars, or (nested) standard Python containers (tuple/list/dict) thereof.
    concrete: Optional, boolean indicating whether ``fun`` may involve
      value-dependent Python control flow (default False). Support for such
      control flow is optional, and disabled by default, because in some
      edge-case compositions with :func:`jax.jit` it can lead to some extra
      computation.
    prevent_cse: Optional, boolean indicating whether to prevent common
      subexpression elimination (CSE) optimizations in the HLO generated from
      differentiation. This CSE prevention has costs because it can foil other
      optimizations, and because it can incur high overheads on some backends,
      especially GPU. The default is True because otherwise, under a ``jit`` or
      ``pmap``, CSE can defeat the purpose of this decorator. But in some
      settings, like when used inside a ``scan``, this CSE prevention mechanism
      is unnecessary, in which case ``prevent_cse`` can be set to False.
    policy: This is an experimental feature and the API is likely to change.
      Optional callable, one of the attributes of ``jax.checkpoint_policies``,
      which takes as input a type-level specification of a first-order primitive
      application and returns a boolean indicating whether the corresponding
      output value(s) can be saved as a residual (or, if not, instead must be
      recomputed in the (co)tangent computation).

  Returns:
    A function (callable) with the same input/output behavior as ``fun`` but
    which, when differentiated using e.g. :func:`jax.grad`, :func:`jax.vjp`, or
    :func:`jax.linearize`, recomputes rather than stores intermediate
    linearization points, thus potentially saving memory at the cost of extra
    computation.

  Here is a simple example:

  >>> import jax
  >>> import jax.numpy as jnp

  >>> @jax.checkpoint
  ... def g(x):
  ...   y = jnp.sin(x)
  ...   z = jnp.sin(y)
  ...   return z
  ...
  >>> jax.value_and_grad(g)(2.0)
  (DeviceArray(0.78907233, dtype=float32, weak_type=True), DeviceArray(-0.2556391, dtype=float32, weak_type=True))

  Here, the same value is produced whether or not the :func:`jax.checkpoint`
  decorator is present. When the decorator is not present, the values
  ``jnp.cos(2.0)`` and ``jnp.cos(jnp.sin(2.0))`` are computed on the forward
  pass and are stored for use in the backward pass, because they are needed
  on the backward pass and depend only on the primal inputs. When using
  :func:`jax.checkpoint`, the forward pass will compute only the primal outputs
  and only the primal inputs (``2.0``) will be stored for the backward pass.
  At that time, the value ``jnp.sin(2.0)`` is recomputed, along with the values
  ``jnp.cos(2.0)`` and ``jnp.cos(jnp.sin(2.0))``.

  While ``jax.checkpoint`` controls what values are stored from the forward-pass
  to be used on the backward pass, the total amount of memory required to
  evaluate a function or its VJP depends on many additional internal details of
  that function. Those details include which numerical primitives are used,
  how they're composed, where jit and control flow primitives like scan
  are used, and other factors.

  The :func:`jax.checkpoint` decorator can be applied recursively to express
  sophisticated autodiff rematerialization strategies. For example:

  >>> def recursive_checkpoint(funs):
  ...   if len(funs) == 1:
  ...     return funs[0]
  ...   elif len(funs) == 2:
  ...     f1, f2 = funs
  ...     return lambda x: f1(f2(x))
  ...   else:
  ...     f1 = recursive_checkpoint(funs[:len(funs)//2])
  ...     f2 = recursive_checkpoint(funs[len(funs)//2:])
  ...     return lambda x: f1(jax.checkpoint(f2)(x))
  ...
  """
  @wraps(fun)
  @api_boundary
  def fun_remat(*args, **kwargs):
    # Flatten the call, then stage it out through the remat_call primitive,
    # which marks the subcomputation for rematerialization during autodiff.
    args_flat, in_tree = tree_flatten((args, kwargs))
    flat_fun, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)
    out_flat = pe.remat_call(flat_fun, *args_flat, name=flat_fun.__name__,
                             concrete=concrete, prevent_cse=prevent_cse,
                             differentiated=False,
                             policy=policy)
    return tree_unflatten(out_tree(), out_flat)
  return fun_remat
remat = checkpoint  # type: ignore  # historical alias: jax.remat
def named_call(
    fun: Callable[..., Any],
    *,
    name: Optional[str] = None,
) -> Callable[..., Any]:
  """Attach a user-specified name to ``fun`` when staging out JAX computations.

  JAX does not by default preserve Python function names in staged-out (and
  compiled) programs, which makes the compiled representation hard to relate
  back to the source. Wrapping a function with ``named_call`` stages it out as
  a named subcomputation; the name survives XLA compilation (visible e.g. in
  the TensorFlow Profiler) and jax2tf conversion.

  Args:
    fun: Function to be wrapped. This can be any Callable.
    name: Optional. The prefix to use to name all sub computations created
      within the name scope. Defaults to ``fun.__name__``.

  Returns:
    A version of ``fun`` that is wrapped in a name_scope.
  """
  scope_name = fun.__name__ if name is None else name

  # The wrapped call takes no flattened arguments itself: the user's args are
  # closed over by a thunk, so the input tree is that of the empty tuple.
  empty_tree = tree_flatten(())[1]

  @functools.wraps(fun)
  def wrapper(*args, **kwargs):
    thunk = lu.wrap_init(lambda: fun(*args, **kwargs))
    flat_thunk, out_tree = flatten_fun_nokwargs(thunk, empty_tree)
    results = core.named_call_p.bind(flat_thunk, name=scope_name)
    return tree_unflatten(out_tree(), results)

  return wrapper
def invertible(fun: Callable) -> Callable:
  """Asserts that the decorated function is invertible.

  Applying reverse-mode AD to a decorated function will use a more memory efficient
  procedure than usual, which will reconstruct the necessary intermediate values
  by inverting the function. Note that this might degrade the numerical accuracy of
  obtained gradients if the inverse is unstable.

  Args:
    fun: The function assumed to be invertible.
  """
  # Thin wrapper: all of the work happens in the invertible-AD module (iad).
  return iad.invertible(fun)
def block_until_ready(x):
  """Wait until every JAX array leaf of the pytree ``x`` is ready.

  Leaves that do not expose a ``block_until_ready`` method (for example plain
  Python scalars) are passed through unchanged.

  Args:
    x: a pytree, usually with at least some JAX array instances at its leaves.

  Returns:
    A pytree with the same structure and values of the input, where the values
    of all JAX array leaves are ready.
  """
  def _wait(leaf):
    try:
      return leaf.block_until_ready()
    except AttributeError:
      return leaf
  return jax.tree_util.tree_map(_wait, x)
| 43.244382 | 128 | 0.683093 |
ace2a932aed0f971d6635e6f26cc1d84a28bfd40 | 1,951 | py | Python | mmdet/utils/collect_env.py | HCYXAS/mmdetection-rocm | 5fbfedd4ea73645750626d70ac8ed456759b6b4f | [
"Apache-2.0"
] | 3 | 2020-07-03T06:15:18.000Z | 2020-08-06T08:28:28.000Z | mmdet/utils/collect_env.py | HCYXAS/mmdetection-rocm | 5fbfedd4ea73645750626d70ac8ed456759b6b4f | [
"Apache-2.0"
] | null | null | null | mmdet/utils/collect_env.py | HCYXAS/mmdetection-rocm | 5fbfedd4ea73645750626d70ac8ed456759b6b4f | [
"Apache-2.0"
] | 3 | 2020-06-30T03:25:18.000Z | 2020-07-23T03:15:41.000Z | import os.path as osp
import subprocess
import sys
from collections import defaultdict
import cv2
import mmcv
import torch
import torchvision
import mmdet
def collect_env():
    """Collect information about the running environment.

    Returns:
        dict: environment item name -> value (platform, Python, CUDA,
        compiler and library versions), suitable for bug reports.
    """
    env_info = {}
    env_info['sys.platform'] = sys.platform
    env_info['Python'] = sys.version.replace('\n', '')
    cuda_available = torch.cuda.is_available()
    env_info['CUDA available'] = cuda_available
    if cuda_available:
        from torch.utils.cpp_extension import CUDA_HOME
        env_info['CUDA_HOME'] = CUDA_HOME
        if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
            try:
                nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                nvcc = subprocess.check_output(
                    f'"{nvcc}" -V | tail -n1', shell=True)
                nvcc = nvcc.decode('utf-8').strip()
            except subprocess.SubprocessError:
                nvcc = 'Not Available'
            env_info['NVCC'] = nvcc
        # Group GPU ids by device name so identical cards share one row.
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for name, devids in devices.items():
            env_info['GPU ' + ','.join(devids)] = name
    try:
        # Fix: an unguarded check_output here aborted the whole report with
        # CalledProcessError on systems where the pipeline fails (e.g.
        # Windows shells without `head`); degrade to 'n/a' instead.
        gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
        gcc = gcc.decode('utf-8').strip()
        env_info['GCC'] = gcc
    except subprocess.SubprocessError:
        env_info['GCC'] = 'n/a'
    env_info['PyTorch'] = torch.__version__
    env_info['PyTorch compiling details'] = torch.__config__.show()
    env_info['TorchVision'] = torchvision.__version__
    env_info['OpenCV'] = cv2.__version__
    env_info['MMCV'] = mmcv.__version__
    env_info['MMDetection'] = mmdet.__version__
    from mmdet.ops import get_compiler_version, get_compiling_hip_version
    env_info['MMDetection Compiler'] = get_compiler_version()
    # NOTE(review): the key says 'CUDA' but the value is the HIP (ROCm)
    # compiler version — kept for output compatibility; confirm consumers.
    env_info['MMDetection CUDA Compiler'] = get_compiling_hip_version()
    return env_info
if __name__ == '__main__':
    # Script entry point: print each environment item on its own line.
    for name, val in collect_env().items():
        print(f'{name}: {val}')
| 30.484375 | 73 | 0.644285 |
ace2a964c2b11e32afaddce90a318310d57d164b | 604 | py | Python | files/security.py | ieugen/another-ldap-auth | fcf04a8801f25e301aadc32093ca4b0390b9356e | [
"MIT"
] | 29 | 2019-07-18T07:57:41.000Z | 2022-03-16T08:47:01.000Z | files/security.py | ieugen/another-ldap-auth | fcf04a8801f25e301aadc32093ca4b0390b9356e | [
"MIT"
] | 16 | 2020-09-15T11:32:23.000Z | 2022-01-03T19:03:43.000Z | files/security.py | ieugen/another-ldap-auth | fcf04a8801f25e301aadc32093ca4b0390b9356e | [
"MIT"
] | 17 | 2020-05-29T19:29:39.000Z | 2022-03-16T08:47:09.000Z | from flask import request
class Security:
    """Helpers for extracting client information from the current Flask request."""

    def __init__(self):
        # Stateless helper; nothing to initialize.
        return None

    def getUserIP(self):
        """Return the client IP, honoring reverse-proxy headers when present.

        Precedence: X-Real-IP, then X-Forwarded-For, then the socket address.
        """
        real_ip = request.environ.get('HTTP_X_REAL_IP')
        if real_ip is not None:
            return real_ip
        forwarded_for = request.environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded_for is not None:
            return forwarded_for
        return request.remote_addr

    def getUserReferrer(self):
        """Return the base URL of the current request.

        NOTE(review): despite the name, this returns ``request.base_url``,
        not the HTTP Referer header — confirm callers expect that.
        """
        return request.base_url
| 26.26087 | 69 | 0.594371 |
ace2a9909e54ffdd453f16389631c23acdacb0a4 | 5,888 | py | Python | silk/views/requests.py | pikhovkin/django-silk | 669dcda2420802468596c16cd5e2a7068a03a96f | [
"MIT"
] | 2 | 2018-11-20T13:06:51.000Z | 2019-08-09T08:50:52.000Z | silk/views/requests.py | pikhovkin/django-silk | 669dcda2420802468596c16cd5e2a7068a03a96f | [
"MIT"
] | null | null | null | silk/views/requests.py | pikhovkin/django-silk | 669dcda2420802468596c16cd5e2a7068a03a96f | [
"MIT"
] | 1 | 2019-06-10T17:48:38.000Z | 2019-06-10T17:48:38.000Z | from django.db.models import Sum
from django.shortcuts import render
from django.template.context_processors import csrf
from django.utils.decorators import method_decorator
from django.views.generic import View
from silk.auth import login_possibly_required, permissions_possibly_required
from silk.models import Request, Response
from silk.request_filters import BaseFilter, filters_from_request
__author__ = 'mtford'
class RequestsView(View):
    """Silk dashboard view listing profiled requests.

    GET renders the request list applying the show/order/path parameters from
    the query string plus any filters stored in the session; POST replaces the
    session-stored filters and re-renders.
    """
    # Choices offered for the number of results per page.
    show = [5, 10, 25, 100, 250]
    default_show = 25
    # Ordering options: label shown in the UI plus an optional extra queryset
    # transform applied before ordering (e.g. annotating db_time).
    order_by = {
        'start_time': {
            'label': 'Recent',
            'additional_query_filter': None
        },
        'path': {
            'label': 'Path',
            'additional_query_filter': None
        },
        'num_sql_queries': {
            'label': 'Num. Queries',
            'additional_query_filter': None
        },
        'time_taken': {
            'label': 'Time',
            'additional_query_filter': lambda x: x.filter(time_taken__gte=0)
        },
        'db_time': {
            'label': 'Time on queries',
            'additional_query_filter': lambda x: x.only('pk').annotate(db_time=Sum('queries__time_taken'))
            .filter(db_time__gte=0)
        },
    }
    order_dir = {
        'ASC': {
            'label': 'Ascending'
        },
        'DESC': {
            'label': 'Descending'
        }
    }
    view_style = {
        'card': {
            'label': 'Cards'
        },
        'row': {
            'label': 'Rows'
        }
    }
    default_order_by = 'start_time'
    default_order_dir = 'DESC'
    default_view_style = 'card'
    # Session key under which the user's active filters are persisted.
    session_key_request_filters = 'request_filters'
    @property
    def options_order_by(self):
        # value/label pairs for the order-by <select> in the template.
        return [{'value': x, 'label': self.order_by[x]['label']} for x in self.order_by.keys()]
    @property
    def options_order_dir(self):
        return [{'value': x, 'label': self.order_dir[x]['label']} for x in self.order_dir.keys()]
    @property
    def options_view_style(self):
        return [{'value': x, 'label': self.view_style[x]['label']} for x in self.view_style.keys()]
    def _get_paths(self):
        # Distinct request paths, for the path filter dropdown.
        return Request.objects.values_list(
            'path',
            flat=True
        ).order_by(
            'path'
        ).distinct()
    def _get_views(self):
        # Distinct non-empty view names recorded against requests.
        return Request.objects.values_list(
            'view_name',
            flat=True
        ).exclude(
            view_name=''
        ).order_by(
            'view_name'
        ).distinct()
    def _get_status_codes(self):
        return Response.objects.values_list(
            'status_code',
            flat=True
        ).order_by(
            'status_code'
        ).distinct()
    def _get_methods(self):
        return Request.objects.values_list(
            'method',
            flat=True
        ).order_by(
            'method'
        ).distinct()
    def _get_objects(self, show=None, order_by=None, order_dir=None, path=None, filters=None):
        # Build the filtered/ordered queryset and truncate to `show` rows.
        if not filters:
            filters = []
        if not show:
            show = self.default_show
        query_set = Request.objects.all()
        if not order_by:
            order_by = self.default_order_by
        if not order_dir:
            order_dir = self.default_order_dir
        if order_by not in self.order_by.keys():
            raise RuntimeError('Unknown order_by: "%s"' % order_by)
        ob = self.order_by[order_by]
        if ob['additional_query_filter'] is not None:
            query_set = ob['additional_query_filter'](query_set)
        query_set = query_set.order_by('%s%s' % ('-' if order_dir == 'DESC' else '', order_by))
        if path:
            query_set = query_set.filter(path=path)
        for f in filters:
            # Each filter may both transform the queryset (joins/annotations)
            # and contribute a Q-style condition.
            query_set = f.contribute_to_query_set(query_set)
            query_set = query_set.filter(f)
        return query_set[:show]
    def _create_context(self, request):
        show = request.GET.get('show', self.default_show)
        order_by = request.GET.get('order_by', self.default_order_by)
        order_dir = request.GET.get('order_dir', self.default_order_dir)
        view_style = request.GET.get('view_style', self.default_view_style)
        # NOTE(review): a non-numeric ?show= value makes int() raise
        # ValueError (HTTP 500) — confirm upstream sanitization.
        if show:
            show = int(show)
        path = request.GET.get('path', None)
        # Filters are stored in the session as plain dicts (see post()).
        raw_filters = request.session.get(self.session_key_request_filters, {})
        context = {
            'show': show,
            'order_by': order_by,
            'order_dir': order_dir,
            'view_style': view_style,
            'request': request,
            'options_show': self.show,
            'options_order_by': self.options_order_by,
            'options_order_dir': self.options_order_dir,
            'options_view_style': self.options_view_style,
            'options_paths': self._get_paths(),
            'options_status_codes': self._get_status_codes(),
            'options_methods': self._get_methods(),
            'view_names': self._get_views(),
            'filters': raw_filters
        }
        context.update(csrf(request))
        if path:
            context['path'] = path
        # Rehydrate filter objects from their serialized session form.
        context['results'] = self._get_objects(show, order_by, order_dir, path,
                                               filters=[BaseFilter.from_dict(x) for _, x in raw_filters.items()])
        return context
    @method_decorator(login_possibly_required)
    @method_decorator(permissions_possibly_required)
    def get(self, request):
        return render(request, 'silk/requests.html', self._create_context(request))
    @method_decorator(login_possibly_required)
    @method_decorator(permissions_possibly_required)
    def post(self, request):
        # Replace the session's filter set with the one submitted in the form.
        filters = filters_from_request(request)
        request.session[self.session_key_request_filters] = {ident: f.as_dict() for ident, f in filters.items()}
        return render(request, 'silk/requests.html', self._create_context(request))
| 33.454545 | 113 | 0.590014 |
ace2a9cdb9d849a8fcb2e2382e69022f371f400e | 2,531 | py | Python | tests/functional/regressions/test_issue109.py | alexchamberlain/tartiflette | 6904b0f47770c348553e907be5f5bdb0929fe149 | [
"MIT"
] | null | null | null | tests/functional/regressions/test_issue109.py | alexchamberlain/tartiflette | 6904b0f47770c348553e907be5f5bdb0929fe149 | [
"MIT"
] | 1 | 2020-08-11T15:41:41.000Z | 2020-08-11T15:41:41.000Z | tests/functional/regressions/test_issue109.py | alexchamberlain/tartiflette | 6904b0f47770c348553e907be5f5bdb0929fe149 | [
"MIT"
] | null | null | null | import pytest
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "query,expected",
    [
        (
            """
            query {
                dog {
                    name {
                        a
                    }
                }
            }
            """,
            {
                "data": None,
                "errors": [
                    {
                        "message": "field < name > is a leaf and thus can't have a selection set",
                        "path": ["dog", "name"],
                        "locations": [{"line": 4, "column": 21}],
                    }
                ],
            },
        ),
        (
            """
            query {
                dog
            }
            """,
            {
                "data": None,
                "errors": [
                    {
                        "message": "field < dog > is not a leaf and thus must have a selection set",
                        "path": ["dog"],
                        "locations": [{"line": 3, "column": 17}],
                    }
                ],
            },
        ),
        (
            """
            fragment doggy on Dog {
                name {
                    a
                }
            }

            query {
                dog {
                    ... doggy
                }
            }
            """,
            {
                "data": None,
                "errors": [
                    {
                        "message": "field < name > is a leaf and thus can't have a selection set",
                        "path": ["dog", "name"],
                        "locations": [{"line": 3, "column": 17}],
                    }
                ],
            },
        ),
        (
            """
            query {
                dog {
                    ... on Dog {
                        name {
                            a
                        }
                    }
                }
            }
            """,
            {
                "data": None,
                "errors": [
                    {
                        "message": "field < name > is a leaf and thus can't have a selection set",
                        "path": ["dog", "name"],
                        "locations": [{"line": 5, "column": 25}],
                    }
                ],
            },
        ),
    ],
)
@pytest.mark.ttftt_engine()
async def test_issue109(query, expected, engine):
    """Regression test for issue #109: selection-set validation errors.

    Leaf fields must not carry a selection set and non-leaf fields must carry
    one, whether reached directly, via a named fragment, or an inline fragment.
    """
    assert await engine.execute(query) == expected
| 25.826531 | 100 | 0.247728 |
ace2ab231f48b91e07a0eef3efe0be64f04a92d5 | 8,195 | py | Python | ansys/dpf/core/operators/math/scale_by_field_fc.py | jfthuong/pydpf-core | bf2895ebc546e0004f759289bfc9a23196559ac3 | [
"MIT"
] | 18 | 2021-10-16T10:38:29.000Z | 2022-03-29T11:26:42.000Z | ansys/dpf/core/operators/math/scale_by_field_fc.py | jfthuong/pydpf-core | bf2895ebc546e0004f759289bfc9a23196559ac3 | [
"MIT"
] | 79 | 2021-10-11T23:18:54.000Z | 2022-03-29T14:53:14.000Z | ansys/dpf/core/operators/math/scale_by_field_fc.py | jfthuong/pydpf-core | bf2895ebc546e0004f759289bfc9a23196559ac3 | [
"MIT"
] | 5 | 2021-11-29T18:35:37.000Z | 2022-03-16T16:49:21.000Z | """
scale_by_field_fc
=================
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
# NOTE: this operator class is autogenerated (see the module docstring);
# prefer regenerating over hand-editing.
class scale_by_field_fc(Operator):
    """Scales a field (in 0) by a scalar field (in 1). If one field's scoping
    has 'overall' location, then these field's values are applied on
    the entire other field.

    Parameters
    ----------
    field_or_fields_container_A : Field or FieldsContainer
        Field or fields container with only one field
        is expected
    field_or_fields_container_B : Field or FieldsContainer
        Field or fields container with only one field
        is expected

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> # Instantiate operator
    >>> op = dpf.operators.math.scale_by_field_fc()
    >>> # Make input connections
    >>> my_field_or_fields_container_A = dpf.Field()
    >>> op.inputs.field_or_fields_container_A.connect(my_field_or_fields_container_A)
    >>> my_field_or_fields_container_B = dpf.Field()
    >>> op.inputs.field_or_fields_container_B.connect(my_field_or_fields_container_B)
    >>> # Instantiate operator and connect inputs in one line
    >>> op = dpf.operators.math.scale_by_field_fc(
    ...     field_or_fields_container_A=my_field_or_fields_container_A,
    ...     field_or_fields_container_B=my_field_or_fields_container_B,
    ... )
    >>> # Get output data
    >>> result_fields_container = op.outputs.fields_container()
    """

    def __init__(
        self,
        field_or_fields_container_A=None,
        field_or_fields_container_B=None,
        config=None,
        server=None,
    ):
        # Register with the server under the operator's internal name, then
        # connect any inputs supplied to the constructor.
        super().__init__(name="scale_by_field_fc", config=config, server=server)
        self._inputs = InputsScaleByFieldFc(self)
        self._outputs = OutputsScaleByFieldFc(self)
        if field_or_fields_container_A is not None:
            self.inputs.field_or_fields_container_A.connect(field_or_fields_container_A)
        if field_or_fields_container_B is not None:
            self.inputs.field_or_fields_container_B.connect(field_or_fields_container_B)

    @staticmethod
    def _spec():
        # Static pin specification consumed by the Inputs/Outputs wrappers.
        description = """Scales a field (in 0) by a scalar field (in 1). If one field's scoping
            has 'overall' location, then these field's values are
            applied on the entire other field."""
        spec = Specification(
            description=description,
            map_input_pin_spec={
                0: PinSpecification(
                    name="field_or_fields_container_A",
                    type_names=["field", "fields_container"],
                    optional=False,
                    document="""Field or fields container with only one field
        is expected""",
                ),
                1: PinSpecification(
                    name="field_or_fields_container_B",
                    type_names=["field", "fields_container"],
                    optional=False,
                    document="""Field or fields container with only one field
        is expected""",
                ),
            },
            map_output_pin_spec={
                0: PinSpecification(
                    name="fields_container",
                    type_names=["fields_container"],
                    optional=False,
                    document="""""",
                ),
            },
        )
        return spec

    @staticmethod
    def default_config(server=None):
        """Returns the default config of the operator.

        This config can then be changed to the user needs and be used to
        instantiate the operator. The Configuration allows to customize
        how the operation will be processed by the operator.

        Parameters
        ----------
        server : server.DPFServer, optional
            Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
        """
        return Operator.default_config(name="scale_by_field_fc", server=server)

    @property
    def inputs(self):
        """Enables to connect inputs to the operator

        Returns
        --------
        inputs : InputsScaleByFieldFc
        """
        return super().inputs

    @property
    def outputs(self):
        """Enables to get outputs of the operator by evaluating it

        Returns
        --------
        outputs : OutputsScaleByFieldFc
        """
        return super().outputs
# NOTE: autogenerated companion class; regenerate rather than hand-edit.
class InputsScaleByFieldFc(_Inputs):
    """Intermediate class used to connect user inputs to
    scale_by_field_fc operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.scale_by_field_fc()
    >>> my_field_or_fields_container_A = dpf.Field()
    >>> op.inputs.field_or_fields_container_A.connect(my_field_or_fields_container_A)
    >>> my_field_or_fields_container_B = dpf.Field()
    >>> op.inputs.field_or_fields_container_B.connect(my_field_or_fields_container_B)
    """

    def __init__(self, op: Operator):
        # Wrap each declared input pin of the operator in an Input object.
        # The trailing -1 argument mirrors the generator's convention for all
        # operators; presumably a default ownership/index flag — not
        # documented here, confirm against ansys.dpf.core.inputs.Input.
        super().__init__(scale_by_field_fc._spec().inputs, op)
        self._field_or_fields_container_A = Input(
            scale_by_field_fc._spec().input_pin(0), 0, op, -1
        )
        self._inputs.append(self._field_or_fields_container_A)
        self._field_or_fields_container_B = Input(
            scale_by_field_fc._spec().input_pin(1), 1, op, -1
        )
        self._inputs.append(self._field_or_fields_container_B)

    @property
    def field_or_fields_container_A(self):
        """Allows to connect field_or_fields_container_A input to the operator.

        Field or fields container with only one field
        is expected

        Parameters
        ----------
        my_field_or_fields_container_A : Field or FieldsContainer

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.scale_by_field_fc()
        >>> op.inputs.field_or_fields_container_A.connect(my_field_or_fields_container_A)
        >>> # or
        >>> op.inputs.field_or_fields_container_A(my_field_or_fields_container_A)
        """
        return self._field_or_fields_container_A

    @property
    def field_or_fields_container_B(self):
        """Allows to connect field_or_fields_container_B input to the operator.

        Field or fields container with only one field
        is expected

        Parameters
        ----------
        my_field_or_fields_container_B : Field or FieldsContainer

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.scale_by_field_fc()
        >>> op.inputs.field_or_fields_container_B.connect(my_field_or_fields_container_B)
        >>> # or
        >>> op.inputs.field_or_fields_container_B(my_field_or_fields_container_B)
        """
        return self._field_or_fields_container_B
# NOTE: autogenerated companion class; regenerate rather than hand-edit.
class OutputsScaleByFieldFc(_Outputs):
    """Intermediate class used to get outputs from
    scale_by_field_fc operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.scale_by_field_fc()
    >>> # Connect inputs : op.inputs. ...
    >>> result_fields_container = op.outputs.fields_container()
    """

    def __init__(self, op: Operator):
        # Wrap the single declared output pin (0) in an Output object.
        super().__init__(scale_by_field_fc._spec().outputs, op)
        self._fields_container = Output(scale_by_field_fc._spec().output_pin(0), 0, op)
        self._outputs.append(self._fields_container)

    @property
    def fields_container(self):
        """Allows to get fields_container output of the operator

        Returns
        ----------
        my_fields_container : FieldsContainer

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.scale_by_field_fc()
        >>> # Connect inputs : op.inputs. ...
        >>> result_fields_container = op.outputs.fields_container()
        """  # noqa: E501
        return self._fields_container
| 34.432773 | 95 | 0.637828 |
ace2aca51a71873dc4044d68af340b22b0d2ef2e | 1,697 | py | Python | setup.py | vesposito/ucsmsdk | 03bc1e19b8582c68d3a94a20a939a48fe91332f8 | [
"Apache-2.0"
] | null | null | null | setup.py | vesposito/ucsmsdk | 03bc1e19b8582c68d3a94a20a939a48fe91332f8 | [
"Apache-2.0"
] | null | null | null | setup.py | vesposito/ucsmsdk | 03bc1e19b8582c68d3a94a20a939a48fe91332f8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# Runtime and test dependencies are maintained in the requirements files so
# they are declared in exactly one place.
with open('requirements.txt') as rf:
    requirements = rf.readlines()
with open('test-requirements.txt') as rf:
    test_requirements = rf.readlines()
# Long description shown on PyPI: the README followed by the changelog
# (with the reST changelog marker stripped).
with open('README.md') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')
setup(
    name='ucsmsdk',
    version='0.9.10',
    description="Python SDK for Cisco UCS",
    long_description=readme + '\n\n' + history,
    long_description_content_type='text/markdown',
    author="Cisco Systems Inc",
    author_email='ucs-python@cisco.com',
    url='https://github.com/CiscoUcs/ucsmsdk',
    packages=[
        'ucsmsdk',
    ],
    package_dir={'ucsmsdk':
                 'ucsmsdk'},
    include_package_data=True,
    install_requires=requirements,
    license="http://www.apache.org/licenses/LICENSE-2.0",
    zip_safe=False,
    keywords='ucsmsdk',
    # NOTE(review): classifiers advertise Python 2.6-3.4 only; confirm they
    # still reflect the versions actually supported/tested.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    tests_require=test_requirements,
    test_suite='nose.collector',
    extras_require={
        'ssl': ['pyOpenSSL'],
        'docs': ['sphinx<1.3', 'sphinxcontrib-napoleon', 'sphinx_rtd_theme'],
    }
)
| 29.77193 | 77 | 0.624632 |
ace2acb0608958f861e2255a02d977d44e01b667 | 152 | py | Python | avocato/__init__.py | tsifrer/avocato | 9d5572913437646435cf0c51d36cad72dc7c8425 | [
"MIT"
] | 4 | 2019-01-11T08:41:08.000Z | 2019-01-11T08:52:30.000Z | avocato/__init__.py | tsifrer/avocato | 9d5572913437646435cf0c51d36cad72dc7c8425 | [
"MIT"
] | 2 | 2020-02-11T23:38:28.000Z | 2020-06-05T19:46:11.000Z | avocato/__init__.py | tsifrer/avocato | 9d5572913437646435cf0c51d36cad72dc7c8425 | [
"MIT"
] | 1 | 2019-01-11T08:41:15.000Z | 2019-01-11T08:41:15.000Z | from .exceptions import * # noqa
from .fields import * # noqa
from .objects import * # noqa
from .validators import * # noqa
__version__ = '0.1.0'
| 21.714286 | 33 | 0.677632 |
ace2ad9f91263e7093040a22ef6dc4c267d06c02 | 3,187 | py | Python | kepler/emulator/Emulator.py | ringer-atlas/kepler | 49d2c1a442995e776976780535c8116fc8a96731 | [
"MIT"
] | null | null | null | kepler/emulator/Emulator.py | ringer-atlas/kepler | 49d2c1a442995e776976780535c8116fc8a96731 | [
"MIT"
] | null | null | null | kepler/emulator/Emulator.py | ringer-atlas/kepler | 49d2c1a442995e776976780535c8116fc8a96731 | [
"MIT"
] | 3 | 2021-09-10T18:11:21.000Z | 2022-03-17T15:36:29.000Z |
__all__ = ["EmulationTool", "Accept", "attach"]
from Gaugi import ToolSvc
from Gaugi import Algorithm
from Gaugi import StatusCode
from Gaugi.messenger.macros import *
import collections
#
# Emulator
#
class EmulationTool(Algorithm):
    """Registry of hypothesis (selector) tools keyed by their name.

    Tools are attached with ``+=`` and driven through the standard
    initialize/execute/finalize lifecycle; ``accept`` forwards an event
    context to the named tool's own ``accept`` method.
    """

    #
    # Constructor
    #
    def __init__(self):
        Algorithm.__init__(self, "Emulator")
        # name -> tool registry (name-mangled, private to this class)
        self.__tools = {}

    #
    # Add a selector to the list (replaces an existing entry with the same name)
    #
    def __add__(self, tool):
        self.__tools[tool.name()] = tool
        return self

    #
    # Get the hypo tool registered under *key*, or None if absent
    #
    def retrieve(self, key):
        return self.__tools[key] if self.isValid(key) else None

    #
    # Initialize method: propagate dataframe/context/level and init every tool
    #
    def initialize(self):
        for tool in self.__tools.values():
            MSG_INFO(self, 'Initializing %s tool', tool.name())
            tool.dataframe = self.dataframe
            tool.setContext(self.getContext())
            tool.level = self.level
            if tool.initialize().isFailure():
                MSG_ERROR(self, 'Can not initialize %s', tool.name())
        return StatusCode.SUCCESS

    #
    # Execute method: no-op — tools are driven through accept(), not execute()
    #
    def execute(self, context):
        return StatusCode.SUCCESS

    #
    # Accept method: run the tool registered as *key* against *context*.
    # Aborts (MSG_FATAL) when the key was never registered.
    #
    def accept(self, context, key):
        if self.isValid(key):
            return self.__tools[key].accept(context)
        else:
            MSG_FATAL(self, "The key %s is not in the emulation", key)

    #
    # Finalized method: finalize every registered tool
    #
    def finalize(self):
        for key, tool in self.__tools.items():
            MSG_INFO(self, 'Finalizing %s tool', key)
            if tool.finalize().isFailure():
                # BUG FIX: the original passed the bound method (tool.name)
                # instead of calling it, so the log showed a method repr
                # rather than the tool's name.
                MSG_ERROR(self, 'Can not finalizing %s', tool.name())
        return StatusCode.SUCCESS

    #
    # Check if the selector is installed
    #
    def isValid(self, key):
        return key in self.__tools
#
# Add the emulator tool into the tool service by default.
# Import-time side effect: a single shared EmulationTool instance becomes
# available to other modules via ToolSvc.retrieve("Emulator").
#
ToolSvc += EmulationTool()
#
# Helper to avoid to much repetition code into this file
#
def attach(hypos):
    """Register each hypo tool with the shared Emulator and return their names.

    Already-registered tools are not re-added, but their names are still
    included in the returned list.
    """
    from Gaugi import ToolSvc
    registry = ToolSvc.retrieve("Emulator")
    registered = []
    for tool in hypos:
        tool_name = tool.name()
        if not registry.isValid(tool_name):
            registry += tool
        registered.append(tool_name)
    return registered
#
# Accept
#
class Accept(object):
    """Aggregated pass/fail decision for a chain of named cuts.

    The object is truthy only when every registered cut passed (vacuously
    True with no cuts), and it can carry arbitrary key/value decorations.
    """

    #
    # Constructor
    #
    def __init__(self, name, results=None):
        """
        name    -- identifier of this decision (e.g. the chain name).
        results -- optional iterable of (cut_name, passed) pairs used to
                   seed the cut table; insertion order is preserved.
        """
        # FIX: the original used a mutable default argument (results=[]);
        # None is behaviorally identical and avoids the shared-default pitfall.
        self.__name = name
        self.__results = collections.OrderedDict(results or [])
        self.__decoration = {}

    #
    # Get the accept name
    #
    def name(self):
        return self.__name

    #
    # Add new cut (starts out failing until setCutResult is called)
    #
    def addCut(self, key):
        self.__results[key] = False

    #
    # Set cut result value
    #
    def setCutResult(self, key, value):
        self.__results[key] = value

    #
    # Get cut result value. Keeps the original best-effort behaviour: an
    # unknown key prints the KeyError and returns None instead of raising.
    #
    def getCutResult(self, key):
        try:
            return self.__results[key]
        except KeyError as e:
            print(e)

    #
    # Is passed: True only if every registered cut passed.
    # FIX: removed the unused throwaway list the original built first.
    #
    def __bool__(self):
        return all(self.__results.values())

    #
    # Add decoration
    #
    def setDecor(self, key, value):
        self.__decoration[key] = value

    #
    # Get decoration (raises KeyError if absent)
    #
    def getDecor(self, key):
        return self.__decoration[key]
| 16.773684 | 67 | 0.628805 |
ace2adb79ef1f178b4b85ca5b879f6e3ca5fd0b8 | 22,595 | py | Python | esp_sdk/models/stat_signature.py | zimmermanc/esp-sdk-python | cdef13c0dc6c3996b6c444160c71b2f1e3910c97 | [
"MIT"
] | 6 | 2017-06-05T20:37:19.000Z | 2019-04-10T08:43:59.000Z | esp_sdk/models/stat_signature.py | zimmermanc/esp-sdk-python | cdef13c0dc6c3996b6c444160c71b2f1e3910c97 | [
"MIT"
] | 18 | 2016-06-22T16:14:33.000Z | 2018-10-29T21:53:15.000Z | esp_sdk/models/stat_signature.py | zimmermanc/esp-sdk-python | cdef13c0dc6c3996b6c444160c71b2f1e3910c97 | [
"MIT"
] | 18 | 2016-07-27T19:20:01.000Z | 2020-11-17T02:09:58.000Z | # coding: utf-8
"""
ESP Documentation
The Evident Security Platform API (version 2.0) is designed to allow users granular control over their Amazon Web Service security experience by allowing them to review alerts, monitor signatures, and create custom signatures.
OpenAPI spec version: v2_sdk
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
from ..extensions.base_object import BaseObject
import re
class StatSignature(BaseObject):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Hand-compacted: the thirty near-identical getter/setter pairs the
    generator emitted are now produced programmatically (see the property
    loop after the class body).  The public interface is unchanged —
    constructor keywords, attribute properties, ``swagger_types`` /
    ``attribute_map``, ``to_dict`` / ``to_str`` / equality all behave as
    before.
    """

    # (attribute name, swagger type) in the generator's original order.
    # Every attribute maps to a JSON key of the same name, so
    # attribute_map is the identity mapping.
    _FIELDS = (
        ('id', 'int'),
        ('new_1h_pass', 'int'),
        ('new_1d_pass', 'int'),
        ('new_1w_pass', 'int'),
        ('old_pass', 'int'),
        ('new_1h_fail', 'int'),
        ('new_1d_fail', 'int'),
        ('new_1w_fail', 'int'),
        ('old_fail', 'int'),
        ('new_1h_warn', 'int'),
        ('new_1d_warn', 'int'),
        ('new_1w_warn', 'int'),
        ('old_warn', 'int'),
        ('new_1h_error', 'int'),
        ('new_1d_error', 'int'),
        ('new_1w_error', 'int'),
        ('old_error', 'int'),
        ('suppressed_pass', 'int'),
        ('suppressed_fail', 'int'),
        ('suppressed_warn', 'int'),
        ('suppressed_error', 'int'),
        ('new_1h_info', 'int'),
        ('new_1d_info', 'int'),
        ('new_1w_info', 'int'),
        ('old_info', 'int'),
        ('suppressed_info', 'int'),
        ('signature', 'Signature'),
        ('signature_id', 'int'),
        ('stat', 'Stat'),
        ('stat_id', 'int'),
    )

    def __init__(self, id=None, new_1h_pass=None, new_1d_pass=None,
                 new_1w_pass=None, old_pass=None, new_1h_fail=None,
                 new_1d_fail=None, new_1w_fail=None, old_fail=None,
                 new_1h_warn=None, new_1d_warn=None, new_1w_warn=None,
                 old_warn=None, new_1h_error=None, new_1d_error=None,
                 new_1w_error=None, old_error=None, suppressed_pass=None,
                 suppressed_fail=None, suppressed_warn=None,
                 suppressed_error=None, new_1h_info=None, new_1d_info=None,
                 new_1w_info=None, old_info=None, suppressed_info=None,
                 signature=None, signature_id=None, stat=None, stat_id=None):
        """
        StatSignature - a model defined in Swagger.

        Every argument is optional and seeds the like-named property;
        parameter order matches the generated original, so positional
        callers keep working.
        """
        # Snapshot first so only the parameters (plus self) are captured.
        given = locals()
        self.swagger_types = {name: swagger_type
                              for name, swagger_type in self._FIELDS}
        self.attribute_map = {name: name for name, _ in self._FIELDS}
        for name, _ in self._FIELDS:
            setattr(self, '_' + name, given[name])

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, StatSignature):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other


def _stat_signature_property(name):
    """Build the swagger-style getter/setter property for *name*."""
    def _get(self):
        return getattr(self, '_' + name)

    def _set(self, value):
        setattr(self, '_' + name, value)

    return property(
        _get, _set,
        doc="Gets or sets the %s of this StatSignature." % name)


# Attach one property per field; replaces ~800 lines of generated
# boilerplate while keeping the exact same attribute access behaviour.
for _field_name, _ in StatSignature._FIELDS:
    setattr(StatSignature, _field_name, _stat_signature_property(_field_name))
| 25.882016 | 552 | 0.586103 |
ace2adcbf68681201b682aa2f6f5219679b68365 | 550 | py | Python | ex103.py | igormba/python-exercises | 000190c4b62dc64bbb2fb039a103890945b88fa5 | [
"MIT"
] | null | null | null | ex103.py | igormba/python-exercises | 000190c4b62dc64bbb2fb039a103890945b88fa5 | [
"MIT"
] | null | null | null | ex103.py | igormba/python-exercises | 000190c4b62dc64bbb2fb039a103890945b88fa5 | [
"MIT"
] | null | null | null | '''Faça um programa que tenha uma função chamada ficha(), que receba dois parâmetros opcionais: o nome de um jogador e quantos gols ele marcou. O programa deverá ser capaz de mostrar a ficha do jogador, mesmo que algum dado não tenha sido informado corretamente.'''
def ficha(jog='<Desconhecido>', gol=0):
    """Print and return the player's scoring summary.

    jog -- player name; defaults to '<Desconhecido>' (unknown).
    gol -- goal count; defaults to 0.

    Returns the formatted message so callers (and tests) can reuse it;
    previously the function returned None, so this is backward compatible.
    """
    msg = f'O jogador {jog} fez {gol} gol(s) no campeonato.'
    print(msg)
    return msg
# Collect the player's name and goal count from the user; fall back to the
# defaults defined by ficha() when the input is blank or non-numeric.
nome = str(input('Nome do Jogador: '))
gols_text = str(input('Número de Gols: '))
gols = int(gols_text) if gols_text.isnumeric() else 0
if nome.strip():
    ficha(nome, gols)
else:
    ficha(gol=gols)
| 32.352941 | 265 | 0.670909 |
ace2af78926d22784a898a384e456bae6e64a5ab | 1,561 | py | Python | Test/En/run_05_3_ac_overlapping_hyponyms.py | mack-the-psych/plimac3 | 7b47abf7a087961e5a3e9c90b1ae20d07b3d6898 | [
"MIT"
] | null | null | null | Test/En/run_05_3_ac_overlapping_hyponyms.py | mack-the-psych/plimac3 | 7b47abf7a087961e5a3e9c90b1ae20d07b3d6898 | [
"MIT"
] | null | null | null | Test/En/run_05_3_ac_overlapping_hyponyms.py | mack-the-psych/plimac3 | 7b47abf7a087961e5a3e9c90b1ae20d07b3d6898 | [
"MIT"
] | null | null | null | import pandas as pd
import filecmp
from ac_overlapping_synset_lemma import *
data_dir = r'../../Data/En/'
orig_dir = r'./orig_data/'
test_file = r'Overlapping-Hyponyms-Lemma.csv'
Lemma_count_start_from_question = 23
hyponyms_count_start_from_question = Lemma_count_start_from_question
Lemma_count_start_from_passage = 5
df_ac_lemma_q = pd.read_csv(data_dir + r'Lemma-Question.csv')
df_ac_lemma_q = df_ac_lemma_q.set_index('AC_Doc_ID')
df_ac_lemma_p = pd.read_csv(data_dir + r'Lemma-Passage.csv')
df_ac_lemma_p = df_ac_lemma_p.set_index('AC_Doc_ID')
df_ac_hyponyms_q = pd.read_csv(data_dir + r'Hyponyms-Question.csv')
df_ac_hyponyms_q = df_ac_hyponyms_q.set_index('AC_Doc_ID')
# modified by Makoto.Sano@Mack-the-Psych.com 09/21/2020
df_ac_overlapping_hyp_lemma = ac_overlapping_synset_lemma(df_ac_lemma_q, 'Question#', 'Pre_Col_Name',
Lemma_count_start_from_question - 1, df_ac_hyponyms_q,
hyponyms_count_start_from_question - 1, None,
'Passage_Name', 'Reference_Passage_Section',
df_ac_lemma_p, 'Passage_Name',
'Passage_Section', Lemma_count_start_from_passage -1, 'hypo')
'''
column_list = []
for x in df_ac_overlapping_hyp_lemma.columns:
column_list = column_list + [x.replace('_s_', '_hypo_')]
df_ac_overlapping_hyp_lemma.columns = column_list
'''
df_ac_overlapping_hyp_lemma.to_csv(data_dir + test_file)
from file_cmp_diff_ratio import *
file_cmp_diff_ratio(data_dir + test_file, orig_dir + test_file)
| 36.302326 | 101 | 0.735426 |
ace2b04e66be90963ae905d16e55959e8a7a66f5 | 2,137 | py | Python | WeChatWorkTool/models.py | zxj17815/wechatwork-tool | a2977fbd367e2f7d426429ad20d246a950e05f05 | [
"MIT"
] | null | null | null | WeChatWorkTool/models.py | zxj17815/wechatwork-tool | a2977fbd367e2f7d426429ad20d246a950e05f05 | [
"MIT"
] | null | null | null | WeChatWorkTool/models.py | zxj17815/wechatwork-tool | a2977fbd367e2f7d426429ad20d246a950e05f05 | [
"MIT"
] | null | null | null | import datetime
import json
import requests
from django.db import models
# WeCHatWor API URL
BASE_URL = "https://qyapi.weixin.qq.com/cgi-bin/"
# Create your models here.
class AccessToken(models.Model):
    """WeChat Work (企业微信) application access_token record.

    Caches the token per app together with its expiry so repeated calls
    only hit the remote API once the cached token has expired.
    """
    appname = models.CharField(max_length=128, verbose_name='appname')
    appid = models.CharField(max_length=128, verbose_name='appid')
    corpid = models.CharField(max_length=128, verbose_name='corpid')  # corpid
    appsecret = models.CharField(max_length=256, verbose_name='APPSECRET')
    # Expiry timestamp of the cached token.
    expires_in = models.DateTimeField(verbose_name='expires_in', auto_now=False, auto_now_add=False)
    # The cached access_token; the official token can be up to 512 chars,
    # so mind the length here (original author's note, translated).
    token = models.CharField(max_length=255, unique=True, blank=True, null=True, verbose_name='token',
                             db_index=True)
    call_back_url = models.URLField(null=True, blank=True)
    call_back_token = models.CharField(max_length=256, null=True, blank=True)
    call_back_key = models.CharField(max_length=256, null=True, blank=True)

    class Meta:
        verbose_name = '企业微信access_token信息'
        verbose_name_plural = verbose_name

    def request_access_token(self):
        """Fetch a fresh token payload from the gettoken endpoint.

        Returns the decoded JSON dict; on success it contains
        'access_token' and 'expires_in', otherwise an errcode payload.
        """
        res = requests.get(
            BASE_URL + 'gettoken',
            {'corpid': self.corpid, 'corpsecret': self.appsecret},
            timeout=10,  # robustness fix: never hang forever on the remote API
        )
        # Equivalent to json.loads(res.content.decode('utf-8')).
        return res.json()

    def get_access_token(self):
        """Return a valid access_token, refreshing and persisting if expired.

        Returns None when the remote API refuses to issue a token.
        """
        # Naive local time, matching how expires_in has been stored so far.
        now = datetime.datetime.now()
        if self.expires_in > now:
            return self.token
        data = self.request_access_token()
        if 'access_token' not in data:
            return None
        # Base the expiry on the pre-request timestamp so the cached token
        # is never considered valid longer than the API actually granted.
        self.expires_in = now + datetime.timedelta(seconds=int(data['expires_in']))
        self.token = data['access_token']
        self.save()
        return self.token
| 37.491228 | 108 | 0.630323 |
ace2b0abc2b087521f2b588d090d4f1834d44773 | 164 | py | Python | Chapter 05/apyori_ex.py | bpbpublications/Essentials-of-Deep-Learning-and-AI | 6ef6a6958afe88c11b1bbb18932cc43df2d43b29 | [
"MIT"
] | null | null | null | Chapter 05/apyori_ex.py | bpbpublications/Essentials-of-Deep-Learning-and-AI | 6ef6a6958afe88c11b1bbb18932cc43df2d43b29 | [
"MIT"
] | null | null | null | Chapter 05/apyori_ex.py | bpbpublications/Essentials-of-Deep-Learning-and-AI | 6ef6a6958afe88c11b1bbb18932cc43df2d43b29 | [
"MIT"
] | 1 | 2021-11-29T10:18:57.000Z | 2021-11-29T10:18:57.000Z | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from apyori import apriori
# Load the market-basket transactions (one row per transaction).
store_data = pd.read_csv('./store_data.csv')
# NOTE(review): head(0) returns an empty preview and the result is discarded —
# presumably meant store_data.head() for interactive inspection; confirm intent.
store_data.head(0)
| 18.222222 | 44 | 0.786585 |
ace2b0ffc04f34514435659e40b3017d45a7b701 | 10,092 | py | Python | vaccinate/api/serialize.py | CAVaccineInventory/vial | 5c5f7eb344d28afb91e947243a6f96b337ea0ce2 | [
"MIT"
] | 7 | 2021-06-28T17:33:47.000Z | 2022-02-12T21:54:59.000Z | vaccinate/api/serialize.py | CAVaccineInventory/vial | 5c5f7eb344d28afb91e947243a6f96b337ea0ce2 | [
"MIT"
] | 104 | 2021-06-17T21:25:30.000Z | 2022-03-28T14:21:57.000Z | vaccinate/api/serialize.py | CAVaccineInventory/vial | 5c5f7eb344d28afb91e947243a6f96b337ea0ce2 | [
"MIT"
] | 1 | 2021-06-25T17:52:23.000Z | 2021-06-25T17:52:23.000Z | import itertools
from collections import namedtuple
from typing import Dict
import beeline
import orjson
from core.models import Location
from django.db.models.query import QuerySet
# Usage notice embedded in every public API payload so downstream consumers
# see the accuracy caveats and know how to reach the partners team.
VTS_USAGE = {
    "notice": (
        "Please contact Vaccinate The States and let "
        "us know if you plan to rely on or publish this data. This "
        "data is provided with best-effort accuracy. If you are "
        "displaying this data, we expect you to display it responsibly. "
        "Please do not display it in a way that is easy to misread."
    ),
    "contact": {"partnersEmail": "api@vaccinatethestates.com"},
}

# Pluggable description of one output format (JSON, GeoJSON, ids, ...):
# how to prepare the queryset, the payload's framing bytes, and the
# per-record transform/serialize steps used by build_stream().
OutputFormat = namedtuple(
    "OutputFormat",
    # transform_batch runs once against a batch that has been prepared by calling transform on each item
    (
        "prepare_queryset",
        "start",
        "transform",
        "transform_batch",
        "serialize",
        "separator",
        "end",
        "content_type",
    ),
)
def build_stream(
    qs, stream_qs, formatter, beeline_trace_name, transform_batch_size=1000
):
    """Return a generator function that yields the serialized payload.

    The current beeline trace/span ids are captured here (at build time) so
    the deferred generator can attach its span to the request's trace even
    though it runs later.
    """
    active_beeline = beeline.get_beeline()
    if active_beeline:
        trace_id = active_beeline.tracer_impl.get_active_trace_id()
        parent_id = active_beeline.tracer_impl.get_active_span().id
    else:
        trace_id = None
        parent_id = None

    @beeline.traced(beeline_trace_name, trace_id=trace_id, parent_id=parent_id)
    def stream():
        # Opening framing: `start` may be a constant or a callable of the qs.
        yield formatter.start(qs) if callable(formatter.start) else formatter.start
        emitted_any = False
        for batch in chunks(stream_qs, transform_batch_size):
            transformed = formatter.transform_batch(
                [formatter.transform(record) for record in batch]
            )
            for item in transformed:
                # Separator goes between records, never before the first one.
                if emitted_any and formatter.separator:
                    yield formatter.separator
                emitted_any = True
                yield formatter.serialize(item)
        yield formatter.end(qs)

    return stream
def location_json_queryset(queryset: QuerySet[Location]) -> QuerySet[Location]:
    """Attach the select/prefetch/only clauses needed to serialize locations
    with location_json() without per-row queries."""
    related = queryset.select_related(
        "state",
        "county",
        "location_type",
        "provider__provider_type",
    ).prefetch_related("concordances")
    # Restrict the SELECT to exactly the columns location_json() reads.
    wanted_fields = (
        "public_id",
        "name",
        "state__abbreviation",
        "latitude",
        "longitude",
        "location_type__name",
        "import_ref",
        "phone_number",
        "full_address",
        "city",
        "county__name",
        "google_places_id",
        "vaccinefinder_location_id",
        "vaccinespotter_location_id",
        "vaccines_offered",
        "accepts_appointments",
        "accepts_walkins",
        "zip_code",
        "hours",
        "hours_json",
        "website",
        "preferred_contact_method",
        "provider__name",
        "provider__vaccine_info_url",
        "provider__provider_type__name",
        "dn_latest_non_skip_report",
    )
    return related.only(*wanted_fields)
def location_json(
    location: Location, include_soft_deleted: bool = False
) -> Dict[str, object]:
    """Serialize a Location into the public API dictionary shape.

    When include_soft_deleted is True the 'soft_deleted' flag is added;
    otherwise the key is omitted entirely.
    """
    provider_blob = (
        {
            "name": location.provider.name,
            "type": location.provider.provider_type.name,
        }
        if location.provider
        else None
    )
    data: Dict[str, object] = {
        "id": location.public_id,
        "name": location.name,
        "state": location.state.abbreviation,
        "latitude": float(location.latitude),
        "longitude": float(location.longitude),
        "location_type": location.location_type.name,
        "import_ref": location.import_ref,
        "phone_number": location.phone_number,
        "full_address": location.full_address,
        "city": location.city,
        "county": location.county.name if location.county else None,
        "google_places_id": location.google_places_id,
        "vaccinefinder_location_id": location.vaccinefinder_location_id,
        "vaccinespotter_location_id": location.vaccinespotter_location_id,
        "zip_code": location.zip_code,
        "hours": location.hours,
        "website": location.website,
        "preferred_contact_method": location.preferred_contact_method,
        "provider": provider_blob,
        "concordances": [str(c) for c in location.concordances.all()],
    }
    if include_soft_deleted:
        data["soft_deleted"] = location.soft_deleted
    return data
def location_geojson(location: Location) -> Dict[str, object]:
    """Serialize *location* as a GeoJSON Feature (the location_json output
    reshaped by to_geojson)."""
    return to_geojson(location_json(location))
def to_geojson(properties):
    """Convert a flat serialized-location dict into a GeoJSON Feature.

    'id', 'latitude' and 'longitude' move into the Feature's id and Point
    geometry; every other key is kept under 'properties'.
    """
    geometry_keys = ("id", "latitude", "longitude")
    feature_properties = {
        key: value
        for key, value in properties.items()
        if key not in geometry_keys
    }
    return {
        "type": "Feature",
        "id": properties["id"],
        "properties": feature_properties,
        "geometry": {
            "type": "Point",
            # GeoJSON order is [longitude, latitude].
            "coordinates": [properties["longitude"], properties["latitude"]],
        },
    }
def location_v0_json(location: Location) -> Dict[str, object]:
    """Serialize a Location into the public v0 API shape."""
    provider_blob = None
    if location.provider:
        provider_blob = {
            "name": location.provider.name,
            "provider_type": location.provider.provider_type.name,
            "vaccine_info_url": location.provider.vaccine_info_url,
        }
    report = location.dn_latest_non_skip_report
    last_verified = report.created_at.isoformat() if report else None
    vts_url = "https://www.vaccinatethestates.com/?lng={}&lat={}#{}".format(
        location.longitude, location.latitude, location.public_id
    )
    return {
        "id": location.public_id,
        "name": location.name,
        "provider": provider_blob,
        "state": location.state.abbreviation,
        "latitude": float(location.latitude),
        "longitude": float(location.longitude),
        "location_type": location.location_type.name,
        "phone_number": location.phone_number,
        "full_address": location.full_address,
        "city": location.city,
        "county": location.county.name if location.county else None,
        "zip_code": location.zip_code,
        "hours": {"unstructured": location.hours, "structured": location.hours_json},
        "website": location.website,
        "vaccines_offered": location.vaccines_offered,
        "accepts_appointments": location.accepts_appointments,
        "accepts_walkins": location.accepts_walkins,
        "concordances": [str(c) for c in location.concordances.all()],
        "last_verified_by_vts": last_verified,
        "vts_url": vts_url,
    }
def split_geojson_by_state(locations_geojson):
    """Yield (state, FeatureCollection) pairs, one per state.

    Features are grouped by their "state" property and keep the order in
    which they appeared in *locations_geojson*. Each yielded collection
    carries the shared VTS_USAGE notice.
    """
    grouped = {}
    for feature in locations_geojson["features"]:
        state = feature["properties"]["state"]
        grouped.setdefault(state, []).append(feature)
    for state, state_features in grouped.items():
        collection = {
            "type": "FeatureCollection",
            "usage": VTS_USAGE,
            "features": state_features,
        }
        yield state, collection
def location_formats():
    """Return the OutputFormat registry for location exports.

    Extends the shared json/geojson/nlgeojson formats from make_formats()
    with the legacy "v0preview" JSON and GeoJSON shapes plus a bare "ids"
    list. All byte prefixes/suffixes are reproduced exactly as streamed.
    """
    # Usage notice that prefixes the v0preview JSON stream.
    v0_usage_prefix = (
        b'{"usage":{"notice":"Please contact Vaccinate The States and let '
        b"us know if you plan to rely on or publish this data. This "
        b"data is provided with best-effort accuracy. If you are "
        b"displaying this data, we expect you to display it responsibly. "
        b'Please do not display it in a way that is easy to misread.",'
        b'"contact":{"partnersEmail":"api@vaccinatethestates.com"}},'
        b'"content":['
    )
    # FeatureCollection header with the serialized VTS_USAGE spliced in.
    v0_geojson_prefix = b'{"type":"FeatureCollection","usage":USAGE,'.replace(
        b"USAGE", orjson.dumps(VTS_USAGE)
    ) + b'"features":['
    formats = make_formats(location_json, location_geojson)
    formats["v0preview"] = OutputFormat(
        prepare_queryset=lambda qs: qs.select_related("dn_latest_non_skip_report"),
        start=v0_usage_prefix,
        transform=location_v0_json,
        transform_batch=lambda batch: batch,
        serialize=orjson.dumps,
        separator=b",",
        end=lambda qs: b"]}",
        content_type="application/json",
    )
    formats["v0preview-geojson"] = OutputFormat(
        prepare_queryset=lambda qs: qs.select_related(
            "dn_latest_non_skip_report", "provider"
        ),
        start=v0_geojson_prefix,
        transform=lambda loc: to_geojson(location_v0_json(loc)),
        transform_batch=lambda batch: batch,
        serialize=orjson.dumps,
        separator=b",",
        end=lambda qs: b"]}",
        content_type="application/json",
    )
    formats["ids"] = OutputFormat(
        prepare_queryset=lambda qs: qs.only("public_id").select_related(None),
        start=b"[",
        transform=lambda loc: loc.public_id,
        transform_batch=lambda batch: batch,
        serialize=orjson.dumps,
        separator=b",",
        end=lambda qs: b"]",
        content_type="application/json",
    )
    return formats
def make_formats(json_convert, geojson_convert):
    """Build the three standard OutputFormats from a pair of converters.

    *json_convert* / *geojson_convert* each turn one model instance into a
    plain dict; the returned formats stream those dicts as a JSON envelope
    (with a trailing total), a GeoJSON FeatureCollection, or
    newline-delimited GeoJSON.
    """
    def json_total_suffix(qs):
        # Close the results array and splice in the (lazily evaluated) count.
        return b'],"total":TOTAL}'.replace(b"TOTAL", str(qs.count()).encode("ascii"))

    json_format = OutputFormat(
        prepare_queryset=lambda qs: qs,
        start=b'{"results":[',
        transform=json_convert,
        transform_batch=lambda batch: batch,
        serialize=orjson.dumps,
        separator=b",",
        end=json_total_suffix,
        content_type="application/json",
    )
    geojson_format = OutputFormat(
        prepare_queryset=lambda qs: qs,
        start=b'{"type":"FeatureCollection","features":[',
        transform=geojson_convert,
        transform_batch=lambda batch: batch,
        serialize=orjson.dumps,
        separator=b",",
        end=lambda qs: b"]}",
        content_type="application/json",
    )
    nlgeojson_format = OutputFormat(
        prepare_queryset=lambda qs: qs,
        start=b"",
        transform=geojson_convert,
        transform_batch=lambda batch: batch,
        serialize=orjson.dumps,
        separator=b"\n",
        end=lambda qs: b"",
        content_type="text/plain",
    )
    return {
        "json": json_format,
        "geojson": geojson_format,
        "nlgeojson": nlgeojson_format,
    }
def chunks(sequence, size):
    """Lazily split *sequence* into consecutive chunks of at most *size*.

    Each yielded chunk is itself a lazy iterator over the shared
    underlying iterator, so chunks must be consumed in order (and fully)
    to get the expected grouping. The final chunk may be shorter.
    """
    iterator = iter(sequence)
    while True:
        try:
            head = next(iterator)
        except StopIteration:
            return
        yield itertools.chain((head,), itertools.islice(iterator, size - 1))
| 33.528239 | 103 | 0.605925 |
ace2b190cba75efd78c6ce3b28f3a220bf92d1c7 | 50 | py | Python | week4_EDA_np_pd_json_apis_regex/day4_gen_annotation_eda/theory/python/annotations/_.py | paleomau/MGOL_BOOTCAMP | 8c2b018f49fd12a255ea6f323141260d04d4421d | [
"MIT"
] | null | null | null | week4_EDA_np_pd_json_apis_regex/day4_gen_annotation_eda/theory/python/annotations/_.py | paleomau/MGOL_BOOTCAMP | 8c2b018f49fd12a255ea6f323141260d04d4421d | [
"MIT"
] | null | null | null | week4_EDA_np_pd_json_apis_regex/day4_gen_annotation_eda/theory/python/annotations/_.py | paleomau/MGOL_BOOTCAMP | 8c2b018f49fd12a255ea6f323141260d04d4421d | [
"MIT"
] | null | null | null | def x():
y = 5 + 10
print(2)
print("dgsg") | 12.5 | 14 | 0.46 |
ace2b1a29a3abb15aedb474de4948707e3d81eeb | 416 | py | Python | erpnext_feature_board/hook_events/review_request.py | akurungadam/erpnext_feature_board | 8c99b4dfaa79d86d8e8b46fa1bf235d0bfa471e0 | [
"MIT"
] | 15 | 2021-05-31T16:29:22.000Z | 2021-12-02T20:18:32.000Z | erpnext_feature_board/hook_events/review_request.py | akurungadam/erpnext_feature_board | 8c99b4dfaa79d86d8e8b46fa1bf235d0bfa471e0 | [
"MIT"
] | 18 | 2021-06-01T07:39:08.000Z | 2021-07-14T09:02:35.000Z | erpnext_feature_board/hook_events/review_request.py | akurungadam/erpnext_feature_board | 8c99b4dfaa79d86d8e8b46fa1bf235d0bfa471e0 | [
"MIT"
] | 6 | 2021-06-01T07:19:53.000Z | 2021-12-28T20:06:25.000Z | import frappe
def delete_approved_build_requests():
    """
    Scheduled hook that removes Review Requests already marked Approved.

    Only requests whose type changes a site deployment (Build, Upgrade,
    Delete) are considered; each matching document is deleted by name.
    """
    deployment_types = ["Build", "Upgrade", "Delete"]
    approved = frappe.get_all(
        "Review Request",
        filters={
            "request_type": ["in", deployment_types],
            "request_status": "Approved",
        },
    )
    for doc in approved:
        frappe.delete_doc("Review Request", doc.name)
| 21.894737 | 81 | 0.71875 |
ace2b24de7ae9f596582447a33ea02124a09a922 | 688 | py | Python | authors/apps/articles/renderers.py | andela/ah-backend-summer | f842a3e02f8418f123dc5de36809ad67557b1c1d | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:45:24.000Z | 2019-03-11T12:45:24.000Z | authors/apps/articles/renderers.py | andela/ah-backend-summer | f842a3e02f8418f123dc5de36809ad67557b1c1d | [
"BSD-3-Clause"
] | 53 | 2019-01-29T08:02:23.000Z | 2022-03-11T23:39:37.000Z | authors/apps/articles/renderers.py | andela/ah-backend-summer | f842a3e02f8418f123dc5de36809ad67557b1c1d | [
"BSD-3-Clause"
] | 5 | 2019-10-04T07:02:38.000Z | 2020-06-11T12:39:22.000Z | import json
from rest_framework import renderers
class ArticleJSONRenderer(renderers.JSONRenderer):
    """DRF renderer that wraps article payloads under an "articles" key."""

    charset = "utf-8"

    def render(self, data, media_type=None, renderer_context=None):
        # Namespace the payload so clients always receive {"articles": ...}
        # regardless of the underlying data shape.
        wrapped = {"articles": data}
        return json.dumps(wrapped)
class BookmarkJSONRenderer(renderers.JSONRenderer):
    """DRF renderer that wraps bookmark payloads under a "bookmarks" key."""

    charset = "utf-8"

    def render(self, data, media_type=None, renderer_context=None):
        # Mirror ArticleJSONRenderer: namespace the payload for clients.
        wrapped = {"bookmarks": data}
        return json.dumps(wrapped)
| 29.913043 | 69 | 0.72093 |
ace2b260397ed81e3f8c5c72a231e811a1dbe50a | 1,908 | py | Python | setup.py | keithoffer/pylinac | 8cdd9b867133725da3baecb27e7c0d89c6b59a11 | [
"MIT"
] | null | null | null | setup.py | keithoffer/pylinac | 8cdd9b867133725da3baecb27e7c0d89c6b59a11 | [
"MIT"
] | null | null | null | setup.py | keithoffer/pylinac | 8cdd9b867133725da3baecb27e7c0d89c6b59a11 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
__version__ = '2.0.2'
setup(
name='pylinac',
version=__version__,
packages=find_packages(),
package_data={'pylinac': ['watcher_config.yml']},
zip_safe=False, # allows users to view files in egg/distribution
url='https://github.com/jrkerns/pylinac',
keywords="""medical physics AAPM TG142 quality assurance starshot cbct vmat dynalog starshot linac Varian Elekta
trajectory log kv MV planar Leeds Las Vegas Standard Imaging PipsPro TG51""",
author='James Kerns',
author_email='jkerns100@gmail.com',
description='A toolkit for performing TG-142 QA-related tasks on a linear accelerator',
install_requires=["numpy >= 1.11",
"scipy >= 0.17",
"pydicom >= 0.9.9",
"matplotlib >= 1.4",
"scikit-image >= 0.12",
"scikit-learn >= 0.18",
"Pillow >= 4.0",
"tqdm == 3.8",
"pyyaml >= 3.10",
"yagmail",
"reportlab >= 3.3"],
entry_points={
'console_scripts':
['pylinac=pylinac.scripts:cli']
},
license='MIT',
test_suite='tests._test_all',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Software Development :: Libraries"]
)
| 38.938776 | 116 | 0.568134 |
ace2b2a446b6229977a81b105755a78acbb7e3c1 | 6,070 | py | Python | helpers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/collections.py | vnnguyen/appbasic | c1d0b85c42a09f8c2d75807774f550b2b9eae33e | [
"BSD-3-Clause"
] | 550 | 2015-01-05T16:59:00.000Z | 2022-03-20T16:55:25.000Z | helpers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/collections.py | vnnguyen/appbasic | c1d0b85c42a09f8c2d75807774f550b2b9eae33e | [
"BSD-3-Clause"
] | 15 | 2015-02-05T06:00:47.000Z | 2018-07-07T14:34:04.000Z | helpers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/collections.py | vnnguyen/appbasic | c1d0b85c42a09f8c2d75807774f550b2b9eae33e | [
"BSD-3-Clause"
] | 119 | 2015-01-08T00:48:24.000Z | 2022-01-27T14:13:15.000Z | __all__ = ['deque', 'defaultdict', 'namedtuple']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__ # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22) # instantiate with positional args or keywords
    >>> p[0] + p[1] # indexable like a plain tuple
    33
    >>> x, y = p # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y # fields also accessible by name
    33
    >>> d = p._asdict() # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d) # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Parse and validate the field names. Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    # (The field names are interpolated into source code below, so anything
    # other than identifiers must be rejected here.)
    if isinstance(field_names, basestring):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template
    # argtxt/reprtxt/dicttxt are code fragments interpolated into `template`.
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(cls, %(argtxt)s):
            return tuple.__new__(cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = self._make(map(kwds.pop, %(field_names)r, self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    # Each field becomes a read-only property backed by itemgetter(index).
    for i, name in enumerate(field_names):
        template += '        %s = property(itemgetter(%d))\n' % (name, i)
    if verbose:
        print template

    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(itemgetter=_itemgetter, __name__='namedtuple_%s' % typename)
    try:
        exec template in namespace
    except SyntaxError, e:
        raise SyntaxError(e.message + ':\n' + template)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
    # sys._getframe is not defined (Jython for example).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals['__name__']

    return result
# Self-test / demo harness, run only when the module is executed directly.
if __name__ == '__main__':
    # verify that instances can be pickled
    from cPickle import loads, dumps
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))

    # test and demonstrate ability to override methods
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()
        @property
        def hypot(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)

    for p in Point(3, 4), Point(14, 5/7.):
        print p

    # Demonstrate overriding _make/_replace with faster, unchecked variants.
    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))

    print Point(11, 22)._replace(x=100)

    # Extending an existing named tuple with an extra field.
    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print Point3D.__doc__

    # Run the doctests embedded in namedtuple's docstring.
    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print TestResults(*doctest.testmod())
| 40.738255 | 126 | 0.608896 |
ace2b36154cef93045af10a660e3031413052b1f | 6,151 | py | Python | src/sdk/pynni/nni/compression/torch/pruning/lottery_ticket.py | hwpengms/nni | 461ef242d2efe915ac58cbea27049abdd4d6dc73 | [
"MIT"
] | 3 | 2021-01-05T07:41:58.000Z | 2021-01-11T02:08:01.000Z | src/sdk/pynni/nni/compression/torch/pruning/lottery_ticket.py | mstfbl/nni | 92149c58834ccf24d82a15f756decd0d1e613ed3 | [
"MIT"
] | 21 | 2020-11-13T19:01:01.000Z | 2022-02-27T09:12:51.000Z | src/sdk/pynni/nni/compression/torch/pruning/lottery_ticket.py | mstfbl/nni | 92149c58834ccf24d82a15f756decd0d1e613ed3 | [
"MIT"
] | 3 | 2019-01-02T06:15:50.000Z | 2019-01-30T14:31:20.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import logging
import torch
from schema import And, Optional
from ..utils.config_validation import CompressorSchema
from ..compressor import Pruner
from .finegrained_pruning import LevelPrunerMasker
logger = logging.getLogger('torch pruner')
class LotteryTicketPruner(Pruner):
    """
    This is a Pytorch implementation of the paper "The Lottery Ticket Hypothesis: Finding Sparse, Trainable Neural Networks",
    following NNI model compression interface.

    1. Randomly initialize a neural network f(x;theta_0) (where theta_0 follows D_{theta}).
    2. Train the network for j iterations, arriving at parameters theta_j.
    3. Prune p% of the parameters in theta_j, creating a mask m.
    4. Reset the remaining parameters to their values in theta_0, creating the winning ticket f(x;m*theta_0).
    5. Repeat step 2, 3, and 4.
    """

    def __init__(self, model, config_list, optimizer=None, lr_scheduler=None, reset_weights=True):
        """
        Parameters
        ----------
        model : pytorch model
            The model to be pruned
        config_list : list
            Supported keys:
                - prune_iterations : The number of rounds for the iterative pruning.
                - sparsity : The final sparsity when the compression is done.
        optimizer : pytorch optimizer
            The optimizer for the model
        lr_scheduler : pytorch lr scheduler
            The lr scheduler for the model if used
        reset_weights : bool
            Whether reset weights and optimizer at the beginning of each round.
        """
        # save init weights and optimizer
        # NOTE(review): when reset_weights is True, optimizer must not be None
        # (its state_dict() is deep-copied below) — confirm callers guarantee this.
        self.reset_weights = reset_weights
        if self.reset_weights:
            self._model = model
            self._optimizer = optimizer
            # Snapshot of theta_0 and optimizer/scheduler state, restored at the
            # start of every pruning round by prune_iteration_start().
            self._model_state = copy.deepcopy(model.state_dict())
            self._optimizer_state = copy.deepcopy(optimizer.state_dict())
            self._lr_scheduler = lr_scheduler
            if lr_scheduler is not None:
                self._scheduler_state = copy.deepcopy(lr_scheduler.state_dict())

        # Snapshots must be taken before the base class wraps/modifies the model.
        super().__init__(model, config_list, optimizer)
        self.curr_prune_iteration = None
        # validate_config() enforces that all entries share one prune_iterations.
        self.prune_iterations = config_list[0]['prune_iterations']
        self.masker = LevelPrunerMasker(model, self)

    def validate_config(self, model, config_list):
        """
        Parameters
        ----------
        model : torch.nn.module
            Model to be pruned
        config_list : list
            Supported keys:
                - prune_iterations : The number of rounds for the iterative pruning.
                - sparsity : The final sparsity when the compression is done.
        """
        schema = CompressorSchema([{
            'sparsity': And(float, lambda n: 0 < n < 1),
            'prune_iterations': And(int, lambda n: n > 0),
            Optional('op_types'): [str],
            Optional('op_names'): [str]
        }], model, logger)

        schema.validate(config_list)
        assert len(set([x['prune_iterations'] for x in config_list])) == 1, 'The values of prune_iterations must be equal in your config'

    def _calc_sparsity(self, sparsity):
        # Geometric schedule: prune the same fraction of remaining weights each
        # round so that after prune_iterations rounds the final sparsity is reached.
        keep_ratio_once = (1 - sparsity) ** (1 / self.prune_iterations)
        curr_keep_ratio = keep_ratio_once ** self.curr_prune_iteration
        return max(1 - curr_keep_ratio, 0)

    def _calc_mask(self, wrapper, sparsity):
        # Round 0 is plain training: the mask is all ones (nothing pruned yet).
        weight = wrapper.module.weight.data
        if self.curr_prune_iteration == 0:
            mask = {'weight_mask': torch.ones(weight.shape).type_as(weight)}
        else:
            curr_sparsity = self._calc_sparsity(sparsity)
            mask = self.masker.calc_mask(sparsity=curr_sparsity, wrapper=wrapper)
        return mask

    def calc_mask(self, wrapper, **kwargs):
        """
        Generate mask for the given ``weight``.

        Parameters
        ----------
        wrapper : Module
            The layer to be pruned

        Returns
        -------
        tensor
            The mask for this weight, it is ```None``` because this pruner
            calculates and assigns masks in ```prune_iteration_start```,
            no need to do anything in this function.
        """
        return None

    def get_prune_iterations(self):
        """
        Return the range for iterations.
        In the first prune iteration, masks are all one, thus, add one more iteration

        Returns
        -------
        list
            A list for pruning iterations
        """
        return range(self.prune_iterations + 1)

    def prune_iteration_start(self):
        """
        Control the pruning procedure on updated epoch number.
        Should be called at the beginning of the epoch.
        """
        if self.curr_prune_iteration is None:
            self.curr_prune_iteration = 0
        else:
            self.curr_prune_iteration += 1
        assert self.curr_prune_iteration < self.prune_iterations + 1, 'Exceed the configured prune_iterations'

        # Pair every configured layer with its wrapper and assign a fresh mask.
        modules_wrapper = self.get_modules_wrapper()
        modules_to_compress = self.get_modules_to_compress()
        for layer, config in modules_to_compress:
            module_wrapper = None
            for wrapper in modules_wrapper:
                if wrapper.name == layer.name:
                    module_wrapper = wrapper
                    break
            assert module_wrapper is not None

            sparsity = config.get('sparsity')
            mask = self._calc_mask(module_wrapper, sparsity)
            # TODO: directly use weight_mask is not good
            module_wrapper.weight_mask = mask['weight_mask']
            # there is no mask for bias

        # reinit weights back to original after new masks are generated
        if self.reset_weights:
            # should use this member function to reset model weights
            self.load_model_state_dict(self._model_state)
            self._optimizer.load_state_dict(self._optimizer_state)
            if self._lr_scheduler is not None:
                self._lr_scheduler.load_state_dict(self._scheduler_state)
| 38.93038 | 137 | 0.629491 |
ace2b60f20ab4bae10a3925dd8c6bfba1900c107 | 686 | py | Python | app/main.py | Bleskocvok/schedu-shell | 9e992480781bac8af67fbc49fb7de80e38d3e9a0 | [
"BSD-3-Clause"
] | null | null | null | app/main.py | Bleskocvok/schedu-shell | 9e992480781bac8af67fbc49fb7de80e38d3e9a0 | [
"BSD-3-Clause"
] | null | null | null | app/main.py | Bleskocvok/schedu-shell | 9e992480781bac8af67fbc49fb7de80e38d3e9a0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import colorama
from database import Database
from taskinator import Taskinator
import sys
def main():
    """Run one schedu-shell command: load state, dispatch argv, persist."""
    colorama.init()
    db = Database(".data")
    db.load()
    tasks = Taskinator(db)
    tasks.command(sys.argv)
    db.save()
    colorama.deinit()


if __name__ == "__main__":
    main()
'''
tasks add [date]
tasks add monday
tasks add tomorrow
tasks complete [n]
tasks add-complete ""
tasks add-complete [date] ""
tasks list today
tasks list yesterday
tasks list week
tasks list month
tasks list all
tasks list global
tasks add global ""
tasks complete global [n]
tasks rewards list
tasks rewards add ""
tasks rewards claim [n]
'''
| 12.703704 | 33 | 0.698251 |
ace2b63463789bb9741bf12fca12be684f5524c6 | 4,152 | py | Python | scalability/common/workload.py | contropist/ic | 9240bea7dc0239fcbc5d43ad11f3ca803ee9bb11 | [
"Apache-2.0"
] | null | null | null | scalability/common/workload.py | contropist/ic | 9240bea7dc0239fcbc5d43ad11f3ca803ee9bb11 | [
"Apache-2.0"
] | 4 | 2021-12-22T22:34:51.000Z | 2022-03-31T07:34:19.000Z | scalability/common/workload.py | contropist/ic | 9240bea7dc0239fcbc5d43ad11f3ca803ee9bb11 | [
"Apache-2.0"
] | null | null | null | import os
import re
import threading
import uuid
from common import ssh
from termcolor import colored
class Workload(threading.Thread):
    """
    Threaded abstraction around workload generator execution.

    Workload generators are executed via SSH on remote machines; their
    output can be copied back using fetch_results.

    The benefit of implementing a threaded abstraction is that
    we can execute multiple workloads in parallel easily.
    """

    def __init__(
        self,
        load_generators: [str],
        target_machines: [str],
        rps_per_machine: [int],
        canister_ids: [str],
        duration: int,
        f_stdout: str,
        f_stderr: str,
        timeout: int,
        payload: str = None,
        method: str = None,
        call_method: str = None,
        arguments: [str] = [],  # read-only, so the shared default list is safe
    ):
        """Initialize workload.

        load_generators: hosts that run ic-workload-generator (one each).
        target_machines: hosts the generated load is aimed at.
        rps_per_machine: requests/s per load generator (same length as
            load_generators, asserted in get_commands).
        canister_ids: canister IDs; generators round-robin over them.
        """
        threading.Thread.__init__(self)
        self.load_generators = load_generators
        self.target_machines = target_machines
        self.canister_ids = canister_ids
        self.rps_per_machine = rps_per_machine
        self.duration = duration
        self.payload = payload
        self.method = method
        self.call_method = call_method
        self.arguments = arguments
        self.f_stdout = f_stdout
        self.f_stderr = f_stderr
        self.timeout = timeout

        if not isinstance(self.canister_ids, list):
            raise Exception("canister_ids has to be a list of canister IDs represented as string")
        if len(self.canister_ids) < 1:
            raise Exception("List of canister IDs is empty")

    def get_commands(self) -> [str]:
        """Build a list of command line arguments to use for workload generation.

        Returns a tuple (commands, machines): one shell command per entry in
        self.load_generators, each targeting every machine in
        self.target_machines. Also assigns self.uuids — one fresh UUID per
        load generator — used to name that generator's summary file.
        """
        target_list = ",".join(f"http://[{target}]:8080" for target in self.target_machines)
        cmd = f'./ic-workload-generator "{target_list}"' f" -n {self.duration} -p 9090 --no-status-check"
        cmd += " " + " ".join(self.arguments)

        # Dump worklod generator command in output directory.
        if self.payload is not None:
            cmd += " --payload '{}'".format(self.payload.decode("utf-8"))
        if self.method is not None:
            cmd += " -m {}".format(self.method)
        if self.call_method is not None:
            cmd += ' --call-method "{}"'.format(self.call_method)

        # Each workload generator instance can target only a single canister ID currently.
        # In the case of multiple canisters, select a different canister for each machine.
        num_load_generators = len(self.load_generators)
        canister_ids = [self.canister_ids[i % len(self.canister_ids)] for i in range(num_load_generators)]
        # Bug fix: `[uuid.uuid4()] * n` evaluated uuid4() once and aliased the
        # SAME UUID for every generator, so summary-file names could collide
        # when two generators run on one host. Generate one UUID per generator.
        self.uuids = [uuid.uuid4() for _ in range(num_load_generators)]

        assert num_load_generators == len(self.rps_per_machine)
        commands = [
            "{} --canister-id {} --summary-file wg_summary_{} -r {rps} ".format(
                cmd,
                canister_id,
                self.uuids[i],
                rps=rps,
            )
            for i, (canister_id, rps) in enumerate(zip(canister_ids, self.rps_per_machine))
        ]
        return (commands, self.load_generators)

    def run(self):
        """Start running the given workloads as a thread."""
        commands, machines = self.get_commands()
        ssh.run_all_ssh_in_parallel(machines, commands, self.f_stdout, self.f_stderr, self.timeout)

    def fetch_results(self, destinations, out_dir):
        """Fetch results from workload generators.

        Requires get_commands() (via run()) to have executed first, since the
        remote summary-file names are derived from self.uuids.
        """
        sources = ["admin@[{}]:wg_summary_{}".format(m, self.uuids[i]) for i, m in enumerate(self.load_generators)]
        rc = ssh.scp_in_parallel(sources, destinations)
        if not rc == [0 for _ in range(len(destinations))]:
            print(colored("⚠️ Some workload generators failed:", "red"))
            # Surface the tail of each workload-generator stderr file to help
            # diagnose which generator failed and why.
            for fname in os.listdir(out_dir):
                if re.match("workload-generator.*stderr.*", fname):
                    with open(os.path.join(out_dir, fname)) as ferr:
                        lines = ferr.read().split("\n")
                        print("\n".join(lines[-10:]))
        return rc
| 38.803738 | 115 | 0.615366 |
ace2b6db2b5a25ac1d2ad3e69a17ff206a013e5c | 8,821 | py | Python | contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen.py | viktortnk/pants | 54c98206de5ac9aadfe26d83175f472941be6c7d | [
"Apache-2.0"
] | 1 | 2020-06-13T22:01:39.000Z | 2020-06-13T22:01:39.000Z | contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen.py | viktortnk/pants | 54c98206de5ac9aadfe26d83175f472941be6c7d | [
"Apache-2.0"
] | null | null | null | contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen.py | viktortnk/pants | 54c98206de5ac9aadfe26d83175f472941be6c7d | [
"Apache-2.0"
] | 2 | 2020-05-18T18:43:11.000Z | 2020-05-19T02:47:47.000Z | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from textwrap import dedent
from unittest.mock import MagicMock
from pants.backend.codegen.thrift.java.java_thrift_library import JavaThriftLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.context import Context
from pants.testutil.jvm.nailgun_task_test_base import NailgunTaskTestBase
from pants.util.ordered_set import OrderedSet
from pants.contrib.scrooge.tasks.scrooge_gen import ScroogeGen
GEN_ADAPT = "--gen-adapt"
class ScroogeGenTest(NailgunTaskTestBase):
    """Tests for the ScroogeGen task: config validation, per-language codegen
    smoke tests, and thrift service/exception dependency detection."""

    @classmethod
    def task_type(cls):
        # Task under test, consumed by the NailgunTaskTestBase harness.
        return ScroogeGen

    @classmethod
    def alias_groups(cls):
        # Register the target aliases used by the BUILD snippets below.
        return (
            super()
            .alias_groups()
            .merge(
                BuildFileAliases(
                    targets={
                        "java_thrift_library": JavaThriftLibrary,
                        "java_library": JavaLibrary,
                        "scala_library": ScalaLibrary,
                    }
                )
            )
        )

    def test_validate_compiler_configs(self):
        # Two targets with identical (empty) compiler_args must validate,
        # including across the dependency edge two -> one.
        self.add_to_build_file(
            "test_validate",
            self._test_create_build_str(
                name="one", sources="[]", language="uniform", compiler_args="[]", deps="[]"
            ),
        )

        self.add_to_build_file(
            "test_validate",
            self._test_create_build_str(
                name="two", sources="[]", language="uniform", compiler_args="[]", deps="[':one']"
            ),
        )

        target = self.target("test_validate:one")
        context = self.context(target_roots=[target])
        task = self.prepare_execute(context)
        task._validate_compiler_configs(self.target("test_validate:one"))
        task._validate_compiler_configs(self.target("test_validate:two"))

    def test_validate_unchecked_compiler_args(self):
        # A recognized passthrough flag must not fail validation.
        self.add_to_build_file(
            "test_validate",
            self._test_create_build_str(
                name="one", sources="[]", language="uniform", compiler_args="[]", deps="[]"
            ),
        )

        self.add_to_build_file(
            "test_validate",
            self._test_create_build_str(
                name="two",
                sources="[]",
                language="uniform",
                compiler_args="['--java-passthrough']",
                deps="[':one']",
            ),
        )

        target = self.target("test_validate:two")
        context = self.context(target_roots=[target])
        task = self.prepare_execute(context)
        task._validate_compiler_configs(self.target("test_validate:one"))
        task._validate_compiler_configs(self.target("test_validate:two"))

    def test_validate_invalid_unchecked_compiler_args(self):
        # An unknown compiler flag must be rejected with a TaskError.
        self.add_to_build_file(
            "test_validate",
            self._test_create_build_str(
                name="one", sources="[]", language="uniform", compiler_args="[]", deps="[]"
            ),
        )

        self.add_to_build_file(
            "test_validate",
            self._test_create_build_str(
                name="two",
                sources="[]",
                language="uniform",
                compiler_args="['--invalid_args']",
                deps="[':one']",
            ),
        )

        target = self.target("test_validate:two")
        context = self.context(target_roots=[target])
        task = self.prepare_execute(context)
        with self.assertRaises(TaskError):
            task._validate_compiler_configs(self.target("test_validate:two"))

    def test_scala(self):
        # Scala codegen should produce a ScalaLibrary synthetic target.
        sources = [os.path.join(self.test_workdir, "org/pantsbuild/example/Example.scala")]
        self._test_help("scala", ScalaLibrary, [GEN_ADAPT], sources)

    def test_compiler_args(self):
        sources = [os.path.join(self.test_workdir, "org/pantsbuild/example/Example.scala")]
        self._test_help("scala", ScalaLibrary, [GEN_ADAPT], sources)

    def test_android(self):
        # "android" language emits Java sources into a JavaLibrary.
        sources = [os.path.join(self.test_workdir, "org/pantsbuild/android_example/Example.java")]
        self._test_help("android", JavaLibrary, [GEN_ADAPT], sources)

    def test_invalid_lang(self):
        # Unknown language must fail at target-definition time.
        with self.assertRaises(TargetDefinitionException):
            self._test_help("not-a-lang", JavaLibrary, [GEN_ADAPT], [])

    def test_empty_compiler_args(self):
        sources = [os.path.join(self.test_workdir, "org/pantsbuild/example/Example.scala")]
        self._test_help("scala", ScalaLibrary, [], sources)

    def compiler_args_to_string(self, compiler_args):
        # Render a python-literal list string for embedding in BUILD files.
        quoted = [f"'{x}'" for x in compiler_args]
        comma_separated = ", ".join(quoted)
        return f"[{comma_separated}]"

    def _test_create_build_str(self, name, sources, language, compiler_args, deps):
        # BUILD-file snippet template for a java_thrift_library target.
        return dedent(
            """
            java_thrift_library(name='{name}',
              sources={sources},
              dependencies={deps},
              compiler='scrooge',
              language='{language}',
              compiler_args={compiler_args},
              strict_deps=True,
              tags=['my_tag'],
            )
            """.format(
                name=name,
                sources=sources,
                language=language,
                compiler_args=compiler_args,
                deps=deps,
            )
        )

    def _test_help(self, language, library_type, compiler_args, sources):
        # End-to-end smoke test: write a thrift file + BUILD target, stub the
        # actual scrooge invocation, run the task, and verify the synthetic
        # target injected via Context.add_new_target.
        contents = dedent(
            """#@namespace android org.pantsbuild.android_example
            namespace java org.pantsbuild.example
            struct Example {
            1: optional i64 number
            }
            """
        )
        self.create_file(relpath="test_smoke/a.thrift", contents=contents)

        build_string = self._test_create_build_str(
            name="a",
            sources=["a.thrift"],
            language=language,
            compiler_args=compiler_args,
            deps="[]",
        )
        self.add_to_build_file("test_smoke", build_string)
        target = self.target("test_smoke:a")

        context = self.context(target_roots=[target])
        task = self.prepare_execute(context)
        # Stub the real codegen step with a canned source mapping.
        task.gen = MagicMock()
        task.gen.return_value = {"test_smoke/a.thrift": sources}

        saved_add_new_target = Context.add_new_target
        try:
            # Patch the class attribute so we can inspect the injected target;
            # restored in the finally block to avoid leaking into other tests.
            mock = MagicMock()
            Context.add_new_target = mock
            task.execute()

            self.assertEqual(1, mock.call_count)
            _, call_kwargs = mock.call_args
            self.assertEqual(call_kwargs["target_type"], library_type)
            self.assertEqual(call_kwargs["dependencies"], OrderedSet())
            self.assertEqual(call_kwargs["provides"], None)
            self.assertEqual(call_kwargs["derived_from"], target)
            self.assertEqual(call_kwargs["strict_deps"], True)
            self.assertEqual(call_kwargs["tags"], {"my_tag"})

            sources = call_kwargs["sources"]
            self.assertEqual(sources.files, ())
        finally:
            Context.add_new_target = saved_add_new_target

    def test_basic_deps(self):
        # Plain struct: neither a service nor an exception.
        contents = dedent(
            """#@namespace android org.pantsbuild.android_example
            namespace java org.pantsbuild.example
            struct Example {
            1: optional i64 number
            }
            """
        )
        self._test_dependencies_help(contents, False, False)

    def test_service_deps(self):
        # Thrift service declaration should be detected.
        contents = dedent(
            """#@namespace android org.pantsbuild.android_example
            namespace java org.pantsbuild.example
            service MultiplicationService
            {
            int multiply(1:int n1, 2:int n2),
            }"""
        )
        self._test_dependencies_help(contents, True, False)

    def test_exception_deps(self):
        # Thrift exception declaration should be detected.
        contents = dedent(
            """#@namespace android org.pantsbuild.android_example
            namespace java org.pantsbuild.example
            exception InvalidOperation {
            1: i32 what,
            2: string why
            }"""
        )
        self._test_dependencies_help(contents, False, True)

    def _test_dependencies_help(self, contents, declares_service, declares_exception):
        # Write the thrift source, then check ScroogeGen's static detectors.
        source = "test_smoke/a.thrift"
        self.create_file(relpath=source, contents=contents)
        self.assertEqual(ScroogeGen._declares_service(source), declares_service)
        self.assertEqual(ScroogeGen._declares_exception(source), declares_exception)
| 35.857724 | 98 | 0.601973 |
ace2b73051c064cf86513b0b375541e7b06f368e | 1,808 | py | Python | mainapp/tests/main/test_notifyusers.py | CatoTH/opensourceris | 3b4b5e4b027a96fd08147fc57b53f078a44a00a7 | [
"MIT"
] | 1 | 2022-02-17T13:46:03.000Z | 2022-02-17T13:46:03.000Z | mainapp/tests/main/test_notifyusers.py | CatoTH/opensourceris | 3b4b5e4b027a96fd08147fc57b53f078a44a00a7 | [
"MIT"
] | 13 | 2022-02-11T14:45:36.000Z | 2022-02-22T19:54:03.000Z | mainapp/tests/main/test_notifyusers.py | CatoTH/opensourceris | 3b4b5e4b027a96fd08147fc57b53f078a44a00a7 | [
"MIT"
] | 1 | 2022-02-17T13:44:17.000Z | 2022-02-17T13:44:17.000Z | from datetime import datetime
from io import StringIO
from unittest import mock
from django.contrib.auth.models import User
from django.core.management import call_command
from django.test import TestCase
from mainapp.models import UserAlert, UserProfile
from mainapp.tests.live.helper import MockMainappSearch
class TestNotifyUsers(TestCase):
    """Exercise the ``notify_users`` management command end to end.

    Search execution is replaced by ``MockMainappSearch`` and outgoing mail is
    captured via a mocked ``send_mail``, so the test needs no search backend
    and sends no real e-mail.
    """
    fixtures = ["initdata"]
    def _create_user_with_alerts(self, email, alerts):
        # Create an active user (plus profile) holding one UserAlert per
        # search string; last_match=None marks each alert as never matched.
        newuser = User()
        newuser.email = email
        newuser.username = email
        newuser.is_active = 1
        newuser.save()
        UserProfile.objects.create(user=newuser)
        for alert in alerts:
            alert_object = UserAlert()
            alert_object.search_string = alert
            alert_object.last_match = None
            alert_object.user = newuser
            alert_object.save()
    @mock.patch("mainapp.functions.notify_users.send_mail")
    @mock.patch(
        "mainapp.functions.search.MainappSearch.execute", new=MockMainappSearch.execute
    )
    def test_notify(self, send_mail_function):
        """One matching alert should produce exactly one notification e-mail."""
        self._create_user_with_alerts("test@example.org", ["test"])
        out = StringIO()
        call_command(
            "notify_users",
            stdout=out,
            override_since=datetime.fromisoformat("2017-01-01"),
        )
        self.assertEqual(send_mail_function.call_count, 1)
        self.assertEqual(send_mail_function.call_args[0][0], "test@example.org")
        # call_args[0][2] is the plain-text body, call_args[0][3] the HTML
        # body: only the HTML version carries the <mark> highlighting.
        self.assertTrue("Title Highlight" in send_mail_function.call_args[0][2])
        self.assertTrue(
            "Title <mark>Highlight</mark>" in send_mail_function.call_args[0][3]
        )
        self.assertTrue("Unsubscribe" in send_mail_function.call_args[0][2])
        self.assertTrue("Unsubscribe" in send_mail_function.call_args[0][3])
ace2b82f6f5ff1372071a1a6a530456cdcd07f26 | 780 | py | Python | leetcode-random/combination-sum-3.py | kd82/interview-preparation | a47de06c677b37c8160d1429f43e38288c157754 | [
"MIT"
] | null | null | null | leetcode-random/combination-sum-3.py | kd82/interview-preparation | a47de06c677b37c8160d1429f43e38288c157754 | [
"MIT"
] | null | null | null | leetcode-random/combination-sum-3.py | kd82/interview-preparation | a47de06c677b37c8160d1429f43e38288c157754 | [
"MIT"
] | null | null | null | from typing import List
def combinationSum3(k: int, n: int) -> List[List[int]]:
def helper(n, k, curr, start):
if k == 0 and n == 0:
res.append(list(curr))
return
if k < 0 or n < 0:
return
for i in range(start, 10):
curr.append(i)
helper(n - i, k - 1, curr, i + 1)
curr.pop()
res = []
helper(n, k, [], 1)
return res
"""
Key Takeaways from the question
1. We don't need to use the set if we can use recursive call stak
2. Early Exit is the key for these kind fo problems
3. Based on the question decide if we need to keep track of starting index for recursion
""" | 35.454545 | 96 | 0.491026 |
ace2b881dd26ebba2f04a9a7105c8274d4eadb5a | 6,932 | py | Python | sandbox/apps/checkout/views.py | django-oscar/django-oscar-docdata | 8f95458d4dff7d27ec507bacfede0a19e1f0e6b2 | [
"ECL-2.0",
"Apache-2.0"
] | 15 | 2017-04-25T10:31:27.000Z | 2022-03-01T10:58:10.000Z | sandbox/apps/checkout/views.py | django-oscar/django-oscar-docdata | 8f95458d4dff7d27ec507bacfede0a19e1f0e6b2 | [
"ECL-2.0",
"Apache-2.0"
] | 45 | 2017-04-26T13:17:51.000Z | 2022-02-16T12:27:57.000Z | sandbox/apps/checkout/views.py | django-oscar/django-oscar-docdata | 8f95458d4dff7d27ec507bacfede0a19e1f0e6b2 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2017-07-18T03:43:53.000Z | 2020-07-10T16:29:30.000Z | import logging
from django.conf import settings
from django.urls import reverse, reverse_lazy
from django.views.generic import FormView
from oscar.apps.payment.exceptions import RedirectRequired
from oscar.apps.checkout import exceptions
from oscar.apps.checkout import views as oscar_views
from oscar.apps.payment.models import Source
from six import text_type
from .docdata import CustomDocdataFacade
from . import forms
logger = logging.getLogger(__name__)
class PaymentMethodView(oscar_views.PaymentMethodView, FormView):
    """
    Checkout step that lets the customer pick a payment method.

    Combines Oscar's PaymentMethodView (pre-condition/skip handling) with a
    plain Django FormView that renders and validates the method choice form.
    The chosen method is stored on the checkout session for later steps.
    """
    template_name = "checkout/payment_method.html"
    step = 'payment-method'
    form_class = forms.PaymentMethodForm
    success_url = reverse_lazy('checkout:payment-details')
    def skip_unless_payment_is_required(self, request):
        # When the project disables payment choices entirely, jump straight
        # to the preview step instead of showing this form.
        if settings.SKIP_PAYMENT_CHOICES:
            raise exceptions.PassedSkipCondition(url=reverse('checkout:preview'))
    def get_success_response(self):
        # No errors in get(), apply our form logic.
        # NOTE that the checks are not made in the post() call, but this is not a problem.
        # We can just store the payment method, and let the next view validate the other states again.
        return FormView.get(self, self.request, self.args, self.kwargs)
    def get_initial(self):
        # Pre-select whatever method was chosen earlier in this session.
        return {
            'payment_method': self.checkout_session.payment_method(),
        }
    def form_valid(self, form):
        # Store payment method in the CheckoutSessionMixin.checkout_session (a CheckoutSessionData object)
        self.checkout_session.pay_by(form.cleaned_data['payment_method'])
        return super(PaymentMethodView, self).form_valid(form)
class PaymentDetailsView(oscar_views.PaymentDetailsView):
    """
    The final step to submit the payment.
    This includes an additional form to input comments, and proceeds to the payment provider.
    This connects to the django-oscar-docdata package to start the payment.
    """
    def get_context_data(self, **kwargs):
        """Expose the selected payment method (code + display title) to the template."""
        context = super(PaymentDetailsView, self).get_context_data(**kwargs)
        method = self.checkout_session.payment_method()
        context['payment_method'] = {
            'code': method,
            'title': self.get_payment_method_display(method),
        }
        return context
    def get_payment_method_display(self, payment_method):
        # Translate the method code into its human-readable label; returns
        # None for unknown codes (dict.get default).
        return dict(settings.WEBSHOP_PAYMENT_CHOICES).get(payment_method)
    def handle_place_order_submission(self, request):
        """Kick off order submission with the collected checkout data."""
        # Collect all the data!
        submission = self.build_submission()
        # docdata needs to have a lot of information to start the payment.
        # TODO: Is this the right way to pass the information??
        submission['payment_kwargs']['submission'] = submission
        # Start the payment process!
        # This jumps to handle_payment()
        return self.submit(**submission)
    def handle_payment(self, order_number, total, **kwargs):
        """Register the payment with Docdata and redirect the user to its payment menu.

        Raises RedirectRequired (handled by Oscar) after saving the order,
        so the customer completes payment on the provider's site.
        """
        submission = kwargs['submission']
        # Make request to Docdata.
        # Any raised exceptions are handled by the PaymentDetail.submit() code.
        facade = CustomDocdataFacade()
        docdata_ref = facade.create_payment(
            order_number=order_number,
            total=total,
            user=submission['user'],
            # Extra parameters to add the "Invoice" element in Docdata:
            billing_address=submission['shipping_address'],  # NOTE: no billing address collected in steps.
            shipping_address=submission['shipping_address'],
            basket=submission['basket'],
            description=''
        )
        # NOTE: at this point, the payment is registered as the gateway,
        # and there is no way back. Any errors after this part require manual intervention!
        # Request was successful - record the "payment source".
        # This represents the origin where the payment should come from.
        # When an order is paid in multiple parts, multiple Source objects should be created.
        # As this request was a 'pre-auth', we set the 'amount_allocated'.
        # If we had performed an 'auth' request, then we would set 'amount_debited'.
        source = Source(
            source_type=facade.get_source_type(),
            currency=total.currency,
            amount_allocated=total.incl_tax,  # amount_* field depends on type of transaction.
            reference=docdata_ref
        )
        self.add_payment_source(source)
        # Also record payment event.
        # This will be visible in the Dashboard
        self.add_payment_event('pre-auth', total.incl_tax, reference=docdata_ref)
        # Ask oscar to redirect to docdata
        # TODO: test default_act="yes", skips menu entirely
        # TODO: add issuer_id for iDEAL.
        payment_url_args = {}
        if self.checkout_session.payment_method() is not None:
            payment_url_args['default_pm'] = self.checkout_session.payment_method()
        url = facade.get_payment_menu_url(self.request, docdata_ref, **payment_url_args)
        logger.info("Redirecting user to {0}".format(url))
        # Regardless of whether the order is paid, write it in the database before redirecting.
        # Oscar actually skips this when redirecting the user to the payment provider.
        self._save_order(order_number, submission)
        # Redirect the user to the payment provider.
        raise RedirectRequired(url)
    def _save_order(self, order_number, submission):
        """Persist the order started by PaymentDetailsView.submit(); render errors gracefully."""
        # Finalize the order that PaymentDetailsView.submit() started
        # If all is ok with payment, try and place order
        logger.info(u"Order #%s: payment started, placing order", order_number)
        try:
            # Call OrderPlacementMixin.handle_order_placement()
            return self.handle_order_placement(
                order_number, submission['user'], submission['basket'],
                submission['shipping_address'], submission['shipping_method'],
                submission['shipping_charge'], submission['billing_address'],
                submission['order_total'], **(submission['order_kwargs'])
            )
        except oscar_views.UnableToPlaceOrder as e:
            # It's possible that something will go wrong while trying to
            # actually place an order. Not a good situation to be in as a
            # payment transaction may already have taken place, but needs
            # to be handled gracefully.
            logger.error(u"Order #%s: unable to place order - %s", order_number, e, exc_info=True)
            msg = text_type(e)
            self.restore_frozen_basket()
            return self.render_to_response(self.get_context_data(error=msg))
    def send_confirmation_message(self, order, code, **kwargs):
        # Yes the order is already saved, because this is needed for Docdata.
        # However, delay sending the order confirmation!
        pass
| 42.527607 | 107 | 0.680467 |
ace2b8b017ed396fe816c1e0f49f42b2d98bad17 | 1,820 | py | Python | structured_gaussian_mixtures/load_timit_data.py | markstoehr/structured_gaussian_mixtures | f0c30770c8a851da7a7218b0b040b4f386f2bc5b | [
"Apache-2.0"
] | null | null | null | structured_gaussian_mixtures/load_timit_data.py | markstoehr/structured_gaussian_mixtures | f0c30770c8a851da7a7218b0b040b4f386f2bc5b | [
"Apache-2.0"
] | null | null | null | structured_gaussian_mixtures/load_timit_data.py | markstoehr/structured_gaussian_mixtures | f0c30770c8a851da7a7218b0b040b4f386f2bc5b | [
"Apache-2.0"
] | null | null | null | def load_data(X, Y, X_test, Y_test):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
def shared_dataset(data_x, data_y, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets ous get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(X_test, Y_test)
valid_set_x, valid_set_y = shared_dataset(X_test, Y_test)
train_set_x, train_set_y = shared_dataset(X, Y)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
| 46.666667 | 78 | 0.619231 |
ace2b8db497426a5754260f0f02187c22534bf34 | 1,152 | py | Python | wandb/vendor/prompt_toolkit/eventloop/asyncio_base.py | dreamflasher/client | c8267f1c6b8b6970172d622bb8fbf7cc773d78b2 | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | wandb/vendor/prompt_toolkit/eventloop/asyncio_base.py | dreamflasher/client | c8267f1c6b8b6970172d622bb8fbf7cc773d78b2 | [
"MIT"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | wandb/vendor/prompt_toolkit/eventloop/asyncio_base.py | dreamflasher/client | c8267f1c6b8b6970172d622bb8fbf7cc773d78b2 | [
"MIT"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | """
Eventloop for integration with Python3 asyncio.
Note that we can't use "yield from", because the package should be installable
under Python 2.6 as well, and it should contain syntactically valid Python 2.6
code.
"""
from __future__ import unicode_literals
# Public API of this module.
__all__ = (
    'AsyncioTimeout',
)
class AsyncioTimeout(object):
    """
    Invoke `callback` once the timeout elapses.

    Each call to `reset` arms a fresh timer on the event loop; older timers
    are logically cancelled, so only the most recently armed one may fire.
    """
    def __init__(self, timeout, callback, loop):
        self.timeout = timeout
        self.callback = callback
        self.loop = loop
        self.counter = 0
        self.running = True

    def reset(self):
        """
        Restart the countdown by scheduling a new timer.
        """
        self.counter += 1
        generation = self.counter

        def fire_if_current():
            # Superseded timers observe a newer counter and do nothing;
            # a stopped instance never fires at all.
            if self.running and generation == self.counter:
                self.callback()

        self.loop.call_later(self.timeout, fire_if_current)

    def stop(self):
        """
        Disable the timeout; any pending timers become no-ops.
        """
        self.running = False
ace2b93351c04a9d2d89c794997ad56f07f66450 | 679 | py | Python | venv/lib/python3.7/site-packages/PyInstaller/hooks/hook-wavefile.py | alexzacher/BMI-Body-Mass-Index-Calculator-APP | f54473757992568b73b066d507059e1053357174 | [
"MIT"
] | 5 | 2020-08-24T23:29:58.000Z | 2022-02-07T19:58:07.000Z | PyInstaller/hooks/hook-wavefile.py | samuelhwilliams/pyinstaller | 8714423aa56803027b5a5585257392024ea9f7a0 | [
"Apache-2.0"
] | 12 | 2020-02-15T04:04:55.000Z | 2022-02-18T20:29:49.000Z | pyinstaller-develop/PyInstaller/hooks/hook-wavefile.py | onecklam/ethereum-graphviz | 6993accf0cb85e23013bf7ae6b04145724a6dbd2 | [
"Apache-2.0"
] | 2 | 2020-08-24T23:30:06.000Z | 2021-12-23T18:23:38.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2016-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
python-wavefile: https://github.com/vokimon/python-wavefile
"""
from PyInstaller.utils.hooks import collect_dynamic_libs
binaries = collect_dynamic_libs('wavefile')
| 35.736842 | 78 | 0.597938 |
ace2bafb61478c8e26d5710e0579f0a835c2b32c | 2,498 | py | Python | Santander Customer Transaction Prediction/src/misc/ridge_baseline.py | anshulrai/Kaggle-Challenges | 0348dc00cbafcab3725a9abcbf06a364953d2a01 | [
"MIT"
] | null | null | null | Santander Customer Transaction Prediction/src/misc/ridge_baseline.py | anshulrai/Kaggle-Challenges | 0348dc00cbafcab3725a9abcbf06a364953d2a01 | [
"MIT"
] | null | null | null | Santander Customer Transaction Prediction/src/misc/ridge_baseline.py | anshulrai/Kaggle-Challenges | 0348dc00cbafcab3725a9abcbf06a364953d2a01 | [
"MIT"
] | 1 | 2019-07-09T23:20:44.000Z | 2019-07-09T23:20:44.000Z | from sklearn.linear_model import Ridge
from sklearn.metrics import roc_auc_score
import pandas as pd
import time
import datetime
import gc
import numpy as np
import os
# Toggled by hand when running locally on Windows; controls which path
# separator is used to split __file__ inside main().
windows_flag = False
print("Running on Windows!\n") if windows_flag else print("Running on Linux!\n")
gc.enable()  # make sure garbage collection is active for the large frames below
def main():
    """Stack previous submission outputs with a Ridge meta-model.

    Reads out-of-fold predictions from ./output/oof predictions/ and test
    predictions from ./submissions/, fits Ridge on the OOF matrix against
    the true train targets, reports the validation AUC, and writes a new
    blended submission CSV.
    """
    # Collect candidate submissions, excluding earlier stacking outputs so
    # the meta-model is not fed its own previous blends.
    submissions = []
    for submission in os.listdir('./submissions/'):
        model_type = submission.split('_')[0]
        if model_type not in ("rank", "ridge", "logisticstack"):
            submissions.append(submission)
    print("Considering {} submission outputs.".format(len(submissions)))
    # Build the (n_train x n_models) matrix of out-of-fold predictions.
    n_val = len(pd.read_csv("./input/train.csv", usecols=["target"]))
    val_data = np.zeros((n_val, len(submissions)))
    column_names = []
    for i, submission in enumerate(submissions):
        column_names.append(submission.split('.csv')[0])
        val_data[:, i] = pd.read_csv(r"./output/oof predictions/{}".format(submission),
                                     usecols=["target"]).values[:, 0]
    oof_labels = pd.read_csv("./input/train.csv", usecols=["target"]).values[:, 0]
    # Fit the Ridge meta-model on OOF predictions and score it in-sample.
    clf = Ridge(random_state=42)
    clf.fit(val_data, oof_labels)
    val_preds = clf.predict(val_data)
    val_auc = roc_auc_score(oof_labels, val_preds)
    print('Validation AUC: {}'.format(val_auc))
    # Build the matching (n_test x n_models) matrix from test submissions.
    n_test = len(pd.read_csv("./input/test.csv", usecols=["ID_code"]))
    test_data = np.zeros((n_test, len(submissions)))
    column_names = []
    for i, submission in enumerate(submissions):
        column_names.append(submission.split('.csv')[0])
        test_data[:, i] = pd.read_csv("./submissions/{}".format(submission),
                                      usecols=["target"]).values[:, 0]
    test_preds = clf.predict(test_data)
    # NOTE(review): this assumes the script lives exactly two directory
    # levels deep and that the path contains no extra dots — fragile.
    file_name = str(__file__).split('.')[0].split('\\')[2] if windows_flag else str(__file__).split('.')[0].split('/')[2]
    submission = pd.read_csv('./input/sample_submission.csv')
    submission['target'] = test_preds
    submission.to_csv('./submissions/{}_{}.csv'.format(file_name, val_auc), index=False)
if __name__ == "__main__":
change_log = "Ridge with all outputs!"
start_time = str(datetime.datetime.now().time()).split('.')[0]
begin_time = time.time()
main()
end_time = time.time()
with open('./logs/'+str(datetime.date.today())+'.txt', 'a') as f:
log = "File Name: {}\nChange Log: {}\nStart Time: {}\nExecution Time(seconds): {}\n\n\n".format(__file__, change_log, start_time, end_time-begin_time)
f.write(log) | 39.03125 | 158 | 0.645316 |
ace2bc4a65a34969327a7161259a187164496e5c | 16,972 | py | Python | search/tests/unit/proxy/v2/test_es_proxy_v2.py | SpareFoot/amundsen | 002a1c8d574ee258a96efcaf22b518a93788381c | [
"Apache-2.0"
] | null | null | null | search/tests/unit/proxy/v2/test_es_proxy_v2.py | SpareFoot/amundsen | 002a1c8d574ee258a96efcaf22b518a93788381c | [
"Apache-2.0"
] | null | null | null | search/tests/unit/proxy/v2/test_es_proxy_v2.py | SpareFoot/amundsen | 002a1c8d574ee258a96efcaf22b518a93788381c | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import MagicMock
from amundsen_common.models.search import Filter, SearchResponse
from elasticsearch_dsl import Search
from elasticsearch_dsl.response import Response
from search_service import create_app
from search_service.proxy.es_proxy_v2 import ElasticsearchProxyV2, Resource
from tests.unit.proxy.v2.fixtures_v2 import (
FILTER_QUERY, RESPONSE_1, RESPONSE_2, TERM_FILTERS_QUERY, TERM_QUERY,
)
class TestElasticsearchProxyV2(unittest.TestCase):
    """Unit tests for ElasticsearchProxyV2 query building and response formatting."""
    def setUp(self) -> None:
        """Build a proxy around a fully mocked Elasticsearch client."""
        self.app = create_app(config_module_class='search_service.config.LocalConfig')
        self.app_context = self.app.app_context()
        self.app_context.push()
        mock_index = 'mock_index'
        mock_elasticsearch_client = MagicMock()
        # The proxy queries the alias and mapping of its index, so the
        # mocked client must answer both calls with a minimal shape.
        mock_elasticsearch_client.indices.get_alias.return_value = {
            mock_index: {}
        }
        mock_elasticsearch_client.indices.get_mapping.return_value = {
            mock_index: {
                'mappings': {}
            }
        }
        self.es_proxy = ElasticsearchProxyV2(host='mock_host',
                                             user='mock_user',
                                             password='mock_password',
                                             client=mock_elasticsearch_client,
                                             page_size=10)
    def test_build_elasticsearch_query_term_filters(self) -> None:
        """A term query combined with AND/OR filters matches the fixture query."""
        actual = self.es_proxy._build_elasticsearch_query(resource=Resource.FEATURE,
                                                          query_term="mock_feature",
                                                          filters=[
                                                              Filter(name='badges',
                                                                     values=['pii'],
                                                                     operation='AND'),
                                                              Filter(name='feature_group',
                                                                     values=['test_group', 'mock_group'],
                                                                     operation='OR')])
        expected = TERM_FILTERS_QUERY
        self.assertDictEqual(actual.to_dict(), expected)
    def test_build_elasticsearch_query_term_no_filters(self) -> None:
        """A bare term query with no filters matches the fixture query."""
        actual = self.es_proxy._build_elasticsearch_query(resource=Resource.TABLE,
                                                          query_term="mock_table",
                                                          filters=[])
        expected = TERM_QUERY
        self.assertDictEqual(actual.to_dict(), expected)
    def test_build_elasticsearch_query_just_filters(self) -> None:
        """Filters alone (empty query term) produce the filter-only fixture query."""
        actual = self.es_proxy._build_elasticsearch_query(resource=Resource.DASHBOARD,
                                                          query_term="",
                                                          filters=[
                                                              Filter(name='name',
                                                                     values=['mock_dashobard_*'],
                                                                     operation='OR'),
                                                              Filter(name='group_name',
                                                                     values=['test_group', 'mock_group'],
                                                                     operation='OR'),
                                                              Filter(name='tag',
                                                                     values=['tag_*', 'tag_2'],
                                                                     operation='AND')
                                                          ])
        expected = FILTER_QUERY
        self.assertDictEqual(actual.to_dict(), expected)
    def test_es_search_format_response_1_resource(self) -> None:
        """Formatting a response with hits for one resource leaves the other empty."""
        mock_es_dsl_search = Search()
        mock_es_dsl_responses = [Response(mock_es_dsl_search, r) for r in RESPONSE_1]
        formatted_response = self.es_proxy._format_response(page_index=0,
                                                            results_per_page=10,
                                                            responses=mock_es_dsl_responses,
                                                            resource_types=[Resource.TABLE, Resource.USER])
        expected = SearchResponse(msg='Success',
                                  page_index=0,
                                  results_per_page=10,
                                  results={
                                      "table": {
                                          "results": [
                                              {
                                                  "key": "mock_db://mock_cluster.mock_schema/mock_table_1",
                                                  "description": "mock table description",
                                                  "badges": [
                                                      "pii",
                                                      "beta"
                                                  ],
                                                  "tag": [
                                                      "mock_tag_1",
                                                      "mock_tag_2",
                                                      "mock_tag_3"
                                                  ],
                                                  "schema": "mock_schema",
                                                  "table": "mock_table_1",
                                                  "column": [
                                                      "mock_col_1",
                                                      "mock_col_2",
                                                      "mock_col_3"
                                                  ],
                                                  "database": "mock_db",
                                                  "cluster": "mock_cluster",
                                                  "search_score": 804.52716,
                                                  "resource_type": "table"
                                              },
                                              {
                                                  "key": "mock_db://mock_cluster.mock_schema/mock_table_2",
                                                  "description": "mock table description",
                                                  "badges": [],
                                                  "tag": [
                                                      "mock_tag_4",
                                                      "mock_tag_5",
                                                      "mock_tag_6"
                                                  ],
                                                  "schema": "mock_schema",
                                                  "table": "mock_table_2",
                                                  "column": [
                                                      "mock_col_1",
                                                      "mock_col_2",
                                                      "mock_col_3"
                                                  ],
                                                  "database": "mock_db",
                                                  "cluster": "mock_cluster",
                                                  "search_score": 9.104584,
                                                  "resource_type": "table"
                                              }
                                          ],
                                          "total_results": 2
                                      },
                                      "user": {
                                          "results": [],
                                          "total_results": 0
                                      }
                                  },
                                  status_code=200)
        self.assertEqual(formatted_response, expected)
    def test_es_search_format_response_multiple_resources(self) -> None:
        """Formatting spans table, user and feature hits in a single response."""
        mock_es_dsl_search = Search()
        mock_es_dsl_responses = [Response(mock_es_dsl_search, r) for r in RESPONSE_2]
        formatted_response = self.es_proxy._format_response(page_index=0,
                                                            results_per_page=10,
                                                            responses=mock_es_dsl_responses,
                                                            resource_types=[
                                                                Resource.TABLE,
                                                                Resource.USER,
                                                                Resource.FEATURE])
        expected = SearchResponse(msg='Success',
                                  page_index=0,
                                  results_per_page=10,
                                  results={
                                      "table": {
                                          "results": [
                                              {
                                                  "key": "mock_db://mock_cluster.mock_schema/mock_table_1",
                                                  "description": "mock table description",
                                                  "badges": [
                                                      "pii",
                                                      "beta"
                                                  ],
                                                  "tag": [
                                                      "mock_tag_1",
                                                      "mock_tag_2",
                                                      "mock_tag_3"
                                                  ],
                                                  "schema": "mock_schema",
                                                  "table": "mock_table_1",
                                                  "column": [
                                                      "mock_col_1",
                                                      "mock_col_2",
                                                      "mock_col_3"
                                                  ],
                                                  "database": "mock_db",
                                                  "cluster": "mock_cluster",
                                                  "search_score": 804.52716,
                                                  "resource_type": "table"
                                              },
                                              {
                                                  "key": "mock_db://mock_cluster.mock_schema/mock_table_2",
                                                  "description": "mock table description",
                                                  "badges": [],
                                                  "tag": [
                                                      "mock_tag_4",
                                                      "mock_tag_5",
                                                      "mock_tag_6"
                                                  ],
                                                  "schema": "mock_schema",
                                                  "table": "mock_table_2",
                                                  "column": [
                                                      "mock_col_1",
                                                      "mock_col_2",
                                                      "mock_col_3"
                                                  ],
                                                  "database": "mock_db",
                                                  "cluster": "mock_cluster",
                                                  "search_score": 9.104584,
                                                  "resource_type": "table"
                                              }
                                          ],
                                          "total_results": 2
                                      },
                                      "user": {
                                          "results": [
                                              {
                                                  "full_name": "Allison Suarez Miranda",
                                                  "first_name": "Allison",
                                                  "last_name": "Suarez Miranda",
                                                  "email": "mock_user@amundsen.com",
                                                  "search_score": 61.40606,
                                                  "resource_type": "user"
                                              }
                                          ],
                                          "total_results": 1
                                      },
                                      "feature": {
                                          "results": [
                                              {
                                                  "key": "none/feature_1/1",
                                                  "feature_group": "fg_2",
                                                  "feature_name": "feature_1",
                                                  "description": "mock feature description",
                                                  "entity": None,
                                                  "status": "active",
                                                  "version": 1,
                                                  "availability": None,
                                                  "tags": [],
                                                  "badges": [],
                                                  "search_score": 62.66787,
                                                  "resource_type": "feature"
                                              },
                                              {
                                                  "key": "fg_2/feature_2/1",
                                                  "feature_group": "fg_2",
                                                  "feature_name": "feature_2",
                                                  "description": "mock feature description",
                                                  "entity": None,
                                                  "status": "active",
                                                  "version": 1,
                                                  "availability": None,
                                                  "tags": [],
                                                  "badges": [],
                                                  "search_score": 62.66787,
                                                  "resource_type": "feature"
                                              },
                                              {
                                                  "key": "fg_3/feature_3/2",
                                                  "feature_group": "fg_3",
                                                  "feature_name": "feature_3",
                                                  "description": "mock feature description",
                                                  "entity": None,
                                                  "status": "active",
                                                  "version": 2,
                                                  "availability": None,
                                                  "tags": [],
                                                  "badges": [
                                                      "pii"
                                                  ],
                                                  "search_score": 62.66787,
                                                  "resource_type": "feature"
                                              }
                                          ],
                                          "total_results": 3
                                      }
                                  },
                                  status_code=200)
        self.assertEqual(formatted_response, expected)
| 61.05036 | 107 | 0.264494 |
ace2bfca13b01386036cac6f14e98f96069b02b1 | 2,260 | py | Python | generated-libraries/python/netapp/perf/instance_data.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/perf/instance_data.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/perf/instance_data.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.perf.counter_data import CounterData
from netapp.perf.aggregation_data import AggregationData
from netapp.netapp_object import NetAppObject
class InstanceData(NetAppObject):
    """
    Instance name and counter values.

    Generated API wrapper: each property mirrors one element of the
    "instance-data" structure; setters delegate validation to the
    NetAppObject base class.
    """
    # Backing field for `counters`.
    _counters = None
    @property
    def counters(self):
        """
        List of counter values of this instance. Each element of this
        list contains the value of a single counter.
        """
        return self._counters
    @counters.setter
    def counters(self, val):
        if val != None:
            self.validate('counters', val)
        self._counters = val
    # Backing field for `name`.
    _name = None
    @property
    def name(self):
        """
        Name of the instance
        """
        return self._name
    @name.setter
    def name(self, val):
        if val != None:
            self.validate('name', val)
        self._name = val
    # Backing field for `aggregation`.
    _aggregation = None
    @property
    def aggregation(self):
        """
        Information related to aggregation that was done. If element is absent, no
        aggregation was performed on this instance.
        """
        return self._aggregation
    @aggregation.setter
    def aggregation(self, val):
        if val != None:
            self.validate('aggregation', val)
        self._aggregation = val
    # Backing field for `uuid`.
    _uuid = None
    @property
    def uuid(self):
        """
        UUID of the instance
        """
        return self._uuid
    @uuid.setter
    def uuid(self, val):
        if val != None:
            self.validate('uuid', val)
        self._uuid = val
    @staticmethod
    def get_api_name():
        """Return the ONTAP API element name this class maps to."""
        return "instance-data"
    @staticmethod
    def get_desired_attrs():
        """Return the attribute names requested from the API by default."""
        return [
            'counters',
            'name',
            'aggregation',
            'uuid',
        ]
    def describe_properties(self):
        """Describe each property's element class, cardinality and requiredness."""
        return {
            'counters': { 'class': CounterData, 'is_list': True, 'required': 'required' },
            'name': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'aggregation': { 'class': AggregationData, 'is_list': False, 'required': 'optional' },
            'uuid': { 'class': basestring, 'is_list': False, 'required': 'required' },
        }
ace2bfd5a2a15b179fd5f8485b5dcfa01ad44d8f | 7,352 | py | Python | tests/models/commondb/load_history/load_history_integration_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 4 | 2018-08-26T09:11:36.000Z | 2019-05-24T12:01:02.000Z | tests/models/commondb/load_history/load_history_integration_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 52 | 2018-08-24T12:51:22.000Z | 2020-12-28T04:59:42.000Z | tests/models/commondb/load_history/load_history_integration_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 1 | 2018-10-11T07:57:55.000Z | 2018-10-11T07:57:55.000Z | # pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.models.commondb.load_history.load_history_integration_test.py is part of
# The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for testing Load History module integrations."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKLoadHistoryRecord
from ramstk.models.dbtables import RAMSTKLoadHistoryTable
from tests import (
SystemTestDeleteMethods,
SystemTestGetterSetterMethods,
SystemTestInsertMethods,
SystemTestSelectMethods,
)
@pytest.mark.usefixtures("test_attributes", "integration_test_table_model")
class TestSelectLoadHistory(SystemTestSelectMethods):
    """Class for testing Load History table do_select() and do_select_all() methods."""
    # Shared test-harness knobs consumed by SystemTestSelectMethods.
    __test__ = True
    _do_select_msg = "request_select_load_history"
    _record = RAMSTKLoadHistoryRecord
    _tag = "load_history"
@pytest.mark.usefixtures("integration_test_table_model")
class TestUpdateLoadHistory:
"""Class for testing Load History table do_update() and do_update_all() methods."""
__test__ = True
_next_id = 0
_record = RAMSTKLoadHistoryRecord
_tag = "load_history"
_update_id = 1
def on_succeed_update(self, tree):
"""Listen for succeed_update messages."""
assert isinstance(tree, Tree)
assert tree.get_node(1).data["load_history"].description == "New Load History"
print("\033[36m\nsucceed_update_load_history topic was broadcast")
def on_succeed_update_all(self):
"""Listen for succeed_update messages."""
print(
f"\033[36m\n\tsucceed_update_all topic was broadcast on update all "
f"{self._tag}s"
)
def on_fail_update_wrong_data_type(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert message == (
f"The value for one or more attributes for {self._tag} ID "
f"{self._update_id} was the wrong type."
)
print(
f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on wrong data "
f"type."
)
def on_fail_update_root_node_wrong_data_type(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert message == "Attempting to update the root node 0."
print(f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on root node.")
def on_fail_update_non_existent_id(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert (
message == f"Attempted to save non-existent {self._tag} with {self._tag} "
f"ID 100."
)
print(
f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on non-existent "
f"ID."
)
def on_fail_update_no_data_package(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert message == f"No data package found for {self._tag} ID {self._update_id}."
print(
f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on no data "
f"package."
)
@pytest.mark.integration
def test_do_update(self, integration_test_table_model):
"""Should update record attribute."""
pub.subscribe(self.on_succeed_update, "succeed_update_load_history")
integration_test_table_model.tree.get_node(1).data[
"load_history"
].description = "New Load History"
integration_test_table_model.do_update(1)
pub.unsubscribe(self.on_succeed_update, "succeed_update_load_history")
assert (
integration_test_table_model.tree.get_node(1)
.data["load_history"]
.description
== "New Load History"
)
integration_test_table_model.tree.get_node(1).data[
"load_history"
].description = "Integrated Circuit"
integration_test_table_model.do_update(1)
assert (
integration_test_table_model.tree.get_node(1)
.data["load_history"]
.description
== "Integrated Circuit"
)
@pytest.mark.integration
def test_do_update_all(self, integration_test_table_model):
"""Should update all the records in the database."""
pub.subscribe(self.on_succeed_update_all, "succeed_update_all_load_history")
pub.sendMessage("request_update_all_load_history")
pub.unsubscribe(self.on_succeed_update_all, "succeed_update_all_load_history")
@pytest.mark.integration
def test_do_update_wrong_data_type(self, integration_test_table_model):
"""Should send the do_log_debug message with wrong attribute data type."""
pub.subscribe(self.on_fail_update_wrong_data_type, "fail_update_load_history")
integration_test_table_model.tree.get_node(1).data[
"load_history"
].description = None
integration_test_table_model.do_update(1)
pub.unsubscribe(self.on_fail_update_wrong_data_type, "fail_update_load_history")
@pytest.mark.integration
def test_do_update_root_node_wrong_data_type(self, integration_test_table_model):
"""Should send the do_log_debug message when attempting to update root node."""
pub.subscribe(
self.on_fail_update_root_node_wrong_data_type, "fail_update_load_history"
)
integration_test_table_model.tree.get_node(1).data[
"load_history"
].description = None
integration_test_table_model.do_update(0)
pub.unsubscribe(
self.on_fail_update_root_node_wrong_data_type, "fail_update_load_history"
)
@pytest.mark.integration
def test_do_update_non_existent_id(self, integration_test_table_model):
    """Publish the fail message for a record ID that is not in the tree."""
    _topic = "fail_update_load_history"
    pub.subscribe(self.on_fail_update_non_existent_id, _topic)
    integration_test_table_model.do_select_all({"history_id": 1})
    # A string ID that was never loaded cannot exist in the tree.
    integration_test_table_model.do_update("skullduggery")
    pub.unsubscribe(self.on_fail_update_non_existent_id, _topic)
@pytest.mark.integration
def test_do_update_no_data_package(self, integration_test_table_model):
    """Publish the fail message when the tree node carries no data package.

    Popping the "load_history" entry from the node's data dict before the
    update should trigger fail_update_load_history.
    """
    _topic = "fail_update_load_history"
    pub.subscribe(self.on_fail_update_no_data_package, _topic)
    _node = integration_test_table_model.tree.get_node(1)
    _node.data.pop("load_history")
    integration_test_table_model.do_update(1)
    pub.unsubscribe(self.on_fail_update_no_data_package, _topic)
@pytest.mark.usefixtures("integration_test_table_model")
class TestGetterSetterLoadHistory(SystemTestGetterSetterMethods):
"""Class for testing Load History table getter and setter methods."""
# Enable pytest collection of the tests inherited from the base class.
__test__ = True
# Attribute name -> new value applied by the inherited setter tests.
_package = {"description": "Heavy melons"}
# Record class exercised by the inherited getter/setter tests.
_record = RAMSTKLoadHistoryRecord
# Table tag; matches the "*_load_history" pubsub topics used above.
_tag = "load_history"
# Node/record ID the inherited tests operate on.
_test_id = 1
| 36.577114 | 88 | 0.696273 |
ace2c0677c7591bd48b45d7be36c2bf7bcc2bd24 | 333 | py | Python | uri/contestNatalF.py | italo-batista/problems-solving | f83ad34f0abebd52925c4020635556f20743ba06 | [
"MIT"
] | null | null | null | uri/contestNatalF.py | italo-batista/problems-solving | f83ad34f0abebd52925c4020635556f20743ba06 | [
"MIT"
] | null | null | null | uri/contestNatalF.py | italo-batista/problems-solving | f83ad34f0abebd52925c4020635556f20743ba06 | [
"MIT"
] | null | null | null | n = int(raw_input())
# Tally of well-behaved children; a leading "+" in the input marks good behaviour.
comportadas = 0
children = []
for i in range(n):
    ent = map(str, raw_input().split())
    children.append(ent[1])
    if ent[0] == "+":
        comportadas += 1
# Print every child's name in alphabetical order (Python 2 print statement).
children.sort()
for child in children:
    print child
print "Se comportaram: %d | Nao se comportaram: %d" % (comportadas, n - comportadas)
| 15.857143 | 84 | 0.60961 |
ace2c0e0b34e68772bbfebc096f58c18ae3f27be | 3,730 | py | Python | sip/metamorphosis/beam.py | SKA-ScienceDataProcessor/sip-maps-pipeline | 07712fcdee59717b80ba69d299b627bc43f0009b | [
"Apache-2.0"
] | 1 | 2017-12-15T12:56:09.000Z | 2017-12-15T12:56:09.000Z | sip/metamorphosis/beam.py | SKA-ScienceDataProcessor/sip-maps-pipeline | 07712fcdee59717b80ba69d299b627bc43f0009b | [
"Apache-2.0"
] | null | null | null | sip/metamorphosis/beam.py | SKA-ScienceDataProcessor/sip-maps-pipeline | 07712fcdee59717b80ba69d299b627bc43f0009b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
""" beam.py: A script to load in the LOFAR station response.
"""
import casacore.tables as pt
import lofar.stationresponse as st
import numpy as np
__author__ = "Jamie Farnes"
__email__ = "jamie.farnes@oerc.ox.ac.uk"
def beam_me_up(inputs_dir, ms1):
    """Construct the LOFAR station response object for a measurement set.

    Args:
        inputs_dir (str): location of input directory.
        ms1 (str): name of measurement set.

    Returns:
        beams: array of station responses.
    """
    # Options passed to lofar.stationresponse.stationresponse:
    #   msname             -- name of the Measurement Set.
    #   inverse            -- compute the inverse of the LOFAR beam
    #                         (default False).
    #   useElementResponse -- include the effect of the dual dipole
    #                         (element) beam (default True).
    #   useArrayFactor     -- include the effect of the station and tile
    #                         array factor (default True).
    #   useChanFreq        -- compute the phase shift for the station
    #                         beamformer using the channel frequency instead
    #                         of the subband reference frequency; enable for
    #                         MSs with multiple subbands compressed to single
    #                         channels in one spectral window (default False).
    ms_path = '%s/%s' % (inputs_dir, ms1)
    return st.stationresponse(msname=ms_path,
                              inverse=False,
                              useElementResponse=True,
                              useArrayFactor=True,
                              useChanFreq=False)
def apply_station_beams(vis, beams, channel):
    #
    # This is a dummy function. It currently does nothing. It will eventually apply the LOFAR
    # stations beams to the data.
    # Correct for the Station Beam. This mostly compensates for the element beam effects and the projection of the dipoles on the sky. However, the LOFAR fields are big, and the projection of the dipoles vary across the field of view.
    # NOTE(review): the triple-quoted block below is the planned implementation,
    # parked as a non-executed string, so only ``return vis`` runs today.
    # Two latent bugs to fix before enabling it:
    #   * the write-back loop indexes ``vis.data['vis'][q:q+1]`` -- it should be
    #     offset by ``first_index`` (i.e. ``first_index + q``), otherwise every
    #     time block overwrites the start of the visibility array;
    #   * the final ``else`` branch only prints an error, leaving
    #     ``second_index`` undefined and guaranteeing a NameError on the
    #     following line if it is ever reached.
    """
    # times are MJD in seconds:
    times = vis.data['time']
    # Find the indices of unique times within the MS:
    _, indices = np.unique(times, return_index=True)
    beams.setDirection(0.01,0.5*np.pi)
    beams.evaluateStation(time=times[0],station=0)
    for i in range( len(indices) ):
        #print("times", len(indices), indices[i], indices[i+1], i, times[indices[i]])
        #START = t.time()
        response = beams.evaluate(time=times[indices[i]])
        # :,channel_number = all stations, single channel.
        response = response[:,channel]
        # Can refer to a specific station via, response[station_number]
        # response[28] would now contain the beam response for station 28 at the previously selected time, as a 2x2 Jones matrix.
        #END = t.time()
        first_index = indices[i]
        if i+1 < len(indices):
            second_index = indices[i+1]
        elif i+1 == len(indices):
            second_index = len(vis.data)
        else:
            print("ERROR: unknown error has occurred.")
        temp_len = second_index-first_index
        # Get it in the form J=[[j_xx, j_xy],[j_yx, j_yy]]
        visi_data = np.reshape(vis.data['vis'][first_index:second_index],(temp_len,2,2))
        response_1 = response[vis.data['antenna1'][first_index:second_index]]
        response_2 = response[vis.data['antenna2'][first_index:second_index]]
        # Need the inverse of the first beam:
        beam_1 = np.linalg.inv(response_1)
        # Need the Hermitian of the inverse of the second beam:
        beam_2 = np.transpose(np.conj(np.linalg.inv(response_2)), axes=(0,2,1))
        # Now calculate: corrected = beam_i^-1 uncorrected herm(beam_j^-1)
        #vis.data['vis'][first_index:second_index] = np.reshape(beam_1*visi_data*beam_2,(temp_len,4))
        for q in range(len(visi_data)):
            vis.data['vis'][q:q+1] = np.reshape( np.dot(np.dot(beam_1[q],visi_data[q]),beam_2[q]),(1,4))
    """
    # Pass the visibilities through unchanged until the above is activated.
    return vis
| 39.263158 | 234 | 0.664343 |
ace2c18149fc6e5cb3a8fa3ee49a07f9709d35a4 | 79,562 | py | Python | numpy/core/tests/test_regression.py | argriffing/numpy | da6e4c71aa229b8bdb18d643456cda4594e6384a | [
"BSD-3-Clause"
] | 1 | 2019-04-27T16:03:46.000Z | 2019-04-27T16:03:46.000Z | numpy/core/tests/test_regression.py | argriffing/numpy | da6e4c71aa229b8bdb18d643456cda4594e6384a | [
"BSD-3-Clause"
] | null | null | null | numpy/core/tests/test_regression.py | argriffing/numpy | da6e4c71aa229b8bdb18d643456cda4594e6384a | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division, absolute_import, print_function
import copy
import pickle
import sys
import platform
import gc
import warnings
import tempfile
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self,level=rlevel):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self,level=rlevel):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a, b)
def test_typeNA(self,level=rlevel):
# Ticket #31
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self,level=rlevel):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self,level=rlevel):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self,level=rlevel):
# Check that nothing is done when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self,level=rlevel):
# Check that ravel works when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self,level=rlevel):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
# Ticket #50
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError, rs)
def test_bool(self,level=rlevel):
# Ticket #60
np.bool_(1) # Should succeed
def test_indexing1(self,level=rlevel):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
# Ticket #79
ulen = 1
ucs_value = sixu('\U0010FFFF')
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
def test_object_array_fill(self,level=rlevel):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
# Ticket #93
self.assertRaises(TypeError, np.dtype,
{'names':['a'],'formats':['foo']}, align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and
platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
# Ticket #99
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width, 16)
self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
self.assertRaises(ValueError, np.intp, '0x1', 32)
assert_equal(255, np.intp('0xFF', 16))
assert_equal(1024, np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a > 2) & (a < 6))
xb = np.where((b > 2) & (b < 6))
ya = ((a > 2) & (a < 6))
yb = ((b > 2) & (b < 6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self,level=rlevel):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self,level=rlevel):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self,level=rlevel):
# Ticket #106
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self,level=rlevel):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
def test_argmax(self,level=rlevel):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
def test_mem_divmod(self,level=rlevel):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self,level=rlevel):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
self.assertRaises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self,level=rlevel):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
# Ticket #143
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self,level=rlevel):
# Ticket #151
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))
def test_flat_assignment(self,level=rlevel):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self,level=rlevel):
# Ticket #194
x = np.empty((3, 1))
def bfa():
x[:] = np.arange(3)
def bfb():
x[:] = np.arange(3, dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self,level=rlevel):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
f = BytesIO()
pickle.dump(dt, f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self,level=rlevel):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
# Ticket #222
x = np.chararray((1,), 5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
def test_lexsort(self,level=rlevel):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
class BuggySequence(object):
def __len__(self):
return 4
def __getitem__(self, key):
raise KeyError
assert_raises(KeyError, np.lexsort, BuggySequence())
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
test_data = [
# (original, py2_pickle)
(np.unicode_('\u6f2c'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")),
(np.array([9e123], dtype=np.float64),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")),
(np.array([(9e123,)], dtype=[('name', float)]),
asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")),
]
if sys.version_info[:2] >= (3, 4):
# encoding='bytes' was added in Py3.4
for original, data in test_data:
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
if isinstance(result, np.ndarray) and result.dtype.names:
for name in result.dtype.names:
assert_(isinstance(name, str))
def test_pickle_dtype(self,level=rlevel):
# Ticket #251
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self, level=rlevel):
# Ticket #270
np.array([1, 'A', None]) # Should succeed
def test_multiple_assign(self, level=rlevel):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
res1 = arr
if res1.dtype.kind in 'uib':
assert_((res1 == res2).all(), func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self, level=rlevel):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self, level=rlevel):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self, level=rlevel):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self, level=rlevel):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self, level=rlevel):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self, level=rlevel):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self, level=rlevel):
# Ticket #330
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self, level=rlevel):
# Ticket #335
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
# Ticket #341
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
# Ticket #342
self.assertRaises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self, level=rlevel):
# Ticket #344
dt1 = np.dtype(('uint32', 2))
dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self, level=rlevel):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self, level=rlevel):
# GitHub Issue #2700, setting shape failed for 0-sized arrays
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self, level=rlevel):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self, level=rlevel):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self, level=rlevel):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self, level=rlevel):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self, level=rlevel):
# Changeset #3443
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
# Convolve should raise an error for empty input array.
self.assertRaises(ValueError, np.convolve, [], [1])
self.assertRaises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self, level=rlevel):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self, level=rlevel):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
# Ticket #483
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
def test_take_output(self, level=rlevel):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self, level=rlevel):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
def test_frompyfunc_endian(self, level=rlevel):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
def test_arr_transpose(self, level=rlevel):
# Ticket #516
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
def test_string_mergesort(self, level=rlevel):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self, level=rlevel):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self, level=rlevel):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
self.assertRaises(TypeError, rs)
def test_unicode_scalar(self, level=rlevel):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
# After removing deprecation, the following are ValueErrors.
# This might seem odd as compared to the value error below. This
# is due to the fact that the new code always uses "nonzero" logic
# and the boolean special case is not taken.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self, level=rlevel):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self, level=rlevel):
# Check argsort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self, level=rlevel):
# Check sort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self, level=rlevel):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel):
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_copy_detection_corner_case2(self, level=rlevel):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1-d broadcast slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1-d -> 2-d broadcast slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:,:] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2-d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
tmp = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
tmp = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
tmp = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
tmp = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
del tmp # Avoid pyflakes unused variable warning
def test_mem_custom_float_to_array(self, level=rlevel):
# Ticket 702
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
def test_object_array_refcount_self_assign(self, level=rlevel):
# Ticket #711
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1, 2, 3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
# Ticket #714
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
# Ticket #640, floats from string
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
y = pickle.loads(pickle.dumps(x))
# y is now typically not aligned on a 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
# Ticket #788, changeset r5155
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
# Ticket #789, changeset 5217.
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype=np.single)
try:
a.compress([True, False], axis=1, out=b)
raise AssertionError("compress with an out which cannot be "
"safely casted should not return "
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
# These methods do not preserve subclasses
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
def test_recarray_tolist(self, level=rlevel):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
# Make sure that .item() fails graciously when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1', '2', '3']))
assert_equal(a, b)
def test_unaligned_unicode_access(self, level=rlevel):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
x = np.array([(asbytes('a'), sixu('b'))], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
# Ticket #816
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
self.assertRaises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
# Ticket #955
with np.errstate(all="ignore"):
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
def test_void_scalar_with_titles(self, level=rlevel):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
#Issue #1550
#Create test string data, construct void scalar from data and assert
#that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
#Create record scalar, construct from data and assert that
#reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
#Test pickle and unpickle of void and record scalars
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
    def test_zeros(self):
        # Regression test for #1061.
        # Set a size which cannot fit into a 64 bits signed integer
        sz = 2 ** 64
        # np.empty must reject the oversized request with exactly this
        # ValueError message — not crash, not raise another exception type.
        good = 'Maximum allowed dimension exceeded'
        try:
            np.empty(sz)
        except ValueError as e:
            # Wrong message means a different failure mode slipped in.
            if not str(e) == good:
                self.fail("Got msg '%s', expected '%s'" % (e, good))
        except Exception as e:
            self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
# Regression test for #1062.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
np.arange(sz)
self.assertTrue(np.size == sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
# Ticket #1058
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
# Ticket #1078: segfaults when creating an array with a sequence of
# 0d arrays.
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
    def test_array_from_sequence_scalar_array2(self):
        # Ticket #1081: weird array with strange input...
        # Mixing an empty 1-d array with a 0-d object array must yield a
        # 2-element object array rather than crashing.
        t = np.array([np.array([]), np.array(0, object)])
        assert_equal(t.shape, (2,))
        assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
# Ticket #1080.
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
# Ticket #1106.
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
# Ticket #1217, don't segfault.
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
# Ticket #1240.
a = np.array([[sixu('abc'), sixu('\u03a3')],
[sixu('asdf'), sixu('erw')]],
dtype='U')
self.assertRaises(UnicodeEncodeError, np.array, a, 'S4')
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', sixu('123')])
assert_(a.itemsize == 16)
a = np.array([sixu('123'), '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', sixu('123'), '12345'])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('12345')])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('1234')])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
# Ticket #1198 and #1267
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
# Ticket #1267
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
# Ticket #1267
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
# Ticket #1259 and gh-441
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
# Ticket #1299
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
def test_structured_arrays_with_objects2(self):
# Ticket #1299 second test
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
# Ticket #1254
dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
self.assertRaises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(divide="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
# ignore complex warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tobytes())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
# Ticket #2218
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
    def test_nonzero_byteswap(self):
        # nonzero() must look at the logical (float32) values, not the raw
        # byte pattern, after a byte swap.
        a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
        # Reinterpret the same buffer as float32: 0x80000000 is -0.0 (falsy),
        # 0x00000080 is a tiny subnormal (truthy), 0 is 0.0 (falsy).
        a.dtype = np.float32
        assert_equal(a.nonzero()[0], [1])
        # Swap the bytes and fix up the dtype so values are unchanged;
        # nonzero() must still find only index 1.
        a = a.byteswap().newbyteorder()
        assert_equal(a.nonzero()[0], [1])  # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
# The second use of itemsize was throwing an exception because in
# ctors.c, discover_itemsize was calling PyObject_Length without
# checking the return code. This failed to get the length of the
# number 2, and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
# Similar to GitHub issue #387
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# Test the same for a circular reference.
b = np.array(a, dtype=object)
a[()] = b
assert_raises(TypeError, int, a)
# Numpy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = 0
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
assert_equal(sys.getrefcount(a[()]), 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield should not overwrite Object fields with non-Object data
x = np.array([1, 2, 3], dtype=object)
assert_raises(TypeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
# Ticket #1756
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d" % i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
# Check that alignment flag is updated on stride setting
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
"p13\ntp14\nb.")
if sys.version_info[0] >= 3:
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
# Check that scalar unpickling hack in Py3 that supports
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1
asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
"tp8\nRp9\n."),
'different'),
]
if sys.version_info[0] >= 3:
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
# Decoding under non-latin1 encoding (e.g.) KOI8-R can
# produce bad results, but should not segfault.
if koi8r_validity == 'different':
# Unicode code points happen to lie within latin1,
# but are different in koi8-r, resulting to silent
# bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
elif koi8r_validity == 'invalid':
# Unicode code points outside latin1, so results
# to an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for Numpy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([sixu('abcd')])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2, ), dtype)
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')],
[sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
self.assertTrue(arr is not arr_cp)
self.assertTrue(isinstance(arr_cp, type(arr)))
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
import operator as op
# dummy class where __array__ throws exception
class Foo(object):
__array_priority__ = 1002
def __array__(self,*args,**kwargs):
raise Exception()
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
if sys.version_info[0] >= 3:
assert_raises(TypeError, f, lhs, rhs)
else:
f(lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1,2,3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
import pickle
test_string = np.string_('')
assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
def passer(*args):
pass
assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
def test_repeat_broadcasting(self):
# gh-5743
a = np.arange(60).reshape(3, 4, 5)
for axis in chain(range(-a.ndim, a.ndim), [None]):
assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
def test_frompyfunc_nout_0(self):
# gh-2014
def f(x):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
assert_equal(uf(a), ())
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
('d', (np.str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
for i in range(100):
a == a
assert_(sys.getrefcount(a) < 10)
# The case in the bug report.
before = sys.getrefcount(a)
u, v = a[0], a[1]
u == v
del u, v
gc.collect()
after = sys.getrefcount(a)
assert_equal(before, after)
def test_empty_percentile(self):
# gh-6530 / gh-6553
assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
def test_void_compare_segfault(self):
# gh-6922. The following should not segfault
a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
a.sort()
def test_reshape_size_overflow(self):
# gh-7455
a = np.ones(20)[::2]
if np.dtype(np.intp).itemsize == 8:
# 64 bit. The following are the prime factors of 2**63 + 5,
# plus a leading 2, so when multiplied together as int64,
# the result overflows to a total size of 10.
new_shape = (2, 13, 419, 691, 823, 2977518503)
else:
# 32 bit. The following are the prime factors of 2**31 + 5,
# plus a leading 2, so when multiplied together as int32,
# the result overflows to a total size of 10.
new_shape = (2, 7, 7, 43826197)
assert_raises(ValueError, a.reshape, new_shape)
if __name__ == "__main__":
run_module_suite()
| 36.115297 | 99 | 0.54927 |
ace2c26537d0a6b2331ac6da1e0b20bf08905b44 | 1,275 | py | Python | setup.py | nrhodes/coursera_autograder | 99d0b609a2ebd0384c4dd5dd29e4894d76e46e31 | [
"Apache-2.0"
] | 9 | 2020-12-04T06:08:14.000Z | 2022-01-18T09:39:54.000Z | setup.py | nrhodes/coursera_autograder | 99d0b609a2ebd0384c4dd5dd29e4894d76e46e31 | [
"Apache-2.0"
] | 20 | 2020-10-22T11:07:20.000Z | 2022-03-05T19:04:09.000Z | setup.py | nrhodes/coursera_autograder | 99d0b609a2ebd0384c4dd5dd29e4894d76e46e31 | [
"Apache-2.0"
] | 8 | 2020-10-22T11:04:08.000Z | 2022-01-21T16:34:29.000Z | from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='coursera_autograder',
version='0.1.2',
description='A toolkit to help develop asynchronous graders for Coursera\
based on docker images.',
long_description=readme(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
],
keywords='grading programming coursera sdk docker cli tool',
url='https://github.com/coursera/coursera_autograder',
author='Joseph Li',
author_email='jli@coursera.org',
license='Apache',
entry_points={
'console_scripts': [
'coursera_autograder = coursera_autograder.main:main',
],
},
packages=['coursera_autograder', 'coursera_autograder.commands'],
install_requires=[
'dockerfile-parse>=0.0.6',
'docker-py>=1.10.4',
'requests>=2.9.2',
'requests-toolbelt>=0.7.1',
'semver>=2.7.5',
],
test_suite='nose.collector',
tests_require=['nose', 'nose-cover3'],
include_package_data=True,
zip_safe=False)
| 31.097561 | 79 | 0.598431 |
ace2c2a4a9546c013dd55f9cc22bd254dd579219 | 378 | py | Python | dpdk-nfs/nf/testbed/hard/util/read_pcvs.py | dslab-epfl/pix | bab9226ad307ec7f1f548e00f216de9c80b7be48 | [
"MIT"
] | 2 | 2022-03-08T16:10:29.000Z | 2022-03-24T09:13:26.000Z | dpdk-nfs/nf/testbed/hard/util/read_pcvs.py | dslab-epfl/pix | bab9226ad307ec7f1f548e00f216de9c80b7be48 | [
"MIT"
] | null | null | null | dpdk-nfs/nf/testbed/hard/util/read_pcvs.py | dslab-epfl/pix | bab9226ad307ec7f1f548e00f216de9c80b7be48 | [
"MIT"
] | null | null | null | import sys
ip_file = sys.argv[1]
op_file = sys.argv[2]
import numpy as np
def main():
numbers = list()
with open(ip_file, 'r') as f:
numbers = [int(line.strip()) for line in f]
with open(op_file,'w') as op:
numbers = np.array(numbers)
for i in range(101):
op.write("%d,%d\n" %(i,int(np.percentile(numbers,i))))
if __name__ == "__main__":
main() | 18.9 | 60 | 0.611111 |
ace2c2aa27270cf998aa89266178d75f4b1922b5 | 12,059 | py | Python | tests/sqlite/grammar/test_sqlite_select_grammar.py | mitchdennett/orm | 5ef3b93afc8ba044591b826d78cba5edfd44a868 | [
"MIT"
] | null | null | null | tests/sqlite/grammar/test_sqlite_select_grammar.py | mitchdennett/orm | 5ef3b93afc8ba044591b826d78cba5edfd44a868 | [
"MIT"
] | null | null | null | tests/sqlite/grammar/test_sqlite_select_grammar.py | mitchdennett/orm | 5ef3b93afc8ba044591b826d78cba5edfd44a868 | [
"MIT"
] | null | null | null | import inspect
import unittest
from src.masoniteorm.query.grammars import SQLiteGrammar
from src.masoniteorm.testing import BaseTestCaseSelectGrammar
class TestSQLiteGrammar(BaseTestCaseSelectGrammar, unittest.TestCase):
grammar = SQLiteGrammar
def can_compile_select(self):
"""
self.builder.to_sql()
"""
return """SELECT * FROM "users\""""
def can_compile_order_by_and_first(self):
"""
self.builder.order_by('id', 'asc').first()
"""
return """SELECT * FROM "users" ORDER BY "id" ASC LIMIT 1"""
def can_compile_with_columns(self):
"""
self.builder.select('username', 'password').to_sql()
"""
return """SELECT "users"."username", "users"."password" FROM "users\""""
def can_compile_with_where(self):
"""
self.builder.select('username', 'password').where('id', 1).to_sql()
"""
return """SELECT "users"."username", "users"."password" FROM "users" WHERE "users"."id" = '1'"""
def can_compile_with_several_where(self):
"""
self.builder.select('username', 'password').where('id', 1).where('username', 'joe').to_sql()
"""
return """SELECT "users"."username", "users"."password" FROM "users" WHERE "users"."id" = '1' AND "users"."username" = 'joe'"""
def can_compile_with_several_where_and_limit(self):
"""
self.builder.select('username', 'password').where('id', 1).where('username', 'joe').limit(10).to_sql()
"""
return """SELECT "users"."username", "users"."password" FROM "users" WHERE "users"."id" = '1' AND "users"."username" = 'joe' LIMIT 10"""
def can_compile_with_sum(self):
"""
self.builder.sum('age').to_sql()
"""
return """SELECT SUM("users"."age") AS age FROM "users\""""
def can_compile_with_max(self):
"""
self.builder.max('age').to_sql()
"""
return """SELECT MAX("users"."age") AS age FROM "users\""""
def can_compile_with_max_and_columns(self):
"""
self.builder.select('username').max('age').to_sql()
"""
return """SELECT "users"."username", MAX("users"."age") AS age FROM "users\""""
def can_compile_with_max_and_columns_different_order(self):
"""
self.builder.max('age').select('username').to_sql()
"""
return """SELECT "users"."username", MAX("users"."age") AS age FROM "users\""""
def can_compile_with_order_by(self):
"""
self.builder.select('username').order_by('age', 'desc').to_sql()
"""
return """SELECT "users"."username" FROM "users" ORDER BY "age" DESC"""
def can_compile_with_multiple_order_by(self):
"""
self.builder.select('username').order_by('age', 'desc').order_by('name').to_sql()
"""
return (
"""SELECT "users"."username" FROM "users" ORDER BY "age" DESC, "name" ASC"""
)
def can_compile_with_group_by(self):
"""
self.builder.select('username').group_by('age').to_sql()
"""
return """SELECT "users"."username" FROM "users" GROUP BY "users"."age\""""
def can_compile_where_in(self):
"""
self.builder.select('username').where_in('age', [1,2,3]).to_sql()
"""
return """SELECT "users"."username" FROM "users" WHERE "users"."age" IN ('1','2','3')"""
def can_compile_where_in_empty(self):
"""
self.builder.where_in('age', []).to_sql()
"""
return """SELECT * FROM "users" WHERE 0 = 1"""
def can_compile_where_not_in(self):
"""
self.builder.select('username').where_not_in('age', [1,2,3]).to_sql()
"""
return """SELECT "users"."username" FROM "users" WHERE "users"."age" NOT IN ('1','2','3')"""
def can_compile_where_null(self):
"""
self.builder.select('username').where_null('age').to_sql()
"""
return """SELECT "users"."username" FROM "users" WHERE "users"."age" IS NULL"""
def can_compile_where_not_null(self):
"""
self.builder.select('username').where_not_null('age').to_sql()
"""
return (
"""SELECT "users"."username" FROM "users" WHERE "users"."age" IS NOT NULL"""
)
def can_compile_where_raw(self):
"""
self.builder.where_raw(""age" = '18'").to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."age" = '18'"""
def can_compile_select_raw(self):
"""
self.builder.select_raw("COUNT(*)").to_sql()
"""
return """SELECT COUNT(*) FROM "users\""""
def can_compile_limit_and_offset(self):
"""
self.builder.limit(10).offset(10).to_sql()
"""
return """SELECT * FROM "users" LIMIT 10 OFFSET 10"""
def can_compile_select_raw_with_select(self):
"""
self.builder.select('id').select_raw("COUNT(*)").to_sql()
"""
return """SELECT "users"."id", COUNT(*) FROM "users\""""
def can_compile_count(self):
"""
self.builder.count().to_sql()
"""
return """SELECT COUNT(*) AS m_count_reserved FROM "users\""""
def can_compile_count_column(self):
"""
self.builder.count().to_sql()
"""
return """SELECT COUNT("users"."money") AS money FROM "users\""""
def can_compile_where_column(self):
"""
self.builder.where_column('name', 'email').to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."name" = "users"."email\""""
def can_compile_or_where(self):
"""
self.builder.where('name', 2).or_where('name', 3).to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."name" = '2' OR "users"."name" = '3'"""
def can_grouped_where(self):
"""
self.builder.where(lambda query: query.where('age', 2).where('name', 'Joe')).to_sql()
"""
return """SELECT * FROM "users" WHERE ("users"."age" = '2' AND "users"."name" = 'Joe')"""
def can_compile_sub_select(self):
"""
self.builder.where_in('name',
QueryBuilder(GrammarFactory.make(self.grammar), table='users').select('age')
).to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."name" IN (SELECT "users"."age" FROM "users")"""
def can_compile_sub_select_value(self):
"""
self.builder.where('name',
self.builder.new().sum('age')
).to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."name" = (SELECT SUM("users"."age") AS age FROM "users")"""
def can_compile_complex_sub_select(self):
"""
self.builder.where_in('name',
(QueryBuilder(GrammarFactory.make(self.grammar), table='users')
.select('age').where_in('email',
QueryBuilder(GrammarFactory.make(self.grammar), table='users').select('email')
))
).to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."name" IN (SELECT "users"."age" FROM "users" WHERE "users"."email" IN (SELECT "users"."email" FROM "users"))"""
def can_compile_exists(self):
"""
self.builder.select('age').where_exists(
self.builder.new().select('username').where('age', 12)
).to_sql()
"""
return """SELECT "users"."age" FROM "users" WHERE EXISTS (SELECT "users"."username" FROM "users" WHERE "users"."age" = '12')"""
def can_compile_having(self):
"""
builder.sum('age').group_by('age').having('age').to_sql()
"""
return """SELECT SUM("users"."age") AS age FROM "users" GROUP BY "users"."age" HAVING "users"."age\""""
def can_compile_having_with_expression(self):
"""
builder.sum('age').group_by('age').having('age', 10).to_sql()
"""
return """SELECT SUM("users"."age") AS age FROM "users" GROUP BY "users"."age" HAVING "users"."age" = '10'"""
def can_compile_having_with_greater_than_expression(self):
"""
builder.sum('age').group_by('age').having('age', '>', 10).to_sql()
"""
return """SELECT SUM("users"."age") AS age FROM "users" GROUP BY "users"."age" HAVING "users"."age" > '10'"""
def can_compile_join(self):
"""
builder.join('contacts', 'users.id', '=', 'contacts.user_id').to_sql()
"""
return """SELECT * FROM "users" INNER JOIN "contacts" ON "users"."id" = "contacts"."user_id\""""
def can_compile_left_join(self):
"""
builder.join('contacts', 'users.id', '=', 'contacts.user_id').to_sql()
"""
return """SELECT * FROM "users" LEFT JOIN "contacts" ON "users"."id" = "contacts"."user_id\""""
def can_compile_multiple_join(self):
"""
builder.join('contacts', 'users.id', '=', 'contacts.user_id').to_sql()
"""
return """SELECT * FROM "users" INNER JOIN "contacts" ON "users"."id" = "contacts"."user_id" INNER JOIN "posts" ON "comments"."post_id" = "posts"."id\""""
def can_compile_between(self):
"""
builder.between('age', 18, 21).to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."age" BETWEEN '18' AND '21'"""
def can_compile_not_between(self):
"""
builder.not_between('age', 18, 21).to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."age" NOT BETWEEN '18' AND '21'"""
def test_can_compile_where_raw(self):
to_sql = self.builder.where_raw(""" "age" = '18'""").to_sql()
self.assertEqual(to_sql, """SELECT * FROM "users" WHERE "age" = '18'""")
def test_can_compile_select_raw(self):
to_sql = self.builder.select_raw("COUNT(*)").to_sql()
self.assertEqual(to_sql, """SELECT COUNT(*) FROM "users\"""")
def test_can_compile_select_raw_with_select(self):
to_sql = self.builder.select("id").select_raw("COUNT(*)").to_sql()
self.assertEqual(to_sql, """SELECT "users"."id", COUNT(*) FROM "users\"""")
def can_compile_first_or_fail(self):
"""
builder = self.get_builder()
builder.where("is_admin", "=", True).first_or_fail()
"""
return """SELECT * FROM "users" WHERE "users"."is_admin" = '1' LIMIT 1"""
def where_not_like(self):
"""
builder = self.get_builder()
builder.where("age", "not like", "%name%").to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."age" NOT LIKE '%name%'"""
def where_like(self):
"""
builder = self.get_builder()
builder.where("age", "not like", "%name%").to_sql()
"""
return """SELECT * FROM "users" WHERE "users"."age" LIKE '%name%'"""
def can_compile_join_clause(self):
"""
builder = self.get_builder()
builder.where("age", "not like", "%name%").to_sql()
"""
return """SELECT * FROM "users" INNER JOIN "report_groups" AS "rg" ON "bgt"."fund" = "rg"."fund" AND "bgt"."dept" = "rg"."dept" AND "bgt"."acct" = "rg"."acct" AND "bgt"."sub" = "rg"."sub\""""
def can_compile_join_clause_with_where(self):
"""
builder = self.get_builder()
builder.where("age", "not like", "%name%").to_sql()
"""
return """SELECT * FROM "users" INNER JOIN "report_groups" AS "rg" ON "bgt"."fund" = "rg"."fund" WHERE "bgt" = '1'"""
def can_compile_join_clause_with_null_where(self):
"""
builder = self.get_builder()
builder.where("age", "not like", "%name%").to_sql()
"""
return """SELECT * FROM "users" INNER JOIN "report_groups" AS "rg" ON "bgt"."fund" = "rg"."fund" WHERE "bgt" IS NULL"""
def can_compile_join_clause_with_lambda(self):
"""
builder = self.get_builder()
builder.where("age", "not like", "%name%").to_sql()
"""
return """SELECT * FROM "users" INNER JOIN "report_groups" AS "rg" ON "bgt"."fund" = "rg"."fund" WHERE "bgt" IS NULL"""
| 36.990798 | 199 | 0.562402 |
ace2c30a584ee6f94747c924720ec140d0b1b1fb | 3,010 | py | Python | Algorithms_medium/0388. Longest Absolute File Path.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | 4 | 2020-08-11T20:45:15.000Z | 2021-03-12T00:33:34.000Z | Algorithms_medium/0388. Longest Absolute File Path.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | Algorithms_medium/0388. Longest Absolute File Path.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | """
0388. Longest Absolute File Path
Medium
700
1665
Add to List
Share
Suppose we have a file system that stores both files and directories. An example of one system is represented in the following picture:
Here, we have dir as the only directory in the root. dir contains two subdirectories, subdir1 and subdir2. subdir1 contains a file file1.ext and subdirectory subsubdir1. subdir2 contains a subdirectory subsubdir2, which contains a file file2.ext.
In text form, it looks like this (with ⟶ representing the tab character):
dir
⟶ subdir1
⟶ ⟶ file1.ext
⟶ ⟶ subsubdir1
⟶ subdir2
⟶ ⟶ subsubdir2
⟶ ⟶ ⟶ file2.ext
If we were to write this representation in code, it will look like this: "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext". Note that the '\n' and '\t' are the new-line and tab characters.
Every file and directory has a unique absolute path in the file system, which is the order of directories that must be opened to reach the file/directory itself, all concatenated by '/'s. Using the above example, the absolute path to file2.ext is "dir/subdir2/subsubdir2/file2.ext". Each directory name consists of letters, digits, and/or spaces. Each file name is of the form name.extension, where name and extension consist of letters, digits, and/or spaces.
Given a string input representing the file system in the explained format, return the length of the longest absolute path to a file in the abstracted file system. If there is no file in the system, return 0.
Example 1:
Input: input = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
Output: 20
Explanation: We have only one file, and the absolute path is "dir/subdir2/file.ext" of length 20.
Example 2:
Input: input = "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext"
Output: 32
Explanation: We have two files:
"dir/subdir1/file1.ext" of length 21
"dir/subdir2/subsubdir2/file2.ext" of length 32.
We return 32 since it is the longest absolute path to a file.
Example 3:
Input: input = "a"
Output: 0
Explanation: We do not have any files, just a single directory named "a".
Example 4:
Input: input = "file1.txt\nfile2.txt\nlongfile.txt"
Output: 12
Explanation: There are 3 files at the root directory.
Since the absolute path for anything at the root directory is just the name itself, the answer is "longfile.txt" with length 12.
Constraints:
1 <= input.length <= 104
input may contain lowercase or uppercase English letters, a new line character '\n', a tab character '\t', a dot '.', a space ' ', and digits.
"""
class Solution:
def lengthLongestPath(self, input: str) -> int:
maxlen = 0
pathlen = {0: 0}
for line in input.splitlines():
name = line.lstrip('\t')
depth = len(line) - len(name)
if '.' in name:
maxlen = max(maxlen, pathlen[depth] + len(name))
else:
pathlen[depth + 1] = pathlen[depth] + len(name) + 1
return maxlen | 38.101266 | 460 | 0.719934 |
ace2c455a82048f20e220187c397b7e3e2db4dcc | 190 | py | Python | config.py | V-Sekai/webview_module | 3a1170edaa2fbf9c6e5cd75d341f1e89a6cfa414 | [
"MIT"
] | 39 | 2021-02-11T14:47:34.000Z | 2022-03-26T04:04:16.000Z | config.py | V-Sekai/webview_module | 3a1170edaa2fbf9c6e5cd75d341f1e89a6cfa414 | [
"MIT"
] | 2 | 2021-03-13T18:34:52.000Z | 2022-01-13T08:01:16.000Z | config.py | V-Sekai/webview_module | 3a1170edaa2fbf9c6e5cd75d341f1e89a6cfa414 | [
"MIT"
] | 8 | 2021-07-31T17:01:16.000Z | 2022-03-29T15:14:09.000Z | def can_build(env, platform):
return True
def get_doc_classes():
return [
"WebViewOverlay"
]
def get_doc_path():
return "doc_classes"
def configure(env):
pass
| 13.571429 | 29 | 0.642105 |
ace2c628725a73767eb6f2a1dc75e20451b3065c | 2,234 | py | Python | 26_create_bill_xml.py | b-itkin/CUSG-Digitization | 8d3f9d73c54687fe58e61383981867af58f55577 | [
"MIT"
] | 1 | 2021-11-16T17:37:31.000Z | 2021-11-16T17:37:31.000Z | 26_create_bill_xml.py | b-itkin/CUSG-Digitization | 8d3f9d73c54687fe58e61383981867af58f55577 | [
"MIT"
] | 2 | 2018-07-09T08:10:23.000Z | 2018-07-12T19:55:34.000Z | 26_create_bill_xml.py | b-itkin/CUSG-Digitization | 8d3f9d73c54687fe58e61383981867af58f55577 | [
"MIT"
] | 1 | 2021-11-16T17:37:34.000Z | 2021-11-16T17:37:34.000Z | import newbill
import sys
import re
from jinja2 import Environment, FileSystemLoader, select_autoescape
class TwentySixBill(newbill.Bill):
NEWENDBILLMATCHRE="This (Bill|Resolution|Legislation) takes effect upon passage"
TOPHEADERRE="26 EXECUTIVE COUNCIL (BILL|RESOLUTION) [0-9][0-9]*"
INTRODUCEDDATERE="[0-9][0-2]*\/[0-9][0-9]*\/[0-9][0-9]"
MONTHINTRODUCEDDATERE="(SEPTEMBER|OCTOBER|NOVEMBER|DECEMBER|JANUARY|FEBRUARY|MARCH|APRIL|MAY|JUNE|JULY|AUGUST)\s*[0-9][0-9]*,\s*198[0-9]"
def parseBillText(self):
self.endbillMatch=re.search(self.NEWENDBILLMATCHRE,self.inputStr,re.I)
self.billText=self.inputStr[self.beginbillMatch.start():self.endbillMatch.end()]
#self.addActions
def parseIntroducedDate(self):
try:
topHeaderMatch=re.search(self.TOPHEADERRE,self.inputStr,re.I)
introducedDateMatch=re.search(self.MONTHINTRODUCEDDATERE,self.inputStr,re.I)
self.introducedDate=self.inputStr[introducedDateMatch.start():introducedDateMatch.end()]
except:
print "Unable to parse Introduced Date\n"
f=open('txtfiles.txt','r')
mybill=None
legislations=[]
legislation_name=""
env=Environment(
loader=FileSystemLoader('./'),
autoescape=select_autoescape(['html','xml'])
)
template=env.get_template('legislation_template.html')
for line in f:
try:
mybill=TwentySixBill(line.strip())
mybill.completeParse()
mybill.createXML()
legislation_name=line.strip('.txt\n')+'.html'
with open(legislation_name,'w') as hf:
hf.write(template.render(billNum=line.strip('.txt\n')))
legislations.append(legislation_name)
except Exception as e:
print "error processing " + line + "\n" + str(e)
f.close()
template=env.get_template('legislation_webpage_template.html')
f=open('legislation_web.html','w+').write(template.render(legislation=legislations,session='26',legislationtype=mybill.billType))
#print "SPONSORS"
#print "--------"
#print mybill.sponsors
#print "AUTHORS"
#print "--------"
#print mybill.authors
#print "Bill History"
#print "--------"
#print mybill.billHistoryString
#print "Bill Summary"
#print "--------"
#print mybill.billSummaryString
#print "Bill Text"
#print "--------"
#print mybill.billText
#print "Bill Introduced Date"
#print "--------"
#print mybill.introducedDate
#mybill.createXML()
| 33.343284 | 138 | 0.744405 |
ace2c6ff4ee3592c266bfa8180f6e90909b20db4 | 3,287 | py | Python | backend/github_repo_monitor/settings/production.py | rvlb/github-repo-monitor | 480d44c1fe59f52af9f12138e947daea75bd71e2 | [
"MIT"
] | null | null | null | backend/github_repo_monitor/settings/production.py | rvlb/github-repo-monitor | 480d44c1fe59f52af9f12138e947daea75bd71e2 | [
"MIT"
] | 10 | 2021-03-09T23:58:33.000Z | 2022-02-26T20:28:56.000Z | backend/github_repo_monitor/settings/production.py | rvlb/github-repo-monitor | 480d44c1fe59f52af9f12138e947daea75bd71e2 | [
"MIT"
] | null | null | null | import sentry_sdk
from decouple import Csv, config
from dj_database_url import parse as db_url
from sentry_sdk.integrations.django import DjangoIntegration
from .base import * # noqa
DEBUG = False
DATABASES = {
'default': config('DATABASE_URL', cast=db_url),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
STATIC_ROOT = base_dir_join('staticfiles')
STATIC_URL = '/static/'
MEDIA_ROOT = base_dir_join('mediafiles')
MEDIA_URL = '/media/'
SERVER_EMAIL = 'foo@example.com'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = config('SENDGRID_USERNAME')
EMAIL_HOST_PASSWORD = config('SENDGRID_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Security
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
# Webpack
WEBPACK_LOADER['DEFAULT']['CACHE'] = True
# Celery
CELERY_BROKER_URL = config('REDIS_URL')
CELERY_RESULT_BACKEND = config('REDIS_URL')
CELERY_SEND_TASK_ERROR_EMAILS = False
# Whitenoise
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MIDDLEWARE.insert( # insert WhiteNoiseMiddleware right after SecurityMiddleware
MIDDLEWARE.index('django.middleware.security.SecurityMiddleware') + 1,
'whitenoise.middleware.WhiteNoiseMiddleware')
# django-log-request-id
MIDDLEWARE.insert( # insert RequestIDMiddleware on the top
0, 'log_request_id.middleware.RequestIDMiddleware')
LOG_REQUEST_ID_HEADER = 'HTTP_X_REQUEST_ID'
LOG_REQUESTS = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'request_id': {
'()': 'log_request_id.filters.RequestIDFilter'
},
},
'formatters': {
'standard': {
'format': '%(levelname)-8s [%(asctime)s] [%(request_id)s] %(name)s: %(message)s'
},
},
'handlers': {
'null': {
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false'],
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'filters': ['request_id'],
'formatter': 'standard',
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'INFO'
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'log_request_id.middleware': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
}
}
JS_REVERSE_EXCLUDE_NAMESPACES = ['admin']
# Sentry
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[DjangoIntegration()],
release=COMMIT_SHA
)
| 25.679688 | 92 | 0.63523 |
ace2c87404fca6c13b52800dfec9e868ab340de2 | 25,086 | py | Python | DefaultModules.py | JKKwasTaken/meerk40t | e3ee5d0bf1610e7e0ad8635d3a76ea787353229a | [
"MIT"
] | null | null | null | DefaultModules.py | JKKwasTaken/meerk40t | e3ee5d0bf1610e7e0ad8635d3a76ea787353229a | [
"MIT"
] | null | null | null | DefaultModules.py | JKKwasTaken/meerk40t | e3ee5d0bf1610e7e0ad8635d3a76ea787353229a | [
"MIT"
] | null | null | null | import os
from base64 import b64encode
from io import BytesIO
from xml.etree.cElementTree import Element, ElementTree, SubElement
from LaserOperation import LaserOperation
from svgelements import *
MILS_PER_MM = 39.3701
class SVGWriter:
@staticmethod
def save_types():
yield "Scalable Vector Graphics", "svg", "image/svg+xml"
@staticmethod
def versions():
yield 'default'
@staticmethod
def save(device, f, version='default'):
root = Element(SVG_NAME_TAG)
root.set(SVG_ATTR_VERSION, SVG_VALUE_VERSION)
root.set(SVG_ATTR_XMLNS, SVG_VALUE_XMLNS)
root.set(SVG_ATTR_XMLNS_LINK, SVG_VALUE_XLINK)
root.set(SVG_ATTR_XMLNS_EV, SVG_VALUE_XMLNS_EV)
root.set("xmlns:meerK40t", "https://github.com/meerk40t/meerk40t/wiki/Namespace")
# Native unit is mils, these must convert to mm and to px
mils_per_mm = 39.3701
mils_per_px = 1000.0 / 96.0
px_per_mils = 96.0 / 1000.0
device.setting(int, "bed_width", 310)
device.setting(int, "bed_height", 210)
mm_width = device.bed_width
mm_height = device.bed_height
root.set(SVG_ATTR_WIDTH, '%fmm' % mm_width)
root.set(SVG_ATTR_HEIGHT, '%fmm' % mm_height)
px_width = mm_width * mils_per_mm * px_per_mils
px_height = mm_height * mils_per_mm * px_per_mils
viewbox = '%d %d %d %d' % (0, 0, round(px_width), round(px_height))
scale = 'scale(%f)' % px_per_mils
root.set(SVG_ATTR_VIEWBOX, viewbox)
elements = device.elements
for operation in elements.ops():
subelement = SubElement(root, "operation")
c = getattr(operation, 'color')
if c is not None:
subelement.set('color', str(c))
for key in dir(operation):
if key.startswith('_'):
continue
value = getattr(operation, key)
if type(value) not in (int, float, str, bool):
continue
subelement.set(key, str(value))
if elements.note is not None:
subelement = SubElement(root, "note")
subelement.set(SVG_TAG_TEXT, elements.note)
for element in elements.elems():
if isinstance(element, Path):
element = abs(element)
subelement = SubElement(root, SVG_TAG_PATH)
subelement.set(SVG_ATTR_DATA, element.d(transformed=False))
subelement.set(SVG_ATTR_TRANSFORM, scale)
if element.values is not None:
for key, val in element.values.items():
if key in ('speed', 'overscan', 'power', 'passes',
'raster_direction', 'raster_step', 'd_ratio'):
subelement.set(key, str(val))
elif isinstance(element, SVGText):
subelement = SubElement(root, SVG_TAG_TEXT)
subelement.text = element.text
t = Matrix(element.transform)
t *= scale
subelement.set('transform', 'matrix(%f, %f, %f, %f, %f, %f)' % (t.a, t.b, t.c, t.d, t.e, t.f))
if element.values is not None:
for key, val in element.values.items():
if key in ('speed', 'overscan', 'power', 'passes',
'raster_direction', 'raster_step', 'd_ratio',
'font-family', 'font_face', 'font-size', 'font-weight',
'anchor', 'x', 'y'):
subelement.set(key, str(val))
else: # Image.
subelement = SubElement(root, SVG_TAG_IMAGE)
stream = BytesIO()
element.image.save(stream, format='PNG')
png = b64encode(stream.getvalue()).decode('utf8')
subelement.set('xlink:href', "data:image/png;base64,%s" % (png))
subelement.set(SVG_ATTR_X, '0')
subelement.set(SVG_ATTR_Y, '0')
subelement.set(SVG_ATTR_WIDTH, str(element.image.width))
subelement.set(SVG_ATTR_HEIGHT, str(element.image.height))
subelement.set(SVG_ATTR_TRANSFORM, scale)
t = Matrix(element.transform)
t *= scale
subelement.set('transform', 'matrix(%f, %f, %f, %f, %f, %f)' % (t.a, t.b, t.c, t.d, t.e, t.f))
if element.values is not None:
for key, val in element.values.items():
if key in ('speed', 'overscan', 'power', 'passes',
'raster_direction', 'raster_step', 'd_ratio'):
subelement.set(key, str(val))
stroke = element.stroke
if stroke is not None:
stroke_opacity = stroke.opacity
stroke = str(abs(stroke)) if stroke is not None and stroke.value is not None else SVG_VALUE_NONE
subelement.set(SVG_ATTR_STROKE, stroke)
if stroke_opacity != 1.0 and stroke_opacity is not None:
subelement.set(SVG_ATTR_STROKE_OPACITY, str(stroke_opacity))
try:
stroke_width = str(element.stroke_width) if element.stroke_width is not None else SVG_VALUE_NONE
subelement.set(SVG_ATTR_STROKE_WIDTH, stroke_width)
except AttributeError:
pass
fill = element.fill
if fill is not None:
fill_opacity = fill.opacity
fill = str(abs(fill)) if fill is not None and fill.value is not None else SVG_VALUE_NONE
subelement.set(SVG_ATTR_FILL, fill)
if fill_opacity != 1.0 and fill_opacity is not None:
subelement.set(SVG_ATTR_FILL_OPACITY, str(fill_opacity))
if element.id is not None:
subelement.set(SVG_ATTR_ID, str(element.id))
tree = ElementTree(root)
tree.write(f)
class SVGLoader:
    """Loader that parses an SVG file into drawable elements, plus any
    application-specific 'note' and 'operation' settings embedded in it."""
    @staticmethod
    def load_types():
        """Yield the (description, extensions, mimetype) tuple for SVG."""
        yield "Scalable Vector Graphics", ("svg",), "image/svg+xml"
    @staticmethod
    def load(kernel, pathname, **kwargs):
        """Parse *pathname* and return (elements, ops, note, pathname, basename).

        elements: SVGText / reified Path / SVGImage objects found in the file.
        ops: list of LaserOperation built from <operation> tags, or None.
        note: text of a <note> tag, or None.
        """
        kernel.setting(int, "bed_width", 310)
        kernel.setting(int, "bed_height", 210)
        elements = []
        # Pixels-per-inch override from the caller; fall back to 96 (and
        # guard against an explicit 0, which would divide by zero below).
        if 'svg_ppi' in kwargs:
            ppi = float(kwargs['svg_ppi'])
        else:
            ppi = 96.0
        if ppi == 0:
            ppi = 96.0
        basename = os.path.basename(pathname)
        # Scale from pixels to device units (1000 units per inch).
        scale_factor = 1000.0 / ppi
        svg = SVG.parse(source=pathname,
                        width='%fmm' % (kernel.bed_width),
                        height='%fmm' % (kernel.bed_height),
                        reify=False,
                        ppi=ppi,
                        color='none',
                        transform='scale(%f)' % scale_factor)
        ops = None
        note = None
        # NOTE: the isinstance order below matters — Path is checked before
        # Shape (Path subclasses Shape in svgelements), and SVG/Group
        # containers are skipped before the generic SVGElement fallback.
        for element in svg.elements():
            try:
                if element.values['visibility'] == 'hidden':
                    continue
            except KeyError:
                pass
            except AttributeError:
                pass
            if isinstance(element, SVGText):
                if element.text is not None:
                    elements.append(element)
            elif isinstance(element, Path):
                if len(element) != 0:
                    element.reify()
                    elements.append(element)
            elif isinstance(element, Shape):
                e = Path(element)
                e.reify()  # In some cases the shape could not have reified, the path must.
                if len(e) != 0:
                    elements.append(e)
            elif isinstance(element, SVGImage):
                try:
                    element.load(os.path.dirname(pathname))
                    if element.image is not None:
                        elements.append(element)
                except OSError:
                    # Linked image file missing/unreadable: skip the element.
                    pass
            elif isinstance(element, SVG):
                continue
            elif isinstance(element, Group):
                continue
            elif isinstance(element, SVGElement):
                # Unknown tags: look for the app-specific 'note'/'operation'.
                try:
                    if str(element.values[SVG_ATTR_TAG]).lower() == 'note':
                        try:
                            note = element.values[SVG_TAG_TEXT]
                        except KeyError:
                            pass
                except KeyError:
                    pass
                try:
                    if str(element.values[SVG_ATTR_TAG]).lower() == 'operation':
                        op = LaserOperation()
                        # Copy any public LaserOperation attribute present in
                        # the tag, coercing to the attribute's current type.
                        for key in dir(op):
                            if key.startswith('_'):
                                continue
                            v = getattr(op, key)
                            if key in element.values:
                                type_v = type(v)
                                if type_v in (str, int, float, Color):
                                    try:
                                        setattr(op, key, type_v(element.values[key]))
                                    except (ValueError, KeyError):
                                        pass
                                elif type_v == bool:
                                    setattr(op, key, str(element.values[key]).lower() in ("true", "1"))
                        if ops is None:
                            ops = list()
                        ops.append(op)
                except KeyError:
                    pass
        return elements, ops, note, pathname, basename
class ImageLoader:
    """Loader for common raster formats, wrapped as SVGImage elements."""

    @staticmethod
    def load_types():
        """Yield (description, extensions, mimetype) for each supported format."""
        yield "Portable Network Graphics", ("png",), "image/png"
        yield "Bitmap Graphics", ("bmp",), "image/bmp"
        yield "EPS Format", ("eps",), "image/eps"
        yield "GIF Format", ("gif",), "image/gif"
        yield "Icon Format", ("ico",), "image/ico"
        yield "JPEG Format", ("jpg", "jpeg", "jpe"), "image/jpeg"
        yield "Webp Format", ("webp",), "image/webp"

    @staticmethod
    def load(kernel, pathname, **kwargs):
        """Load the image at *pathname* and return it as a one-element list in
        the common loader tuple (elements, ops, note, pathname, basename)."""
        name = os.path.basename(pathname)
        element = SVGImage({'href': pathname, 'width': "100%", 'height': "100%", 'id': name})
        element.load()
        try:
            kernel.setting(bool, 'image_dpi', True)
            if kernel.image_dpi:
                dpi = element.image.info['dpi']
                if isinstance(dpi, tuple):
                    # Rescale so the image lands at 1000 device units per inch.
                    element *= 'scale(%f,%f)' % (1000.0 / dpi[0], 1000.0 / dpi[1])
        except (KeyError, IndexError, AttributeError):
            # No DPI metadata (or no decoded image): keep the native scale.
            pass
        return [element], None, None, pathname, name
class DxfLoader:
    """Loader for AutoCAD DXF files; converts DXF entities to SVG elements."""

    @staticmethod
    def load_types():
        """Yield the (description, extensions, mimetype) tuple for DXF."""
        yield "Drawing Exchange Format", ("dxf",), "image/vnd.dxf"

    @staticmethod
    def load(kernel, pathname, **kwargs):
        """
        Load dxf content. Requires ezdxf which tends to also require Python 3.6 or greater.

        Dxf data has an origin point located in the lower left corner. +y -> top

        Returns (elements, ops, note, pathname, basename); DXF files carry no
        operations or notes, so ops and note are always None.
        """
        kernel.setting(int, "bed_width", 310)
        kernel.setting(int, "bed_height", 210)
        import ezdxf
        from ezdxf import units
        from ezdxf.units import decode
        basename = os.path.basename(pathname)
        dxf = ezdxf.readfile(pathname)
        elements = []
        unit = dxf.header.get('$INSUNITS')
        if unit is not None and unit != 0:
            # Convert the declared drawing unit to 1000 device units per inch.
            du = units.DrawingUnits(1000.0, unit='in')
            scale = du.factor(decode(unit))
        else:
            scale = MILS_PER_MM
        for entity in dxf.entities:
            DxfLoader.entity_to_svg(elements, dxf, entity, scale, kernel.bed_height * MILS_PER_MM)
        kernel.setting(bool, "dxf_center", True)
        if kernel.dxf_center:
            g = Group()
            g.extend(elements)
            bbox = g.bbox()
            if bbox is not None:
                bw = kernel.bed_width * MILS_PER_MM
                bh = kernel.bed_height * MILS_PER_MM
                bx = 0
                by = 0
                x = bbox[0]
                y = bbox[1]
                w = bbox[2] - bbox[0]
                h = bbox[3] - bbox[1]
                if w > bw or h > bh:
                    # Cannot fit to bed. Scale.
                    vb = Viewbox("%f %f %f %f" % (bx, by, bw, bh))
                    bb = Viewbox("%f %f %f %f" % (x, y, w, h),
                                 preserve_aspect_ratio="xMidyMid")
                    matrix = bb.transform(vb)
                    for e in elements:
                        e *= matrix
                elif x < bx or y < by or x + w > bw or y + h > bh:
                    # Is outside the bed but sized correctly, center
                    bcx = bw / 2.0
                    bcy = bh / 2.0
                    cx = (bbox[0] + bbox[2]) / 2.0
                    cy = (bbox[1] + bbox[3]) / 2.0
                    matrix = Matrix.translate(bcx - cx, bcy - cy)
                    for e in elements:
                        e *= matrix
                # else, is within the bed dimensions correctly, change nothing.
        for e in elements:
            try:
                e.reify()
            except AttributeError:
                # Not every element type supports reify (e.g. SVGText/SVGImage).
                pass
        return elements, None, None, pathname, basename

    @staticmethod
    def entity_to_svg(elements, dxf, entity, scale, translate_y):
        """Convert one DXF *entity* into an SVG element, apply scale/flip and
        stroke color, and append it (as a Path where possible) to *elements*.
        Unsupported entity types are silently skipped."""
        element = None
        try:
            entity.transform_to_wcs(entity.ocs())
        except AttributeError:
            pass
        if entity.dxftype() == 'CIRCLE':
            element = Circle(center=entity.dxf.center, r=entity.dxf.radius)
        elif entity.dxftype() == 'ARC':
            circ = Circle(center=entity.dxf.center,
                          r=entity.dxf.radius)
            start_angle = Angle.degrees(entity.dxf.start_angle)
            end_angle = Angle.degrees(entity.dxf.end_angle)
            if end_angle < start_angle:
                end_angle += Angle.turns(1)
            element = Path(circ.arc_angle(start_angle,
                                          end_angle))
        elif entity.dxftype() == 'ELLIPSE':
            # TODO: needs more math, axis is vector, ratio is to minor.
            element = Ellipse(center=entity.dxf.center,
                              # major axis is vector
                              # ratio is the ratio of major to minor.
                              start_point=entity.start_point,
                              end_point=entity.end_point,
                              start_angle=entity.dxf.start_param,
                              end_angle=entity.dxf.end_param)
        elif entity.dxftype() == 'LINE':
            # https://ezdxf.readthedocs.io/en/stable/dxfentities/line.html
            element = SimpleLine(x1=entity.dxf.start[0], y1=entity.dxf.start[1],
                                 x2=entity.dxf.end[0], y2=entity.dxf.end[1])
        elif entity.dxftype() == 'POLYLINE':
            # https://ezdxf.readthedocs.io/en/stable/dxfentities/lwpolyline.html
            if entity.is_2d_polyline:
                if not entity.has_arc:
                    if entity.is_closed:
                        element = Polygon([(p[0], p[1]) for p in entity.points()])
                    else:
                        element = Polyline([(p[0], p[1]) for p in entity.points()])
                else:
                    element = Path()
                    bulge = 0
                    for e in entity:
                        point = e.dxf.location
                        if bulge == 0:
                            element.line((point[0], point[1]))
                        else:
                            element += Arc(start=element.current_point,
                                           end=(point[0], point[1]),
                                           bulge=bulge)
                        bulge = e.dxf.bulge
                    if entity.is_closed:
                        if bulge == 0:
                            element.closed()
                        else:
                            element += Arc(start=element.current_point,
                                           end=element.z_point,
                                           bulge=bulge)
                            element.closed()
        elif entity.dxftype() == 'LWPOLYLINE':
            # https://ezdxf.readthedocs.io/en/stable/dxfentities/lwpolyline.html
            if not entity.has_arc:
                if entity.closed:
                    element = Polygon(*[(p[0], p[1]) for p in entity])
                else:
                    element = Polyline(*[(p[0], p[1]) for p in entity])
            else:
                element = Path()
                bulge = 0
                for e in entity:
                    if bulge == 0:
                        element.line((e[0], e[1]))
                    else:
                        element += Arc(start=element.current_point,
                                       end=(e[0], e[1]),
                                       bulge=bulge)
                    bulge = e[4]
                if entity.closed:
                    if bulge == 0:
                        element.closed()
                    else:
                        element += Arc(start=element.current_point,
                                       end=element.z_point,
                                       bulge=bulge)
                        element.closed()
        elif entity.dxftype() == 'HATCH':
            # https://ezdxf.readthedocs.io/en/stable/dxfentities/hatch.html
            element = Path()
            if entity.bgcolor is not None:
                # Fix: assigning to Path.fill clobbered the *class* attribute
                # of every Path; the fill belongs on this element instance.
                element.fill = Color(entity.bgcolor)
            for p in entity.paths:
                if p.path_type_flags & 2:
                    for v in p.vertices:
                        # Fix: pass each vertex as one (x, y) point, matching
                        # the other polyline branches, not two scalar args.
                        element.line((v[0], v[1]))
                    if p.is_closed:
                        element.closed()
                else:
                    for e in p.edges:
                        # Fix: type(e) is a class object and never equals a
                        # string, so no edge boundary was ever converted;
                        # compare the class name instead.
                        if type(e).__name__ == "LineEdge":
                            # https://ezdxf.readthedocs.io/en/stable/dxfentities/hatch.html#ezdxf.entities.LineEdge
                            element.line(e.start, e.end)
                        elif type(e).__name__ == "ArcEdge":
                            # https://ezdxf.readthedocs.io/en/stable/dxfentities/hatch.html#ezdxf.entities.ArcEdge
                            circ = Circle(center=e.center,
                                          radius=e.radius, )
                            element += circ.arc_angle(Angle.degrees(e.start_angle), Angle.degrees(e.end_angle))
                        elif type(e).__name__ == "EllipseEdge":
                            # https://ezdxf.readthedocs.io/en/stable/dxfentities/hatch.html#ezdxf.entities.EllipseEdge
                            element += Arc(radius=e.radius,
                                           start_angle=Angle.degrees(e.start_angle),
                                           end_angle=Angle.degrees(e.end_angle),
                                           ccw=e.is_counter_clockwise)
                        elif type(e).__name__ == "SplineEdge":
                            # https://ezdxf.readthedocs.io/en/stable/dxfentities/hatch.html#ezdxf.entities.SplineEdge
                            # NOTE(review): this treats knot values as curve
                            # points (upstream behavior preserved) — verify
                            # against the ezdxf SplineEdge documentation.
                            if e.degree == 3:
                                for i in range(len(e.knot_values)):
                                    control = e.control_values[i]
                                    knot = e.knot_values[i]
                                    element.quad(control, knot)
                            elif e.degree == 4:
                                for i in range(len(e.knot_values)):
                                    control1 = e.control_values[2 * i]
                                    control2 = e.control_values[2 * i + 1]
                                    knot = e.knot_values[i]
                                    element.cubic(control1, control2, knot)
                            else:
                                for i in range(len(e.knot_values)):
                                    knot = e.knot_values[i]
                                    element.line(knot)
        elif entity.dxftype() == 'IMAGE':
            bottom_left_position = entity.dxf.insert
            size = entity.dxf.image_size
            imagedef = entity.dxf.image_def_handle
            if not isinstance(imagedef, str):
                imagedef = imagedef.filename
            element = SVGImage(href=imagedef,
                               x=bottom_left_position[0],
                               y=bottom_left_position[1] - size[1],
                               width=size[0],
                               height=size[1])
        elif entity.dxftype() == 'MTEXT':
            insert = entity.dxf.insert
            element = SVGText(x=insert[0], y=insert[1], text=entity.text)
        elif entity.dxftype() == 'TEXT':
            insert = entity.dxf.insert
            element = SVGText(x=insert[0], y=insert[1], text=entity.dxf.text)
        elif entity.dxftype() == 'SOLID' or entity.dxftype() == 'TRACE':
            # https://ezdxf.readthedocs.io/en/stable/dxfentities/solid.html
            element = Path()
            element.move((entity[0][0], entity[0][1]))
            element.line((entity[1][0], entity[1][1]))
            element.line((entity[2][0], entity[2][1]))
            element.line((entity[3][0], entity[3][1]))
            element.closed()
            element.fill = Color('Black')
        elif entity.dxftype() == 'SPLINE':
            element = Path()
            try:
                for b in entity.construction_tool().bezier_decomposition():
                    if len(element) == 0:
                        element.move((b[0][0], b[0][1]))
                    element.cubic(
                        (b[1][0], b[1][1]),
                        (b[2][0], b[2][1]),
                        (b[3][0], b[3][1])
                    )
            except (AttributeError, TypeError):
                # Fallback for rational b-splines.
                try:
                    for bezier in entity.construction_tool().cubic_bezier_approximation(4):
                        b = bezier.control_points
                        element.cubic(
                            (b[1][0], b[1][1]),
                            (b[2][0], b[2][1]),
                            (b[3][0], b[3][1]))
                except (AttributeError, TypeError):
                    # Fallback for versions of EZDXF prior to 0.13
                    element.move(entity.control_points[0])
                    for i in range(1, entity.dxf.n_control_points):
                        element.line(entity.control_points[i])
            if entity.closed:
                element.closed()
        elif entity.dxftype() == 'INSERT':
            # Block reference: recurse into its virtual entities.
            for e in entity.virtual_entities():
                if e is None:
                    continue
                DxfLoader.entity_to_svg(elements, dxf, e, scale, translate_y)
            return
        else:
            return  # Might be something unsupported.
        from ezdxf.tools.rgb import DXF_DEFAULT_COLORS, int2rgb
        if entity.rgb is not None:
            if isinstance(entity.rgb, tuple):
                element.stroke = Color(*entity.rgb)
            else:
                element.stroke = Color(entity.rgb)
        else:
            c = entity.dxf.color
            if c == 256:  # Bylayer.
                if entity.dxf.layer in dxf.layers:
                    layer = dxf.layers.get(entity.dxf.layer)
                    c = layer.color
            try:
                if c == 7:
                    color = Color("black")  # Color 7 is black on light backgrounds, light on black.
                else:
                    color = Color(*int2rgb(DXF_DEFAULT_COLORS[c]))
            except Exception:
                # Fix: was a bare except. Unknown/out-of-range color index
                # falls back to black.
                color = Color('black')
            element.stroke = color
        # DXF origin is bottom-left with +y up; flip y and shift into the bed.
        element.transform.post_scale(scale, -scale)
        element.transform.post_translate_y(translate_y)
        if isinstance(element, SVGText):
            elements.append(element)
        else:
            element.values[SVG_ATTR_VECTOR_EFFECT] = SVG_VALUE_NON_SCALING_STROKE
            path = abs(Path(element))
            if len(path) != 0:
                if not isinstance(path[0], Move):
                    path = Move(path.first_point) + path
                elements.append(path)
| 45.777372 | 119 | 0.463326 |
ace2c9e8a3bee2673df6106a55f5c796ec35543b | 14,418 | py | Python | test/functional/abc-segwit-recovery-activation.py | hanchon/bitcoin-abc-insight | 86e0020874fcee4eaa45b728253a1087cbfc1c36 | [
"MIT"
] | null | null | null | test/functional/abc-segwit-recovery-activation.py | hanchon/bitcoin-abc-insight | 86e0020874fcee4eaa45b728253a1087cbfc1c36 | [
"MIT"
] | null | null | null | test/functional/abc-segwit-recovery-activation.py | hanchon/bitcoin-abc-insight | 86e0020874fcee4eaa45b728253a1087cbfc1c36 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This test checks activation of the SCRIPT_ALLOW_SEGWIT_RECOVERY flag
"""
from test_framework.blocktools import (
create_block,
create_coinbase,
make_conform_to_ctor,
)
from test_framework.comptool import RejectResult, TestInstance, TestManager
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
msg_tx,
ToHex,
)
from test_framework.mininode import (
mininode_lock,
network_thread_start,
P2PInterface,
)
from test_framework.script import (
CScript,
hash160,
OP_EQUAL,
OP_HASH160,
OP_TRUE,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
sync_blocks,
)
# far into the future
GREAT_WALL_START_TIME = 2000000000  # median-time-past at which the fork activates (mocktime)
# First blocks (initial coinbases, pre-fork test blocks) happen 1 day before.
FIRST_BLOCK_TIME = GREAT_WALL_START_TIME - 86400
# Error due to non clean stack
CLEANSTACK_ERROR = b'non-mandatory-script-verify-flag (Script did not clean its stack)'
# Same error as surfaced through the RPC interface (prefixed with code 64).
RPC_CLEANSTACK_ERROR = "64: " + \
    CLEANSTACK_ERROR.decode("utf-8")
class PreviousSpendableOutput(object):
    """Reference to output *n* of transaction *tx*, used when building spends.

    Fix: the previous signature used ``tx=CTransaction()``, which is evaluated
    once at definition time, so every instance created without an explicit
    *tx* shared one mutable CTransaction object.
    """

    def __init__(self, tx=None, n=-1):
        # Build a fresh CTransaction per call instead of sharing a default.
        self.tx = CTransaction() if tx is None else tx
        self.n = n  # the output index we intend to spend (-1 = unspecified)
class SegwitRecoveryActivationTest(BitcoinTestFramework):
    """Checks that SCRIPT_ALLOW_SEGWIT_RECOVERY activates at the Great Wall
    fork time: segwit-spending txns are invalid before, valid-but-nonstandard
    after, and never cause a peer to be banned."""
    def set_test_params(self):
        """Configure a 2-node network straddling the standardness policy."""
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.block_heights = {}  # block sha256 -> height, maintained manually
        self.tip = None          # most recently built/selected block
        self.blocks = {}         # test block number -> block object
        # We have 2 nodes:
        # 1) node_nonstd (nodes[0]) accepts non-standard txns. It's used to
        #    test the activation itself via TestManager.
        # 2) node_std (nodes[1]) doesn't accept non-standard txns and
        #    doesn't have us whitelisted. It's used to test for bans, as we
        #    connect directly to it via mininode and send a segwit spending
        #    txn. This transaction is non-standard and, before activation,
        #    also invalid. We check, before and after activation, that
        #    sending this transaction doesn't result in a ban.
        # Nodes are connected to each other, so node_std receives blocks and
        # transactions that node_nonstd has accepted. Since we are checking
        # that segwit spending txn are not resulting in bans, node_nonstd
        # doesn't get banned when forwarding this kind of transactions to
        # node_std.
        self.extra_args = [['-whitelist=127.0.0.1',
                            "-acceptnonstdtxn",
                            "-greatwallactivationtime={}".format(
                                GREAT_WALL_START_TIME),
                            "-replayprotectionactivationtime={}".format(
                                2 * GREAT_WALL_START_TIME)],
                           ["-acceptnonstdtxn=0",
                            "-greatwallactivationtime={}".format(
                                GREAT_WALL_START_TIME),
                            "-replayprotectionactivationtime={}".format(
                                2 * GREAT_WALL_START_TIME)]]
    def run_test(self):
        """Wire up connections and drive the comptool test manager."""
        # Move the mocktime up to activation
        for node in self.nodes:
            node.setmocktime(GREAT_WALL_START_TIME)
        test = TestManager(self, self.options.tmpdir)
        # TestManager only connects to node_nonstd (nodes[0])
        test.add_all_connections([self.nodes[0]])
        # We connect directly to node_std (nodes[1])
        self.nodes[1].add_p2p_connection(P2PInterface())
        network_thread_start()
        test.run()
    def next_block(self, number):
        """Build, solve and register an empty block on top of the current tip.

        *number* is the test-local block id used to retrieve it later.
        """
        if self.tip == None:
            base_block_hash = self.genesis_hash
            block_time = FIRST_BLOCK_TIME
        else:
            base_block_hash = self.tip.sha256
            block_time = self.tip.nTime + 1
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height)
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, block_time)
        # Do PoW, which is cheap on regnet
        block.solve()
        self.tip = block
        self.block_heights[block.sha256] = height
        assert number not in self.blocks
        self.blocks[number] = block
        return block
    def get_tests(self):
        """Generator of comptool TestInstances covering pre-fork rejection,
        no-ban behavior, activation, post-fork acceptance and reorgs."""
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []
        # shorthand
        block = self.next_block
        node_nonstd = self.nodes[0]
        node_std = self.nodes[1]
        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)
        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
        # returns a test case that asserts that the current tip was accepted
        def accepted():
            return TestInstance([[self.tip, True]])
        # returns a test case that asserts that the current tip was rejected
        def rejected(reject=None):
            if reject is None:
                return TestInstance([[self.tip, False]])
            else:
                return TestInstance([[self.tip, reject]])
        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]
        # adds transactions to the block and updates state
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            block.vtx.extend(new_transactions)
            old_sha256 = block.sha256
            make_conform_to_ctor(block)
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            if block.sha256 != old_sha256:
                self.block_heights[
                    block.sha256] = self.block_heights[old_sha256]
                del self.block_heights[old_sha256]
            self.blocks[block_number] = block
            return block
        # Returns 2 transactions:
        # 1) txfund: create outputs in segwit addresses
        # 2) txspend: spends outputs from segwit addresses
        def create_segwit_fund_and_spend_tx(spend):
            # To make sure we'll be able to recover coins sent to segwit addresses,
            # we test using historical recoveries from btc.com:
            # Spending from a P2SH-P2WPKH coin,
            #   txhash:a45698363249312f8d3d93676aa714be59b0bd758e62fa054fb1ea6218480691
            redeem_script0 = bytearray.fromhex(
                '0014fcf9969ce1c98a135ed293719721fb69f0b686cb')
            # Spending from a P2SH-P2WSH coin,
            #   txhash:6b536caf727ccd02c395a1d00b752098ec96e8ec46c96bee8582be6b5060fa2f
            redeem_script1 = bytearray.fromhex(
                '0020fc8b08ed636cb23afcb425ff260b3abd03380a2333b54cfa5d51ac52d803baf4')
            redeem_scripts = [redeem_script0, redeem_script1]
            # Fund transaction to segwit addresses
            txfund = CTransaction()
            txfund.vin = [CTxIn(COutPoint(spend.tx.sha256, spend.n))]
            amount = (50 * COIN - 1000) // len(redeem_scripts)
            for redeem_script in redeem_scripts:
                txfund.vout.append(
                    CTxOut(amount, CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL])))
            txfund.rehash()
            # Segwit spending transaction
            # We'll test if a node that checks for standardness accepts this
            # txn. It should fail exclusively because of the restriction in
            # the scriptSig (non clean stack..), so all other characteristcs
            # must pass standardness checks. For this reason, we create
            # standard P2SH outputs.
            txspend = CTransaction()
            for i in range(len(redeem_scripts)):
                txspend.vin.append(
                    CTxIn(COutPoint(txfund.sha256, i), CScript([redeem_scripts[i]])))
            txspend.vout = [CTxOut(50 * COIN - 2000,
                                   CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))]
            txspend.rehash()
            return txfund, txspend
        # Check we are not banned when sending a txn that node_nonstd rejects.
        def check_for_no_ban_on_rejected_tx(tx, reject_code, reject_reason):
            # Check that our connection to node_std is open
            assert(node_std.p2p.state == 'connected')
            # The P2PConnection stores a public counter for each message type
            # and the last receive message of each type. We use this counter to
            # identify that we received a new reject message.
            with mininode_lock:
                rejects_count = node_std.p2p.message_count['reject']
            # Send the transaction directly. We use a ping for synchronization:
            # if we have been banned, the pong message won't be received, a
            # timeout occurs and the test fails.
            node_std.p2p.send_message(msg_tx(tx))
            node_std.p2p.sync_with_ping()
            # Check we haven't been disconnected
            assert(node_std.p2p.state == 'connected')
            # Check the reject message matches what we expected
            with mininode_lock:
                assert(node_std.p2p.message_count['reject'] ==
                       rejects_count + 1)
                reject_msg = node_std.p2p.last_message['reject']
                assert(reject_msg.code == reject_code and
                       reject_msg.reason == reject_reason and
                       reject_msg.data == tx.sha256)
        # Create a new block
        block(0)
        save_spendable_output()
        yield accepted()
        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(99):
            block(5000 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test
        # collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(100):
            out.append(get_spendable_output())
        # Create segwit funding and spending transactions
        txfund, txspend = create_segwit_fund_and_spend_tx(out[0])
        # Create blocks to get closer to activate the fork.
        # Mine txfund, as it can't go into node_std mempool because it's
        # nonstandard.
        b = block(5555)
        b.nTime = GREAT_WALL_START_TIME - 1
        update_block(5555, [txfund])
        yield accepted()
        # NOTE(review): the TestInstance from the maturation phase is reused
        # here; the newly appended blocks appear to be processed on the second
        # `yield test` — confirm against comptool semantics.
        for i in range(5):
            block(5100 + i)
            test.blocks_and_transactions.append([self.tip, True])
        yield test
        # Since the TestManager is not connected to node_std, we must check
        # both nodes are synchronized before continuing.
        sync_blocks(self.nodes)
        # Check we are just before the activation time
        assert_equal(node_nonstd.getblockheader(
            node_nonstd.getbestblockhash())['mediantime'], GREAT_WALL_START_TIME - 1)
        assert_equal(node_std.getblockheader(
            node_std.getbestblockhash())['mediantime'], GREAT_WALL_START_TIME - 1)
        # Before the fork, segwit spending txns are rejected.
        assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR,
                                node_nonstd.sendrawtransaction, ToHex(txspend))
        assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR,
                                node_std.sendrawtransaction, ToHex(txspend))
        # Blocks containing segwit spending txns are rejected as well.
        block(2)
        update_block(2, [txspend])
        yield rejected(RejectResult(16, b'blk-bad-inputs'))
        # Rewind bad block
        tip(5104)
        # Check that non-upgraded nodes checking for standardness are not
        # banning nodes sending segwit spending txns.
        check_for_no_ban_on_rejected_tx(txspend, 64, CLEANSTACK_ERROR)
        # Activate the fork in both nodes!
        forkblock = block(5556)
        yield accepted()
        sync_blocks(self.nodes)
        # Check we just activated the fork
        assert_equal(node_nonstd.getblockheader(
            node_nonstd.getbestblockhash())['mediantime'], GREAT_WALL_START_TIME)
        assert_equal(node_std.getblockheader(
            node_std.getbestblockhash())['mediantime'], GREAT_WALL_START_TIME)
        # Segwit spending txns are accepted in the mempool of nodes not checking
        # for standardness, but rejected in nodes that check.
        node_nonstd.sendrawtransaction(ToHex(txspend))
        assert(txspend.hash in node_nonstd.getrawmempool())
        assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR,
                                node_std.sendrawtransaction, ToHex(txspend))
        # Check that upgraded nodes checking for standardness are not banning
        # nodes sending segwit spending txns.
        check_for_no_ban_on_rejected_tx(txspend, 64, CLEANSTACK_ERROR)
        # Blocks containing segwit spending txns are now accepted in both
        # nodes.
        block(5)
        postforkblock = update_block(5, [txspend])
        yield accepted()
        sync_blocks(self.nodes)
        # Ok, now we check if a reorg work properly accross the activation.
        node_nonstd.invalidateblock(postforkblock.hash)
        assert(txspend.hash in node_nonstd.getrawmempool())
        # Also check that nodes checking for standardness don't return a segwit
        # spending txn into the mempool when disconnecting a block.
        node_std.invalidateblock(postforkblock.hash)
        assert(txspend.hash not in node_std.getrawmempool())
        # Deactivate the fork. The spending tx has been evicted from the
        # mempool
        node_nonstd.invalidateblock(forkblock.hash)
        assert(len(node_nonstd.getrawmempool()) == 0)
if __name__ == '__main__':
    # Entry point: run the test through the BitcoinTestFramework harness.
    SegwitRecoveryActivationTest().main()
| 40.273743 | 97 | 0.633722 |
ace2ca75afbea8b05e919b5df630b9e2dc9cb779 | 7,833 | py | Python | alipay/gatewayinfo.py | amyhoo/django-oscar-alipay | eacad65416428c6e09974219487a206577d34312 | [
"BSD-3-Clause"
] | 59 | 2015-08-29T10:51:34.000Z | 2021-11-03T10:00:25.000Z | alipay/gatewayinfo.py | amyhoo/django-oscar-alipay | eacad65416428c6e09974219487a206577d34312 | [
"BSD-3-Clause"
] | 4 | 2015-09-02T08:55:42.000Z | 2017-05-15T10:33:00.000Z | alipay/gatewayinfo.py | amyhoo/django-oscar-alipay | eacad65416428c6e09974219487a206577d34312 | [
"BSD-3-Clause"
] | 22 | 2015-08-10T10:46:18.000Z | 2020-04-04T07:11:55.000Z | #######################################################################################
# 支付宝网关:
# 生成请求url,验证支付宝支付信息
#######################################################################################
import requests
import six
import time
from pytz import timezone
from hashlib import md5
from datetime import datetime
from xml.etree import ElementTree
from collections import OrderedDict
from .exceptions import MissingParameter
from .exceptions import ParameterValueError
from .exceptions import TokenAuthorizationError
from .conf import *
from django.http import HttpResponseRedirect, HttpResponse
import urllib
if six.PY3:
from urllib.parse import parse_qs, urlparse, unquote
else:
from urlparse import parse_qs, urlparse, unquote
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
def encode_dict(params):
    """Return a copy of *params* whose string values are UTF-8 encoded bytes.

    Non-string values (and all keys) are passed through unchanged.
    """
    encoded = {}
    for key, value in six.iteritems(params):
        if isinstance(value, str):
            value = six.u(value).encode('utf-8')
        elif isinstance(value, six.string_types):
            value = value.encode('utf-8')
        encoded[key] = value
    return encoded
class Alipay(object):
    """Client for the Alipay payment gateway.

    Builds signed request URLs for the supported Alipay services and verifies
    asynchronous payment notifications coming back from the gateway.
    """

    GATEWAY_URL = ALIPAY_GATEWAY
    NOTIFY_GATEWAY_URL = NOTIFY_GATEWAY_URL

    def __init__(self, **kwargs):
        """Store the gateway credentials and default request parameters.

        Required kwargs: key, _input_charset, partner, payment_type,
        sign_type, plus at least one of seller_id / seller_account_name /
        seller_email.
        """
        names = ['key', '_input_charset', 'partner', 'payment_type', 'sign_type']
        self._check_params(kwargs, names)
        self.key = kwargs.pop('key')   # signing secret; never transmitted
        self.pid = kwargs.get('partner')
        self.default_params = kwargs
        self.params = self.default_params.copy()  # per-request parameters
        # (param name, sign_type value, sign-method suffix), e.g. 'MD5'.
        self.sign_tuple = ('sign_type', kwargs.get('sign_type'), kwargs.get('sign_type'))
        self.sign_key = False
        # Fix: request() read self.request_method but nothing ever set it,
        # raising AttributeError; default to GET redirects.
        self.request_method = 'get'
        if not {'seller_id', 'seller_account_name', 'seller_email'} & set(kwargs):
            raise ParameterValueError(
                "seller_id,seller_account_name,seller_email must have one.")

    def _generate_md5_sign(self, params):
        """Return the MD5 signature over sorted 'k=v' pairs plus the secret key."""
        src = '&'.join(['%s=%s' % (key, value) for key,
                        value in sorted(params.items())]) + self.key
        return md5(src.encode('utf-8')).hexdigest()

    def _check_params(self, params, names):
        """Raise MissingParameter unless every name in *names* is present."""
        if not all(k in params for k in names):
            raise MissingParameter('missing parameters')
        return

    def _build_sign_params(self, **params):
        """Return *params* augmented with 'sign_type' and a fresh 'sign'."""
        # Stale signature fields must not participate in the new signature.
        params.pop('sign_type', None)
        params.pop('sign', None)
        signkey, signvalue, signdescription = self.sign_tuple
        # Fix: getattr without a default raised AttributeError, making the
        # NotImplementedError branch below unreachable.
        signmethod = getattr(self, '_generate_%s_sign' %
                             signdescription.lower(), None)
        if signmethod is None:
            raise NotImplementedError("This type '%s' of sign is not implemented yet." %
                                      signdescription)
        if self.sign_key:
            params.update({signkey: signvalue})
        # NOTE: signmethod(params) runs while building the dict literal, so
        # 'sign_type' is excluded from the digest unless self.sign_key is set.
        params.update({signkey: signvalue, 'sign': signmethod(params)})
        return params

    def _build_url(self):
        """Return the full GET URL of the gateway with signed parameters."""
        params_signed = self._build_sign_params(**self.params)
        return '%s?%s' % (self.GATEWAY_URL, urlencode(params_signed))

    def _check_create_direct_pay_by_user(self, **kwargs):
        """Validate parameters for the instant-payment service."""
        self._check_params(kwargs, ['service', 'out_trade_no', 'subject'])
        if not kwargs.get('total_fee') and \
                not (kwargs.get('price') and kwargs.get('quantity')):
            # Fix: corrected 'quantiry' typo in the error message.
            raise ParameterValueError('total_fee or (price && quantity) must have one.')
        return True

    def _check_create_partner_trade_by_buyer(self, **kwargs):
        """Validate parameters for the escrow (secured) trade service."""
        names = ['service', 'out_trade_no', 'subject', 'logistics_type',
                 'logistics_fee', 'logistics_payment', 'price', 'quantity']
        self._check_params(kwargs, names)
        if not {'notify_url', 'return_url'} & set(kwargs):
            raise ParameterValueError("notify_url,return_url must have one.")
        return True

    def _check_send_goods_confirm_by_platform(self, **kwargs):
        """Validate parameters for the shipping-confirmation service."""
        self._check_params(kwargs, ['service', 'trade_no', 'logistics_name', 'transport_type', ])
        return True

    def _check_trade_create_by_buyer(self, **kwargs):
        """Validate parameters for the standard dual-interface service."""
        names = ['service', 'out_trade_no', 'subject', 'logistics_type',
                 'logistics_fee', 'logistics_payment', 'price', 'quantity']
        self._check_params(kwargs, names)
        return True

    def _check_add_alipay_qrcode(self, **kwargs):
        """Validate parameters for QR-code management ('add')."""
        self._check_params(kwargs, ['service', 'biz_data', 'biz_type'])
        return True

    def add_alipay_qrcode(self, **kwargs):
        """Register a QR code with the gateway; returns the HTTP response.

        Fix: the original passed the kwargs dict positionally into a
        **kwargs-only validator (TypeError) and called _build_url with
        arguments it does not accept. The implicit 'method'/'timestamp'
        fields are applied to self.params here, since a **kwargs callee
        cannot mutate them.
        """
        self.params.update(kwargs)
        if self._check_add_alipay_qrcode(**self.params):
            utcnow = datetime.utcnow()
            shanghainow = timezone('Asia/Shanghai').fromutc(utcnow)
            self.params['method'] = 'add'
            self.params['timestamp'] = shanghainow.strftime('%Y-%m-%d %H:%M:%S')
            return requests.get(self._build_url())

    def get_sign_method(self, **kwargs):
        """Return the bound _generate_<sign_type>_sign method."""
        signkey, signvalue, signdescription = self.sign_tuple
        signmethod = getattr(self, '_generate_%s_sign' %
                             signdescription.lower(), None)
        if signmethod is None:
            raise NotImplementedError("This type '%s' of sign is not implemented yet." %
                                      signdescription)
        return signmethod

    def verify_notify(self, **kwargs):
        """Verify an asynchronous notification sent back by Alipay.

        *kwargs* is expected to be a parse_qs-style mapping where every value
        is a one-element list — TODO confirm against callers. Returns True
        only if the local signature matches AND Alipay confirms the notify_id
        remotely.
        """
        sign = kwargs.pop('sign')
        kwargs.pop('sign_type', None)
        # Flatten the single-element lists produced by query-string parsing.
        params = {key: kwargs[key][0] for key in kwargs}
        if self._build_sign_params(**params).get('sign') == sign[0]:
            return self.check_notify_remotely(**params)
        else:
            return False

    def check_notify_remotely(self, **kwargs):
        """Ask the Alipay notify gateway whether notify_id is genuine."""
        remote_result = requests.get(self.NOTIFY_GATEWAY_URL % (self.pid, kwargs['notify_id']),
                                     headers={'connection': 'close'}).text
        return remote_result == 'true'

    def _resolve_check_method(self, service):
        """Return the parameter validator for *service* (private helper)."""
        if not service:
            raise MissingParameter('missing parameters')
        if service == 'alipay.mobile.qrcode.manage':
            return self._check_add_alipay_qrcode
        check_method = getattr(self, '_check_' + service, None)
        if check_method is None:
            raise NotImplementedError("This type '%s' of sign is not implemented yet." %
                                      service)
        return check_method

    def request_url(self, **kwargs):
        """Merge *kwargs* into the request parameters, validate them for the
        selected service, and return the signed gateway URL."""
        self.params.update(kwargs)
        service = self.params.get('service')
        check_method = self._resolve_check_method(service)
        # Validate the full accumulated parameter set, not just this call's
        # kwargs, so defaults set earlier still satisfy the checks.
        check_method(**self.params)
        return self._build_url()

    def request(self, **kwargs):
        """Validate parameters and perform the gateway request.

        In 'get' mode (the default) returns an HttpResponseRedirect to the
        signed gateway URL; otherwise POSTs the signed parameters and wraps
        the raw response in an HttpResponse.
        """
        self.params.update(kwargs)
        service = self.params.get('service')
        check_method = self._resolve_check_method(service)
        # Fix: the original called check_method(kwargs), passing the dict
        # positionally into a **kwargs-only signature (TypeError).
        check_method(**self.params)
        if self.request_method == 'get':
            return HttpResponseRedirect(self._build_url())
        # Fix: urlopen requires url-encoded bytes (not a dict), and
        # _build_sign_params takes keyword arguments. urllib.request is
        # imported locally because the module only imports urllib.parse.
        import urllib.request
        payload = urlencode(self._build_sign_params(**self.params)).encode('utf-8')
        data = urllib.request.urlopen(self.GATEWAY_URL, payload)
        return HttpResponse(data)
ace2caf1a173204521237bfdfee6bedd6eb76df7 | 1,072 | py | Python | BGLL_tst.py | YanhuiJoe/SCAN-BGLL-community-detection | 699b8af9bc496a9afbfee57b4ce750a386896726 | [
"MIT"
] | 5 | 2018-08-30T02:38:56.000Z | 2019-03-06T06:03:35.000Z | BGLL_tst.py | YanhuiJoe/SCAN-BGLL-community-detection | 699b8af9bc496a9afbfee57b4ce750a386896726 | [
"MIT"
] | null | null | null | BGLL_tst.py | YanhuiJoe/SCAN-BGLL-community-detection | 699b8af9bc496a9afbfee57b4ce750a386896726 | [
"MIT"
] | 2 | 2020-07-24T06:34:42.000Z | 2021-02-13T02:40:48.000Z | import math
import unittest
import numpy as np
from BGLL import PyLouvain
from sklearn import metrics
def compute_NMI(labels_true, labels_pre):
    """Flatten a partition into per-node labels and score it against the truth.

    *labels_pre* is a list of communities (each a list of node ids). Nodes
    not covered by any community keep the placeholder label -1. Returns the
    normalized mutual information against *labels_true*.

    Fix: the previous version converted the partition to a numpy array and
    iterated ``range(arr.size)``; for equal-sized communities that counts
    every element (rows * cols), over-running the community index, and for
    ragged communities it builds a deprecated object array. Plain Python
    iteration avoids both problems.
    """
    communities = list(labels_pre)
    # The highest node id determines the length of the flat label vector.
    max_node = max(max(community) for community in communities)
    labels_pre_ = [-1 for _ in range(max_node + 1)]
    for community_id, community in enumerate(communities):
        for node in community:
            labels_pre_[node] = community_id
    print("Clustering : ", labels_pre_)
    NMI = metrics.normalized_mutual_info_score(labels_true, labels_pre_)
    return NMI
if __name__ == '__main__':
    # Run BGLL (Louvain) community detection on the karate-club graph and
    # score the resulting partition against the known ground-truth split.
    filename = 'data/karate.txt'
    labels_true = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
    # Pick the loader from the file extension (.gml vs. plain edge list).
    if filename.split('.')[-1] == 'gml':
        graph = PyLouvain.from_gml_file(filename)
    else:
        graph = PyLouvain.from_file(filename)
    partition, q = graph.apply_method()
    print("Modularity : ", q)
    if labels_true:
        print("NMI", compute_NMI(labels_true, partition))
ace2cc5f99596557f4f17b8b14c59766af04712d | 831 | py | Python | imgur/store.py | suuuuumod/awmepost | b5e8699c552bb6c0e469fdf867a5bc48b637ce09 | [
"MIT"
] | 1 | 2021-03-25T09:06:15.000Z | 2021-03-25T09:06:15.000Z | imgur/store.py | suuuuumod/awmepost | b5e8699c552bb6c0e469fdf867a5bc48b637ce09 | [
"MIT"
] | null | null | null | imgur/store.py | suuuuumod/awmepost | b5e8699c552bb6c0e469fdf867a5bc48b637ce09 | [
"MIT"
] | null | null | null | from typing import List
from pymongo.errors import DuplicateKeyError
from core import store
class ImgurStore(store.MongoStore):
    """Mongo-backed store that tracks a unique set of imgur tag names."""

    def __init__(self, name, url, clear_age):
        super().__init__(name, url, clear_age)
        # Dedicated 'tags' collection; the unique index on 'name' guarantees
        # each tag is stored at most once.
        self.tags = self.get_collection('tags')
        self.tags.create_index('name', unique=True)

    def get_tags(self) -> set:
        """Return every stored tag name as a set."""
        return {document['name'] for document in self.tags.find()}

    def add_tags(self, tags: List[str]):
        """Insert the given tags; names already present are silently skipped."""
        for tag_name in tags:
            try:
                self.tags.insert_one({
                    'name': tag_name
                })
            except DuplicateKeyError:
                # Unique index rejected an existing tag -- nothing to do.
                pass

    def remove_tags(self, tags: List[str]):
        """Delete every stored tag whose name appears in *tags*."""
        self.tags.delete_many({
            'name': {
                '$in': tags
            }
        })
| 24.441176 | 51 | 0.533093 |
ace2ccae095c98e988515f307c7fccf50d04f694 | 413 | py | Python | battle-city/code/bullet.py | mbdimitrova/battle-city | bd4f70d7f4c1d5cb41513ee44c1c03ef81786ab0 | [
"MIT"
] | 1 | 2015-04-27T23:27:52.000Z | 2015-04-27T23:27:52.000Z | battle-city/code/bullet.py | mbdimitrova/battle-city | bd4f70d7f4c1d5cb41513ee44c1c03ef81786ab0 | [
"MIT"
] | null | null | null | battle-city/code/bullet.py | mbdimitrova/battle-city | bd4f70d7f4c1d5cb41513ee44c1c03ef81786ab0 | [
"MIT"
] | null | null | null | from .sprite import *
from .map import *
class Bullet(Sprite):
    """Sprite for bullets"""

    def __init__(self, position, direction):
        # Load the bullet sprite sheet; TileCache presumably caches tiles per
        # filename -- confirm against map.py (star-imported).
        sprite_cache = TileCache("bullet.png")
        self.frames = sprite_cache["bullet.png"]
        # NOTE(review): frames are assigned before the base-class init and
        # image after it -- preserve this ordering; Sprite.__init__ may rely
        # on or overwrite these attributes.
        Sprite.__init__(self, position)
        self.direction = direction
        self.position = position
        # Select the frame row matching the travel direction (DIRECTIONS is
        # star-imported from the map module).
        self.image = self.frames[DIRECTIONS.index(self.direction)][0]
| 29.5 | 69 | 0.656174 |
ace2ccbd51597a341778a1591afbb90a5d150710 | 1,156 | py | Python | white-head-mountain/pcdn/gethostList.py | jiangwenfan/pythonScripts | c9004944f162af575e111522f98d4de4f59885e6 | [
"Apache-2.0"
] | null | null | null | white-head-mountain/pcdn/gethostList.py | jiangwenfan/pythonScripts | c9004944f162af575e111522f98d4de4f59885e6 | [
"Apache-2.0"
] | null | null | null | white-head-mountain/pcdn/gethostList.py | jiangwenfan/pythonScripts | c9004944f162af575e111522f98d4de4f59885e6 | [
"Apache-2.0"
] | null | null | null | import requests
import json
#get host list
def get_ips(nodename):
    """Fetch the sorted list of healthy host names for one node.

    :param nodename: node name used as the ``node.name`` filter value
    :return: sorted list of host names (asserts the list is non-empty)
    """
    url = "http://rexm.qingcdn.com/api?Content-Type=application/json&Cache-Control=no-cache&Postman-Token=6ed7d994-d154-f34a-2980-23e286bcbfb3"
    # Build the RPC payload with both filter clauses up front instead of
    # appending the node filter afterwards; the serialized JSON is identical.
    payload = {
        "token": "06e574386324b8fb9df399fa4de92d4a",
        "method": "Svr.get",
        "params": {
            "filter": ["fault_status=2101", "node.name=" + nodename],
            "select": ["name", "logic_nics.ip", "logic_nics.bandwidth", "use_status",
                       "svcs.type_module"],
        },
    }
    headers = {
        'Content-Type': 'text/plain'
    }
    response = requests.request("POST", url, headers=headers, data=json.dumps(payload))
    iplist = sorted(entry.get('name') for entry in json.loads(response.text).get('data'))
    assert len(iplist) > 0, "没有获取到节点IP,请检查节点状态是否正常(确保节点内机器都为上线或者挂起状态,故障状态的机器获取不到IP)!"
    return iplist
#demo
if __name__ == '__main__':
    # NOTE(review): infinite polling loop with no sleep between requests --
    # confirm whether a delay was intended before hammering the API.
    while True:
        hostNameList = get_ips("联通-福建-泉州-小节点-2")
        print(hostNameList)
| 35.030303 | 144 | 0.604671 |
ace2cd00d84e139b1eea10660b43bafca45a6f19 | 4,326 | py | Python | tods/tests/detection_algorithm/test_PyodVAE.py | ZhuangweiKang/tods | fe3f55f8ccb306dd292c668e0f1154f1afdfa556 | [
"Apache-2.0"
] | 544 | 2020-09-21T06:02:33.000Z | 2022-03-27T07:16:32.000Z | tods/tests/detection_algorithm/test_PyodVAE.py | ZhuangweiKang/tods | fe3f55f8ccb306dd292c668e0f1154f1afdfa556 | [
"Apache-2.0"
] | 35 | 2020-09-21T06:33:13.000Z | 2022-03-11T14:20:21.000Z | tods/tests/detection_algorithm/test_PyodVAE.py | ZhuangweiKang/tods | fe3f55f8ccb306dd292c668e0f1154f1afdfa556 | [
"Apache-2.0"
] | 86 | 2020-09-21T16:44:33.000Z | 2022-03-11T18:20:22.000Z | import unittest
from d3m import container, utils
from d3m.metadata import base as metadata_base
from d3m.container import DataFrame as d3m_dataframe
from tods.detection_algorithm.PyodVAE import VariationalAutoEncoderPrimitive
from pyod.utils.data import generate_data
from tods.detection_algorithm.core.UODCommonTest import UODCommonTest
import numpy as np
class PyodAVECase(unittest.TestCase):
    """Unit tests for the TODS VariationalAutoEncoder detection primitive."""

    def setUp(self):
        # Show full diffs for the large metadata equality assertion below.
        self.maxDiff = None
        self.n_train = 200
        self.n_test = 100
        self.contamination = 0.1
        self.roc_floor = 0.8
        # Synthetic outlier dataset from pyod, seeded for reproducibility.
        self.X_train, self.y_train, self.X_test, self.y_test = generate_data(
            n_train=self.n_train, n_test=self.n_test,
            contamination=self.contamination, random_state=42)

        # Wrap features as D3M dataframes so container metadata is generated.
        self.X_train = d3m_dataframe(self.X_train, generate_metadata=True)
        self.X_test = d3m_dataframe(self.X_test, generate_metadata=True)

        # Start from default hyperparams, then override contamination and
        # request subsequence indices in the output.
        hyperparams_default = VariationalAutoEncoderPrimitive.metadata.get_hyperparams().defaults()
        hyperparams = hyperparams_default.replace({'contamination': self.contamination, })
        hyperparams = hyperparams.replace({'return_subseq_inds': True, })

        # Fit the primitive once and cache predictions for all tests.
        self.primitive = VariationalAutoEncoderPrimitive(hyperparams=hyperparams)
        self.primitive.set_training_data(inputs=self.X_train)
        self.primitive.fit()
        self.prediction_labels = self.primitive.produce(inputs=self.X_test).value
        self.prediction_score = self.primitive.produce_score(inputs=self.X_test).value

        # Shared unsupervised-outlier-detection checks (e.g. ROC >= roc_floor).
        self.uodbase_test = UODCommonTest(model=self.primitive._clf,
                                          X_train=self.X_train,
                                          y_train=self.y_train,
                                          X_test=self.X_test,
                                          y_test=self.y_test,
                                          roc_floor=self.roc_floor,
                                          )

    def test_detector(self):
        # Delegates to the common detector test suite.
        self.uodbase_test.test_detector()

    def test_metadata(self):
        # print(self.prediction_labels.metadata.to_internal_simple_structure())
        # The produced dataframe must carry exactly this D3M metadata layout:
        # a 100-row, 3-column table with one int64 attribute column per
        # returned field.
        self.assertEqual(utils.to_json_structure(self.prediction_labels.metadata.to_internal_simple_structure()), [{
            'selector': [],
            'metadata': {
                # 'top_level': 'main',
                'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
                'structural_type': 'd3m.container.pandas.DataFrame',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
                'dimension': {
                    'name': 'rows',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
                    'length': 100,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__'],
            'metadata': {
                'dimension': {
                    'name': 'columns',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
                    'length': 3,
                },
            },
        }, {
            'selector': ['__ALL_ELEMENTS__', 0],
            'metadata': {
                'name': 'TODS.anomaly_detection_primitives.VariationalAutoEncoder0_0',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
                'structural_type': 'numpy.int64',
            },
        }, {
            'selector': ['__ALL_ELEMENTS__', 1],
            'metadata': {
                'name': 'TODS.anomaly_detection_primitives.VariationalAutoEncoder0_1',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
                'structural_type': 'numpy.int64',
            },
        }, {
            'selector': ['__ALL_ELEMENTS__', 2],
            'metadata': {
                'name': 'TODS.anomaly_detection_primitives.VariationalAutoEncoder0_2',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
                'structural_type': 'numpy.int64',
            },
        }])

    def test_params(self):
        # Round-trip params through get/set to check serialization symmetry.
        params = self.primitive.get_params()
        self.primitive.set_params(params=params)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 41.2 | 116 | 0.588303 |
ace2cda6d290d49c2f4a70978b40122d08e62672 | 8,483 | py | Python | research/scripts/stock/seq_models.py | rakesh4real/eurusd | 9fe2ffe3001478a676fadc3d410405c231a9c183 | [
"MIT"
] | 1 | 2021-01-19T08:51:58.000Z | 2021-01-19T08:51:58.000Z | seq_models.py | anushkrishnav/stock-prediction | 0416c89c608a22ebc21d962167e83c39f9328091 | [
"MIT"
] | 2 | 2021-01-25T07:43:13.000Z | 2021-01-25T07:50:16.000Z | seq_models.py | anushkrishnav/stock-prediction | 0416c89c608a22ebc21d962167e83c39f9328091 | [
"MIT"
] | 1 | 2021-02-05T19:44:50.000Z | 2021-02-05T19:44:50.000Z | # =======================================================================================
# beg: basic imports and setup
# =======================================================================================
from tqdm import tqdm
from loguru import logger
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# =======================================================================================
# end: basic imports
# =======================================================================================
# =======================================================================================
# beg: config
# =======================================================================================
input_size = 1       # features per time step fed to the recurrent models
hidden_size = 256    # width of the recurrent hidden state
n_layers = 2         # number of stacked recurrent layers
n_classes = 1        # size of the final linear output
seq_len = 64         # sequence length per sample
batch_size = 1       # default batch size
# =======================================================================================
# end: config
# =======================================================================================
# ========================================================================================
# beg: models
# ========================================================================================
class RNN(nn.Module):
    """Vanilla RNN: stacked nn.RNN followed by a linear head applied to the
    concatenated hidden states of every time step.

    Args:
        seq_length: fixed sequence length of the inputs.
        input_size: features per time step.
        hidden_size: width of the recurrent hidden state.
        n_layers: number of stacked RNN layers.
        n_classes: size of the linear output.
    """

    def __init__(self, seq_length, input_size, hidden_size, n_layers, n_classes):
        super(RNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size, self.hidden_size, self.n_layers, batch_first=True)
        # The head sees all time steps, so its input width is hidden*seq.
        self.fc = nn.Linear(self.hidden_size * seq_length, n_classes)

    def forward(self, x):
        """x: (batch, seq_length, input_size) -> (batch, n_classes)."""
        # Initial hidden state, 3-D: (n_layers, batch_size, hidden_size).
        # Fix: allocate on the input's device instead of a module-level
        # global `device`, so the model works wherever its inputs live.
        h0 = torch.zeros(self.n_layers, x.shape[0], self.hidden_size, device=x.device)
        out, _ = self.rnn(x, h0)
        # Flatten all time steps before the classification layer.
        out = out.reshape(out.shape[0], -1)
        return self.fc(out)
class RNN_GRU(nn.Module):
    """GRU variant: stacked nn.GRU followed by a linear head applied to the
    concatenated hidden states of every time step.

    Args:
        seq_length: fixed sequence length of the inputs.
        input_size: features per time step.
        hidden_size: width of the recurrent hidden state.
        n_layers: number of stacked GRU layers.
        n_classes: size of the linear output.
    """

    def __init__(self, seq_length, input_size, hidden_size, n_layers, n_classes):
        super(RNN_GRU, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.rnn = nn.GRU(input_size, self.hidden_size, self.n_layers, batch_first=True)
        # The head sees all time steps, so its input width is hidden*seq.
        self.fc = nn.Linear(self.hidden_size * seq_length, n_classes)

    def forward(self, x):
        """x: (batch, seq_length, input_size) -> (batch, n_classes)."""
        # Initial hidden state, 3-D: (n_layers, batch_size, hidden_size).
        # Fix: allocate on the input's device instead of a module-level
        # global `device`, so the model works wherever its inputs live.
        h0 = torch.zeros(self.n_layers, x.shape[0], self.hidden_size, device=x.device)
        out, _ = self.rnn(x, h0)
        # Flatten all time steps before the classification layer.
        out = out.reshape(out.shape[0], -1)
        return self.fc(out)
class RNN_LSTM(nn.Module):
    """LSTM variant: stacked nn.LSTM followed by a linear head applied to
    the concatenated hidden states of every time step.

    Args:
        seq_length: fixed sequence length of the inputs.
        input_size: features per time step.
        hidden_size: width of the recurrent hidden state.
        n_layers: number of stacked LSTM layers.
        n_classes: size of the linear output.
    """

    def __init__(self, seq_length, input_size, hidden_size, n_layers, n_classes):
        super(RNN_LSTM, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size, self.hidden_size, self.n_layers, batch_first=True)
        # The head sees all time steps, so its input width is hidden*seq.
        self.fc = nn.Linear(self.hidden_size * seq_length, n_classes)

    def forward(self, x):
        """x: (batch, seq_length, input_size) -> (batch, n_classes)."""
        # Initial hidden and cell states, 3-D: (n_layers, batch, hidden).
        # Fix: allocate on the input's device instead of a module-level
        # global `device`, so the model works wherever its inputs live.
        h0 = torch.zeros(self.n_layers, x.shape[0], self.hidden_size, device=x.device)
        c0 = torch.zeros(self.n_layers, x.shape[0], self.hidden_size, device=x.device)
        out, _ = self.lstm(x, (h0, c0))
        # out: tensor of shape (batch_size, seq_length, hidden_size);
        # flatten all time steps before the classification layer.
        out = out.reshape(out.shape[0], -1)
        return self.fc(out)
# ========================================================================================
# end: models
# ========================================================================================
# ========================================================================================
# beg: dataloaders
# ========================================================================================
def get_dataloaders_from(xtrain, ytrain, xvalid, yvalid, xhdout, yhdout, **kwargs):
    """Wrap (features, targets) numpy splits into three torch DataLoaders.

    **kwargs:
      + batch_size
      + shuffle
    (forwarded verbatim to each DataLoader)
    """
    def _make_loader(features, targets):
        # Cast numpy arrays to float tensors and pair them in a dataset.
        dataset = TensorDataset(torch.from_numpy(features).float(),
                                torch.from_numpy(targets).float())
        return DataLoader(dataset=dataset, **kwargs)

    return (_make_loader(xtrain, ytrain),
            _make_loader(xvalid, yvalid),
            _make_loader(xhdout, yhdout))
# ========================================================================================
# end: dataloaders
# ========================================================================================
# ========================================================================================
# beg: trainer
# ========================================================================================
class Model:
    """Thin Keras-style wrapper (compile/fit/eval/predict) around a torch module."""

    def __init__(self, pytorch_model):
        self.pytorch_model = pytorch_model

    def compile(self, optimizer, loss, **kwargs):
        # `optimizer` is an optimizer *class* (it is called with the model's
        # parameters); kwargs such as lr are forwarded to its constructor.
        self.criterion = loss
        self.optimizer = optimizer(self.pytorch_model.parameters(), **kwargs)

    def fit(
        self,
        train_loader: DataLoader,
        valid_loader: DataLoader,
        epochs: int,
    ):
        """Train for `epochs` passes over train_loader.

        Returns a history dict: {'train_loss': [one loss value per epoch]}.
        Validation loss is computed once per epoch (on the last batch) and
        shown in the progress bar only.
        """
        log_every = 1  # NOTE(review): unused -- confirm whether periodic logging was intended
        hist = {
            'train_loss': []
        }
        self.pytorch_model.train()
        for epoch in range(epochs):
            pbar = tqdm(enumerate(train_loader), total=len(train_loader))
            for batch_idx, (data, targets) in pbar:
                # Get data to cuda if possible (uses the module-level `device`).
                data = data.to(device=device).squeeze(1)
                targets = targets.to(device=device)
                # forward
                scores = self.pytorch_model(data).reshape(-1)
                loss = self.criterion(scores, targets)
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # gradient descent or adam step
                self.optimizer.step()
                pbar.set_description(f'loss {loss.item():.6f}')
                # At the final batch of the epoch, run validation and fold it
                # into the progress-bar description.
                if batch_idx == len(train_loader) - 1:
                    val_loss = self.eval(valid_loader)
                    pbar.set_description(f'epoch {epoch} >> {pbar.desc} val_loss {val_loss:.6f}')
            # NOTE(review): indentation reconstructed from a mangled dump --
            # this appends the *last batch's* loss once per epoch; confirm
            # placement against upstream.
            hist['train_loss'].append(loss.item())
        return hist

    def eval(self, loader):
        """Return the median criterion value over `loader`.

        Switches the model to eval mode for the pass and back to train mode
        before returning (callers inside fit() rely on that).
        """
        losses = []
        self.pytorch_model.eval()
        with torch.no_grad():
            for x, y in loader:
                x = x.to(device=device).squeeze(1)
                y = y.to(device=device)
                scores = self.pytorch_model(x).reshape(-1)
                loss = self.criterion(scores, y)
                losses.append(loss)
        self.pytorch_model.train()
        return np.median(losses)

    def predict(self, ndarray):
        """Run inference on a numpy array; returns the prediction as numpy."""
        self.pytorch_model.eval()
        ret = self.pytorch_model(
            torch.from_numpy(ndarray).float().to(device))
        return ret.detach().cpu().numpy()
# ========================================================================================
# end: trainer
# ========================================================================================
if __name__ == '__main__':

    @logger.catch
    def test_rnn():
        # Smoke test: forward a random batch (batch=64, seq=100, features=28)
        # through the vanilla RNN and report the output shape.
        # NOTE(review): the module-level input_size is 1 but 28 features are
        # fed here; nn.RNN raises a size mismatch, which @logger.catch
        # swallows -- confirm the intended dimensions.
        model = RNN(100, input_size, hidden_size, n_layers, n_classes).to(device)
        test_batch_seq = torch.rand(64, 100, 28).to(device)
        out = model(test_batch_seq)
        return out.shape

    @logger.catch
    def test_gru():
        # Same smoke test against the GRU variant.
        model = RNN_GRU(100, input_size, hidden_size, n_layers, n_classes).to(device)
        test_batch_seq = torch.rand(64, 100, 28).to(device)
        out = model(test_batch_seq)
        return out.shape

    @logger.catch
    def test_lstm():
        # Same smoke test against the LSTM variant.
        model = RNN_LSTM(100, input_size, hidden_size, n_layers, n_classes).to(device)
        test_batch_seq = torch.rand(64, 100, 28).to(device)
        out = model(test_batch_seq)
        return out.shape

    logger.success(f'[RNN] TEST PASSED! output shape: {test_rnn()}')
    logger.success(f'[GRU] TEST PASSED! output shape: {test_gru()}')
logger.success(f'[LSTM] TEST PASSED! output shape: {test_lstm()}') | 36.252137 | 101 | 0.493693 |
ace2cdc0cec2b64dc40935053e232c47483a66ea | 1,530 | py | Python | exercises/tree-building/tree_building.py | Alexhans/python | 41287dda364e11edbaeda7cc50994d951007e835 | [
"MIT"
] | 2 | 2019-07-25T04:40:24.000Z | 2020-12-18T21:29:02.000Z | exercises/tree-building/tree_building.py | toroad/python | ce085c81a82ae5fb460fe166323dbbaa5a2588c5 | [
"MIT"
] | null | null | null | exercises/tree-building/tree_building.py | toroad/python | ce085c81a82ae5fb460fe166323dbbaa5a2588c5 | [
"MIT"
] | 1 | 2021-12-29T19:26:23.000Z | 2021-12-29T19:26:23.000Z | class Record():
def __init__(self, record_id, parent_id):
self.record_id = record_id
self.parent_id = parent_id
class Node():
def __init__(self, node_id):
self.node_id = node_id
self.children = []
def BuildTree(records):
root = None
records.sort(key=lambda x: x.record_id)
ordered_id = [i.record_id for i in records]
if records:
if ordered_id[-1] != len(ordered_id) - 1:
raise ValueError
if ordered_id[0] != 0:
raise ValueError
trees = []
parent = {}
for i in range(len(ordered_id)):
for j in records:
if ordered_id[i] == j.record_id:
if j.record_id == 0:
if j.parent_id != 0:
raise ValueError
if j.record_id < j.parent_id:
raise ValueError
if j.record_id == j.parent_id:
if j.record_id != 0:
raise ValueError
trees.append(Node(ordered_id[i]))
for i in range(len(ordered_id)):
for j in trees:
if i == j.node_id:
parent = j
for j in records:
if j.parent_id == i:
for k in trees:
if k.node_id == 0:
continue
if j.record_id == k.node_id:
child = k
parent.children.append(child)
if len(trees) > 0:
root = trees[0]
return root
| 30 | 53 | 0.481046 |
ace2cdec7d71ba5cd88a62d529fc9a283fd28fa3 | 5,296 | py | Python | home/print_results.py | ardyflora/UdacityAIProgrammingWithPython | 2f2c3161df5f5bd8d1dfa08961bbe6ff57f02d86 | [
"MIT"
] | null | null | null | home/print_results.py | ardyflora/UdacityAIProgrammingWithPython | 2f2c3161df5f5bd8d1dfa08961bbe6ff57f02d86 | [
"MIT"
] | null | null | null | home/print_results.py | ardyflora/UdacityAIProgrammingWithPython | 2f2c3161df5f5bd8d1dfa08961bbe6ff57f02d86 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/print_results.py
#
# PROGRAMMER: Ripudaman Flora
# DATE CREATED: Jan 7, 2019
# REVISED DATE:
# PURPOSE: Create a function print_results that prints the results statistics
# from the results statistics dictionary (results_stats_dic). It
# should also allow the user to be able to print out cases of misclassified
# dogs and cases of misclassified breeds of dog using the Results
# dictionary (results_dic).
# This function inputs:
# -The results dictionary as results_dic within print_results
# function and results for the function call within main.
# -The results statistics dictionary as results_stats_dic within
# print_results function and results_stats for the function call within main.
# -The CNN model architecture as model wihtin print_results function
# and in_arg.arch for the function call within main.
# -Prints Incorrectly Classified Dogs as print_incorrect_dogs within
# print_results function and set as either boolean value True or
# False in the function call within main (defaults to False)
# -Prints Incorrectly Classified Breeds as print_incorrect_breed within
# print_results function and set as either boolean value True or
# False in the function call within main (defaults to False)
# This function does not output anything other than printing a summary
# of the final results.
##
# TODO 6: Define print_results function below, specifically replace the None
# below by the function definition of the print_results function.
# Notice that this function doesn't to return anything because it
# prints a summary of the results using results_dic and results_stats_dic
#
def print_results(results_dic, results_stats_dic, model,
                  print_incorrect_dogs = False, print_incorrect_breed = False):
    """
    Prints summary results on the classification and then prints incorrectly
    classified dogs and incorrectly classified dog breeds if user indicates
    they want those printouts (use non-default values)
    Parameters:
      results_dic - Dictionary with key as image filename and value as a List
             (index)idx 0 = pet image label (string)
                    idx 1 = classifier label (string)
                    idx 2 = 1/0 (int)  where 1 = match between pet image and
                            classifer labels and 0 = no match between labels
                    idx 3 = 1/0 (int)  where 1 = pet image 'is-a' dog and
                            0 = pet Image 'is-NOT-a' dog.
                    idx 4 = 1/0 (int)  where 1 = Classifier classifies image
                            'as-a' dog and 0 = Classifier classifies image
                            'as-NOT-a' dog.
      results_stats_dic - Dictionary that contains the results statistics (either
                   a  percentage or a count) where the key is the statistic's
                     name (starting with 'pct' for percentage or 'n' for count)
                     and the value is the statistic's value
      model - Indicates which CNN model architecture will be used by the
              classifier function to classify the pet images,
              values must be either: resnet alexnet vgg (string)
      print_incorrect_dogs - True prints incorrectly classified dog images and
                             False doesn't print anything(default) (bool)
      print_incorrect_breed - True prints incorrectly classified dog breeds and
                              False doesn't print anything(default) (bool)
    Returns:
           None - simply printing results.
    """
    # Prints summary statistics over the run
    print("\n\n*** Results Summary for CNN Model Architecture",model.upper(),
          "***")
    print("{:20}: {:3d}".format('N Images', results_stats_dic['n_images']))
    print("{:20}: {:3d}".format('N Dog Images', results_stats_dic['n_dogs_img']))
    print("{:20}: {:3d}".format('N Not-Dog Images', results_stats_dic['n_notdogs_img']))

    # Prints summary statistics (percentages) on Model Run
    for key in results_stats_dic:
        if key.startswith('p'):
            print("{:20}: {:3f}".format(key, results_stats_dic[key]))

    # Fix: the print_incorrect_dogs flag was accepted but never used.
    # IF print_incorrect_dogs == True AND there were some misclassified dogs
    # (i.e. correct dog + correct not-dog counts don't cover all images),
    # print out those cases.  Requires the 'n_correct_notdogs' statistic.
    if (print_incorrect_dogs and
        ((results_stats_dic['n_correct_dogs'] +
          results_stats_dic['n_correct_notdogs'])
         != results_stats_dic['n_images'])
       ):
        print("\nINCORRECT Dog/NOT Dog Assignments:")

        # process through results dict, printing incorrectly classified dogs
        for key in results_dic:

            # Exactly one of (is-a-dog, classified-as-dog) is 1 -> mismatch
            if sum(results_dic[key][3:]) == 1:
                print("Real: {:>26}   Classifier: {:>30}".format(results_dic[key][0],
                                                                 results_dic[key][1]))

    # IF print_incorrect_breed == True AND there were dogs whose breeds
    # were incorrectly classified - print out these cases
    if (print_incorrect_breed and
        (results_stats_dic['n_correct_dogs'] != results_stats_dic['n_correct_breed'])
       ):
        print("\nINCORRECT Dog Breed Assignment:")

        # process through results dict, printing incorrectly classified breeds
        for key in results_dic:

            # Pet Image Label is-a-Dog, classified as-a-dog but is WRONG breed
            if ( sum(results_dic[key][3:]) == 2 and
                results_dic[key][2] == 0 ):
                print("Real: {:>26}   Classifier: {:>30}".format(results_dic[key][0],
                                                                 results_dic[key][1]))
| 58.844444 | 94 | 0.625378 |
ace2ceeada3195d7744f2d2564841cbf3d3a862e | 4,376 | py | Python | code/options.py | mueedhafiz1982/point-cloud-generation-from-2D_image | a0067d369f0824846a90948d50522e05bf2742c4 | [
"MIT"
] | null | null | null | code/options.py | mueedhafiz1982/point-cloud-generation-from-2D_image | a0067d369f0824846a90948d50522e05bf2742c4 | [
"MIT"
] | null | null | null | code/options.py | mueedhafiz1982/point-cloud-generation-from-2D_image | a0067d369f0824846a90948d50522e05bf2742c4 | [
"MIT"
] | null | null | null | import numpy as np
import argparse,os
import scipy.linalg
import tensorflow as tf
import util
def set(training):
    """Parse CLI options and attach derived/fixed fields to the namespace.

    Args:
        training (bool): True adds training-only flags (lr schedule, loss
            weights, iteration range); False adds evaluation defaults.

    Returns:
        argparse.Namespace: parsed options plus derived fields (resolution
        ints, camera matrices, fused view transforms, ...).
    """
    # NOTE(review): the function name shadows the builtin `set`; callers use
    # it as options.set(...), so renaming would break the public interface.
    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--category", default="03001627", help="category ID number")
    parser.add_argument("--group", default="0", help="name for group")
    parser.add_argument("--model", default="test", help="name for model instance")
    parser.add_argument("--load", default=None, help="load trained model to fine-tune/evaluate")
    parser.add_argument("--std", type=float, default=0.1, help="initialization standard deviation")
    parser.add_argument("--outViewN", type=int, default=8, help="number of fixed views (output)")
    parser.add_argument("--inSize", default="64x64", help="resolution of encoder input")
    parser.add_argument("--outSize", default="128x128", help="resolution of decoder output")
    parser.add_argument("--predSize", default="128x128", help="resolution of prediction")
    parser.add_argument("--upscale", type=int, default=5, help="upscaling factor for rendering")
    parser.add_argument("--novelN", type=int, default=5, help="number of novel views simultaneously")
    parser.add_argument("--arch", default=None)
    if training: # training
        parser.add_argument("--batchSize", type=int, default=20, help="batch size for training")
        parser.add_argument("--chunkSize", type=int, default=100, help="data chunk size to load")
        parser.add_argument("--itPerChunk", type=int, default=50, help="training iterations per chunk")
        parser.add_argument("--lr", type=float, default=1e-4, help="base learning rate (AE)")
        parser.add_argument("--lrDecay", type=float, default=1.0, help="learning rate decay multiplier")
        parser.add_argument("--lrStep", type=int, default=20000, help="learning rate decay step size")
        parser.add_argument("--lambdaDepth", type=float, default=1.0, help="loss weight factor (depth)")
        parser.add_argument("--fromIt", type=int, default=0, help="resume training from iteration number")
        parser.add_argument("--toIt", type=int, default=100000, help="run training to iteration number")
    else: # evaluation
        parser.add_argument("--batchSize", type=int, default=1, help="batch size for evaluation")
    opt = parser.parse_args()

    # these stay fixed
    opt.sampleN = 100
    opt.renderDepth = 1.0
    opt.BNepsilon = 1e-5
    opt.BNdecay = 0.999
    opt.inputViewN = 24
    # ------ below automatically set ------
    opt.training = training
    # "WxH" strings -> integer dimensions
    opt.inH, opt.inW = [int(x) for x in opt.inSize.split("x")]
    opt.outH, opt.outW = [int(x) for x in opt.outSize.split("x")]
    opt.H, opt.W = [int(x) for x in opt.predSize.split("x")]
    # side length of the (square) visualization grid of samples
    opt.visBlockSize = int(np.floor(np.sqrt(opt.batchSize)))
    # Homogeneous matrices mapping 3-D points <-> image pixels (y axis
    # flipped, principal point at the image center, z negated).
    opt.Khom3Dto2D = np.array([[opt.W, 0, 0, opt.W / 2],
                               [0, -opt.H, 0, opt.H / 2],
                               [0, 0, -1, 0],
                               [0, 0, 0, 1]], dtype=np.float32)
    opt.Khom2Dto3D = np.array([[opt.outW, 0, 0, opt.outW / 2],
                               [0, -opt.outH, 0, opt.outH / 2],
                               [0, 0, -1, 0],
                               [0, 0, 0, 1]], dtype=np.float32)
    # NOTE(review): hardcoded Colab-style path -- confirm this matches the
    # intended deployment layout.
    opt.fuseTrans = np.load("/content/data/trans_fuse{0}.npy".format(opt.outViewN), allow_pickle=True)

    # Pretty-print the effective configuration (colors via project util).
    print("({0}) {1}".format(
        util.toGreen("{0}".format(opt.group)),
        util.toGreen("{0}".format(opt.model))))
    print("------------------------------------------")
    print("batch size: {0}, category: {1}".format(
        util.toYellow("{0}".format(opt.batchSize)),
        util.toYellow("{0}".format(opt.category))))
    print("size: {0}x{1}(in), {2}x{3}(out), {4}x{5}(pred)".format(
        util.toYellow("{0}".format(opt.inH)),
        util.toYellow("{0}".format(opt.inW)),
        util.toYellow("{0}".format(opt.outH)),
        util.toYellow("{0}".format(opt.outW)),
        util.toYellow("{0}".format(opt.H)),
        util.toYellow("{0}".format(opt.W))))
    if training:
        print("learning rate: {0} (decay: {1}, step size: {2})".format(
            util.toYellow("{0:.2e}".format(opt.lr)),
            util.toYellow("{0}".format(opt.lrDecay)),
            util.toYellow("{0}".format(opt.lrStep))))
        print("depth loss weight: {0}".format(
            util.toYellow("{0}".format(opt.lambdaDepth))))
    print("viewN: {0}(out), upscale: {1}, novelN: {2}".format(
        util.toYellow("{0}".format(opt.outViewN)),
        util.toYellow("{0}".format(opt.upscale)),
        util.toYellow("{0}".format(opt.novelN))))
    print("------------------------------------------")
    if training:
        print(util.toMagenta("training model ({0}) {1}...".format(opt.group,opt.model)))

    return opt
| 49.168539 | 103 | 0.659278 |
ace2cfac0376ed531da7b0c1264c131ea27db920 | 1,155 | py | Python | circus/commands/rmwatcher.py | ajah/circus | 4ae397e0f46686edf1013000234b92698231bf40 | [
"Apache-2.0"
] | 1 | 2015-11-05T11:44:54.000Z | 2015-11-05T11:44:54.000Z | circus/commands/rmwatcher.py | ajah/circus | 4ae397e0f46686edf1013000234b92698231bf40 | [
"Apache-2.0"
] | null | null | null | circus/commands/rmwatcher.py | ajah/circus | 4ae397e0f46686edf1013000234b92698231bf40 | [
"Apache-2.0"
] | null | null | null | from circus.commands.base import Command
from circus.exc import ArgumentError
class RmWatcher(Command):
"""\
Remove a watcher
================
This command remove a watcher dynamically from the arbiter. The
watchers are gracefully stopped.
ZMQ Message
-----------
::
{
"command": "rm",
"properties": {
"name": "nameofwatcher",
}
}
A message contains 1 property:
- name: name of watcher
The response return a status "ok".
Command line
------------
::
$ circusctl rm <name>
Options
+++++++
- <name>: name of the watcher to create
"""
name = "rm"
properties = ['name']
def message(self, *args, **opts):
if len(args) < 1 or len(args) > 1:
raise ArgumentError("number of arguments invalid")
return self.make_message(name=args[0])
def execute(self, arbiter, props):
self._get_watcher(arbiter, props['name'])
arbiter.rm_watcher(props['name'])
| 20.263158 | 71 | 0.499567 |
ace2cfda75e8c6a51e697e39a582286f78badcd7 | 21,007 | py | Python | mtls_server/cert_processor.py | drGrove/prodaccess-server | a2bd4e86c4477e388286739fda4a7a591e8eb206 | [
"Apache-2.0"
] | null | null | null | mtls_server/cert_processor.py | drGrove/prodaccess-server | a2bd4e86c4477e388286739fda4a7a591e8eb206 | [
"Apache-2.0"
] | null | null | null | mtls_server/cert_processor.py | drGrove/prodaccess-server | a2bd4e86c4477e388286739fda4a7a591e8eb206 | [
"Apache-2.0"
] | null | null | null | """Certificate Processor."""
import datetime
import os
import uuid
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID, ExtendedKeyUsageOID, ExtensionOID
import gnupg
from .key_refresh import KeyRefresh
from .logger import logger
from .storage import StorageEngine
from .storage import StorageEngineCertificateConflict
from .storage import StorageEngineMissing
from .utils import create_dir_if_missing
from .utils import get_abs_path
class CertProcessorKeyNotFoundError(Exception):
    """Raised when a required key is missing (e.g. get_ca_cert called with
    no certificate on disk and no key to create one)."""
    pass


class CertProcessorInvalidSignatureError(Exception):
    """Module-specific error; presumably signals a PGP signature that fails
    verification (not raised in this excerpt)."""
    pass


class CertProcessorUntrustedSignatureError(Exception):
    """Module-specific error; presumably signals a signature from an
    untrusted key (not raised in this excerpt)."""
    pass


class CertProcessorMismatchedPublicKeyError(Exception):
    """Module-specific error; presumably signals a public key that does not
    match an expected one (not raised in this excerpt)."""
    pass


class CertProcessorNotAdminUserError(Exception):
    """Module-specific error; presumably signals an admin-only operation
    attempted by a non-admin user (not raised in this excerpt)."""
    pass


class CertProcessorNoPGPKeyFoundError(Exception):
    """Module-specific error; presumably signals a missing PGP key (not
    raised in this excerpt)."""
    pass


class CertProcessorUnsupportedCriticalExtensionError(Exception):
    """Module-specific error; presumably signals an unsupported critical
    X.509 extension (not raised in this excerpt)."""
    pass
class CertProcessor:
    def __init__(self, config, user_gnupg, admin_gnupg):
        """Certificate Processor.

        Sets up the user/admin GPG keyrings, optional background key
        refresh, and the storage engine.

        Args:
            config (ConfigParser): a config as configparser.
            user_gnupg: accepted but unused -- see NOTE below.
            admin_gnupg: accepted but unused -- see NOTE below.
        """
        # NOTE(review): the user_gnupg/admin_gnupg parameters are never used;
        # GPG handles are constructed from config paths instead -- confirm
        # whether the parameters should be honored or removed.
        user_gnupg_path = get_abs_path(
            config.get(
                "gnupg", "user", os.path.join(os.getcwd(), "secrets/gnupg")
            )
        )
        admin_gnupg_path = get_abs_path(
            config.get(
                "gnupg",
                "admin",
                os.path.join(os.getcwd(), "secrets/gnupg_admin"),
            )
        )
        create_dir_if_missing(user_gnupg_path)
        create_dir_if_missing(admin_gnupg_path)
        self.user_gpg = gnupg.GPG(gnupghome=user_gnupg_path)
        self.admin_gpg = gnupg.GPG(gnupghome=admin_gnupg_path)
        self.user_gpg.encoding = "utf-8"
        self.admin_gpg.encoding = "utf-8"
        # Start Background threads for getting revoke/expiry from Keyserver
        # (opt-in via the AUTO_REFRESH_KEYS environment variable).
        if os.environ.get('AUTO_REFRESH_KEYS', '0') == '1':
            KeyRefresh("user_key_refresh", self.user_gpg, config)
            KeyRefresh("admin_key_refresh", self.admin_gpg, config)
        # A storage engine is mandatory; fail fast if not configured.
        if config.get("storage", "engine", None) is None:
            raise StorageEngineMissing()
        self.storage = StorageEngine(config)
        self.storage.init_db()
        self.config = config
        # Serialization defaults reused by key handling below.
        # NOTE(review): attribute name 'no_encyption' is a typo but is
        # referenced elsewhere (see get_ca_key) -- renaming needs a sweep.
        self.openssl_format = serialization.PrivateFormat.TraditionalOpenSSL
        self.no_encyption = serialization.NoEncryption()
        self.SERVER_URL = config.get(
            "mtls", "fqdn", os.environ.get("FQDN", "localhost")
        )
        self.PROTOCOL = config.get(
            "mtls", "protocol", os.environ.get("PROTOCOL", "http")
        )
def get_csr(self, csr):
"""Given a CSR string, get a cryptography CSR Object.
Args:
csr (str): A csr string.
Returns:
cryptography.x509.CertificateSigningRequest: A cryptography CSR
Object if it can be parsed, otherwise None.
"""
try:
return x509.load_pem_x509_csr(
bytes(csr, "utf-8"), default_backend()
)
except Exception as e:
logger.error(e)
return None
    def get_ca_key(self):
        """Get the CA Key, generating and persisting one if missing.

        Loads the PEM private key from the configured ``ca.key`` path,
        decrypting with ``CA_KEY_PASSWORD`` when set.  If the file is
        missing or unreadable, a new 4096-bit RSA key is generated, written
        to the same path (encrypted when ``CA_KEY_PASSWORD`` is set), and
        returned.

        Returns:
            cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey: The
            CAs private key.
        """
        ca_key_path = self.config.get("ca", "key")
        # Resolve relative paths against this module's directory.
        if not os.path.isabs(ca_key_path):
            ca_key_path = os.path.abspath(
                os.path.join(os.path.dirname(__file__), ca_key_path)
            )
        ca_dir = "/".join(ca_key_path.split("/")[:-1])
        create_dir_if_missing(ca_dir)
        try:
            with open(ca_key_path, "rb") as key_file:
                # Only pass a password when one is actually configured.
                if os.environ.get("CA_KEY_PASSWORD"):
                    pw = os.environ.get("CA_KEY_PASSWORD", "").encode("UTF-8")
                else:
                    pw = None
                ca_key = serialization.load_pem_private_key(
                    key_file.read(), password=pw, backend=default_backend()
                )
                return ca_key
        except (ValueError, FileNotFoundError):
            # Missing or unparseable key file: generate a fresh root key.
            logger.error("Error opening file: {}".format(ca_key_path))
            logger.info("Generating new root key...")
            key = rsa.generate_private_key(
                public_exponent=65537, key_size=4096, backend=default_backend()
            )
            if os.environ.get("CA_KEY_PASSWORD"):
                encryption_algorithm = serialization.BestAvailableEncryption(
                    os.environ.get("CA_KEY_PASSWORD", "").encode("UTF-8")
                )
            else:
                # Attribute name is a known typo set in __init__.
                encryption_algorithm = self.no_encyption
            key_data = key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=self.openssl_format,
                encryption_algorithm=encryption_algorithm,
            )
            # Persist so subsequent calls load the same key.
            with open(ca_key_path, "wb") as f:
                f.write(key_data)
            return key
def get_ca_cert(self, key=None):
"""Get the CA Certificate.
Args:
key (cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
A key in cryptography RSAPrivateKey format.
Returns:
cryptography.x509.Certificate: The CA Certificate.
Returns:
cryptography.x509.Certificate: CA Certificate
"""
ca_cert_path = get_abs_path(self.config.get("ca", "cert"))
ca_dir = "/".join(ca_cert_path.split("/")[:-1])
create_dir_if_missing(ca_dir)
# Grab the CA Certificate from filesystem if it exists and return
if os.path.isfile(ca_cert_path):
with open(ca_cert_path, "rb") as cert_file:
ca_cert = x509.load_pem_x509_certificate(
cert_file.read(), default_backend()
)
return ca_cert
# We want this to run after the attempt to get the Cert for the case where we want
# a certificate that's already been generated. e.g. the route for getting the CA
# certificate
if key is None:
raise CertProcessorKeyNotFoundError()
key_id = x509.SubjectKeyIdentifier.from_public_key(key.public_key())
subject = issuer = x509.Name(
[
x509.NameAttribute(
NameOID.COMMON_NAME, self.config.get("ca", "issuer")
)
]
)
now = datetime.datetime.utcnow()
serial = x509.random_serial_number()
ca_cert = (
x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(issuer)
.public_key(key.public_key())
.serial_number(serial)
.not_valid_before(now)
.not_valid_after(now + datetime.timedelta(days=365))
.add_extension(key_id, critical=False)
.add_extension(
x509.AuthorityKeyIdentifier(
key_id.digest, [x509.DirectoryName(issuer)], serial
),
critical=False,
)
.add_extension(
x509.BasicConstraints(ca=True, path_length=0), critical=True
)
.add_extension(
x509.KeyUsage(
digital_signature=True,
content_commitment=False,
key_encipherment=False,
data_encipherment=False,
key_agreement=False,
key_cert_sign=True,
crl_sign=True,
encipher_only=False,
decipher_only=False,
),
critical=True,
)
.sign(key, hashes.SHA256(), default_backend())
)
with open(ca_cert_path, "wb") as f:
f.write(ca_cert.public_bytes(serialization.Encoding.PEM))
return ca_cert
def is_admin(self, fingerprint):
"""Determine if the fingerprint is associated with with an admin.
Args:
fingerprint (str): The users fingerprint.
Returns:
bool: Is the user an admin.
"""
if self.get_gpg_key_by_fingerprint(fingerprint, True) is not None:
return True
return False
def get_gpg_key_by_fingerprint(self, fingerprint, is_admin=False):
if is_admin:
keys = self.admin_gpg.list_keys()
else:
keys = self.user_gpg.list_keys()
for key in keys:
if key["fingerprint"] == fingerprint:
return key
return None
def check_subject_against_key(self, subj, signer_key):
"""Check a subject email against the signing fingerprint.
The only exception to this is if an admin user is to generate a
certificate on behalf of someone else. This should be done with extreme
care, but access will only be allowed for the life of the certificate.
Args:
subj (cryptography.x509.Name): An x509 subject.
signer_key (dict): PGP key details from python-gnupg.
Returns:
Wheather the subject email matches a PGP uid for a given
fingerprint.
"""
email = subj.get_attributes_for_oid(NameOID.EMAIL_ADDRESS)[0].value
return any(email in uid for uid in signer_key["uids"])
def check_san_against_key(self, san, signer_key):
"""Check a SAN email against the signing fingerprint.
The only exception to this is if an admin user is to generate a
certificate on behalf of someone else. This should be done with extreme
care, but access will only be allowed for the life of the certificate.
Args:
san (cryptography.x509.SubjectAlternativeName): An x509 SubjectAlternativeName.
signer_key (dict): PGP key details from python-gnupg.
Returns:
Wheather the subject email matches a PGP uid for a given
fingerprint.
"""
email = san.value.get_values_for_type(x509.RFC822Name)[0]
return any(email in uid for uid in signer_key["uids"])
def get_allowed_subject_name(self, subj, ca_cert, gpg_key, is_admin):
csr_subject_arr = []
attr = ca_cert.subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)
if attr:
ca_cert_organization = attr[0].value
else:
ca_cert_organization = ""
for attribute in subj:
attr_oid = attribute.oid
val = attribute.value
if attr_oid == NameOID.COMMON_NAME:
csr_subject_arr.append(attribute)
continue
if attr_oid == NameOID.EMAIL_ADDRESS:
email = subj.get_attributes_for_oid(NameOID.EMAIL_ADDRESS)[0].value
csr_subject_arr.append(x509.NameAttribute(NameOID.EMAIL_ADDRESS, email))
email_in_key = self.check_subject_against_key(subj, gpg_key)
if not email_in_key and not is_admin:
raise CertProcessorNotAdminUserError()
continue
if attr_oid == NameOID.ORGANIZATION_NAME:
# If the organization provided does not align with the CA, just
# override with the CA Organization Name. Since we've already proven
# that the user is allowed to create a Client Certificate for this
# Organization it isn't a big deal
if val != ca_cert_organization:
# If the CA Certificate does not have an organization, just skip
if ca_cert_organization != "":
continue
attribute = x509.NameAttribute(NameOID.ORGANIZATION_NAME, ca_cert_organization)
csr_subject_arr.append(attribute)
continue
logger.warning(f"Disallowed Name OID {attr_oid} removed from Subject")
return x509.Name(csr_subject_arr)
def get_allowed_extensions(self, csr, gpg_key, is_admin):
extensions = []
for extension in csr.extensions:
if extension.oid == ExtensionOID.SUBJECT_ALTERNATIVE_NAME:
allowed_entries = [x509.RFC822Name]
final_entries = []
for entry in allowed_entries:
if entry == x509.RFC822Name:
email_in_key = self.check_san_against_key(extension, gpg_key)
if not email_in_key and not is_admin:
raise CertProcessorNotAdminUserError()
final_entries.append(x509.RFC822Name(extension.value.get_values_for_type(x509.RFC822Name)[0]))
extensions.append((x509.SubjectAlternativeName(final_entries), False))
continue
## Catch All
if extension.critical == True:
logger.critical(f"CSR with Critical Extension {extension.oid} found could not be processed.")
raise CertProcessorUnsupportedCriticalExtensionError()
return extensions
def generate_cert(self, csr, lifetime, fingerprint):
"""Generate a Certificate from a CSR.
Args:
csr: The CSR object
lifetime: The lifetime of the certificate in seconds
fingerprint: The fingerprint of the signer for the CSR.
Raises:
CertProcessorNotAdminUserError: When an admin request is made
without and admin key
CertProcessorInvalidSignatureError: When an invalid user attempts
to sign a request for a certificate
Returns:
The certificates public bytes
"""
ca_pkey = self.get_ca_key()
ca_cert = self.get_ca_cert(ca_pkey)
now = datetime.datetime.utcnow()
lifetime_delta = now + datetime.timedelta(seconds=int(lifetime))
is_admin = self.is_admin(fingerprint)
logger.info(f"generate_cert: getting gpg key for {fingerprint}")
user_gpg_key = self.get_gpg_key_by_fingerprint(fingerprint, is_admin)
if user_gpg_key is None:
raise CertProcessorNoPGPKeyFoundError()
builder = x509.CertificateBuilder()
builder = builder.subject_name(self.get_allowed_subject_name(csr.subject, ca_cert, user_gpg_key, is_admin))
builder = builder.issuer_name(ca_cert.subject)
builder = builder.public_key(csr.public_key())
builder = builder.serial_number(uuid.uuid4().int)
builder = builder.not_valid_before(now)
builder = builder.not_valid_after(lifetime_delta)
for extension in self.get_allowed_extensions(csr, user_gpg_key, is_admin):
builder = builder.add_extension(extension[0], critical=extension[1])
crl_dp = x509.DistributionPoint(
[
x509.UniformResourceIdentifier(
"{protocol}://{server_url}/crl".format(
protocol=self.PROTOCOL, server_url=self.SERVER_URL
)
)
],
relative_name=None,
reasons=None,
crl_issuer=None,
)
builder = builder.add_extension(
x509.CRLDistributionPoints([crl_dp]), critical=False
)
builder = builder.add_extension(
x509.BasicConstraints(ca=False, path_length=None),
critical=True,
)
builder = builder.add_extension(
x509.ExtendedKeyUsage([ExtendedKeyUsageOID.CLIENT_AUTH]),
critical=True,
)
logger.info(f"generate_cert: Signing certificate for {fingerprint}")
cert = builder.sign(
private_key=ca_pkey,
algorithm=hashes.SHA256(),
backend=default_backend(),
)
try:
logger.info(f"generate_cert: saving certificate for {fingerprint}")
self.storage.save_cert(cert, fingerprint)
except StorageEngineCertificateConflict:
logger.info(
f"generate_cert: updating certificate for {fingerprint}"
)
cert = self.update_cert(csr, lifetime, user_gpg_key, is_admin)
return cert.public_bytes(serialization.Encoding.PEM)
def update_cert(self, csr, lifetime, user_gpg_key, is_admin):
"""Given a CSR, look it up in the database, update it and present the
new certificate.
Args:
csr (cryptography.x509.CertificateSigningRequest): A CSR.
lifetime (int): Lifetime in seconds.
Raises:
CertProcessorMismatchedPublicKeyError: The public key from the new
CSR does not match the in database Certificate.
Returns:
cryptography.x509.Certificate: A Signed Certificate for a user.
"""
common_name = csr.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
common_name = common_name[0].value
bcert = bytes(
str(self.storage.get_cert(common_name=common_name)[0]), "UTF-8"
)
old_cert = x509.load_pem_x509_certificate(
bcert, backend=default_backend()
)
old_cert_pub = (
old_cert.public_key()
.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
.decode("UTF-8")
)
csr_pub = (
csr.public_key()
.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
.decode("UTF-8")
)
if old_cert_pub != csr_pub:
raise CertProcessorMismatchedPublicKeyError
ca_pkey = self.get_ca_key()
ca_cert = self.get_ca_cert(ca_pkey)
now = datetime.datetime.utcnow()
lifetime_delta = now + datetime.timedelta(seconds=int(lifetime))
builder = x509.CertificateBuilder()
builder = builder.subject_name(old_cert.subject)
builder = builder.issuer_name(ca_cert.subject)
builder = builder.public_key(csr.public_key())
builder = builder.serial_number(old_cert.serial_number)
builder = builder.not_valid_before(old_cert.not_valid_before)
builder = builder.not_valid_after(lifetime_delta)
for extension in self.get_allowed_extensions(csr, user_gpg_key, is_admin):
builder = builder.add_extension(extension[0], critical=extension[1])
crl_dp = x509.DistributionPoint(
[
x509.UniformResourceIdentifier(
"{protocol}://{server_url}/crl".format(
protocol=self.PROTOCOL, server_url=self.SERVER_URL
)
)
],
relative_name=None,
reasons=None,
crl_issuer=None,
)
builder = builder.add_extension(
x509.CRLDistributionPoints([crl_dp]), critical=False
)
cert = builder.sign(
private_key=ca_pkey,
algorithm=hashes.SHA256(),
backend=default_backend(),
)
self.storage.update_cert(cert=cert, serial_number=cert.serial_number)
return cert
def get_crl(self):
"""Generates a Certificate Revocation List.
Returns:
A Certificate Revocation List.
"""
ca_pkey = self.get_ca_key()
ca_cert = self.get_ca_cert(ca_pkey)
crl = (
x509.CertificateRevocationListBuilder()
.issuer_name(ca_cert.subject)
.last_update(datetime.datetime.utcnow())
.next_update(
datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
)
)
for cert in self.storage.get_revoked_certs():
# Convert the string cert into a cryptography cert object
cert = x509.load_pem_x509_certificate(
bytes(str(cert), "UTF-8"), backend=default_backend()
)
# Add the certificate to the CRL
crl = crl.add_revoked_certificate(
x509.RevokedCertificateBuilder()
.serial_number(cert.serial_number)
.revocation_date(datetime.datetime.utcnow())
.build(backend=default_backend())
)
# Sign the CRL
crl = crl.sign(
private_key=ca_pkey,
algorithm=hashes.SHA256(),
backend=default_backend(),
)
return crl
def revoke_cert(self, serial_number):
"""Given a serial number, revoke a certificate.
Args:
serial_number (int): A certificate serial number.
"""
self.storage.revoke_cert(serial_number)
| 37.5125 | 118 | 0.601752 |
ace2d0082110ae22783a1ca323c70bb8caf768e2 | 2,763 | py | Python | zipline/utils/enum.py | moneygeek/zipline | c90019754d4a02d7118c181535d3932e40430633 | [
"Apache-2.0"
] | null | null | null | zipline/utils/enum.py | moneygeek/zipline | c90019754d4a02d7118c181535d3932e40430633 | [
"Apache-2.0"
] | null | null | null | zipline/utils/enum.py | moneygeek/zipline | c90019754d4a02d7118c181535d3932e40430633 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import (
Structure,
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort,
sizeof,
)
import numpy as np
import pandas as pd
from six.moves import range
_inttypes_map = {
sizeof(t) - 1: t for t in {
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort
}
}
_inttypes = list(
pd.Series(_inttypes_map).sort_index().reindex(
range(max(_inttypes_map.keys())),
method='bfill',
),
)
def enum(option, *options):
"""
Construct a new enum object.
Parameters
----------
*options : iterable of str
The names of the fields for the enum.
Returns
-------
enum
A new enum collection.
Examples
--------
>>> e = enum('a', 'b', 'c')
>>> e
<enum: ('a', 'b', 'c')>
>>> e.a
0
>>> e.b
1
>>> e.a in e
True
>>> tuple(e)
(0, 1, 2)
Notes
-----
Identity checking is not guaranteed to work with enum members, instead
equality checks should be used. From CPython's documentation:
"The current implementation keeps an array of integer objects for all
integers between -5 and 256, when you create an int in that range you
actually just get back a reference to the existing object. So it should be
possible to change the value of 1. I suspect the behaviour of Python in
this case is undefined. :-)"
"""
options = (option,) + options
rangeob = range(len(options))
try:
inttype = _inttypes[int(np.log2(len(options) - 1)) // 8]
except IndexError:
raise OverflowError(
'Cannot store enums with more than sys.maxsize elements, got %d' %
len(options),
)
class _enum(Structure):
_fields_ = [(o, inttype) for o in options]
def __iter__(self):
return iter(rangeob)
def __contains__(self, value):
return 0 <= value < len(options)
def __repr__(self):
return '<enum: %s>' % (
('%d fields' % len(options))
if len(options) > 10 else
repr(options)
)
return _enum(*rangeob)
| 24.236842 | 78 | 0.598625 |
ace2d0d47d32b4a200f1b3f75221b8473a5f8009 | 4,832 | py | Python | hsma_bandits_1.py | TomMonks/mab_game | ff1bf03f89103abcdc92e6bd26e39772bed93f08 | [
"MIT"
] | null | null | null | hsma_bandits_1.py | TomMonks/mab_game | ff1bf03f89103abcdc92e6bd26e39772bed93f08 | [
"MIT"
] | null | null | null | hsma_bandits_1.py | TomMonks/mab_game | ff1bf03f89103abcdc92e6bd26e39772bed93f08 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
HSMA Multi-arm bandit code-along
In this python notebook you will learn about
* The exploration-exploitation dilemma in reinforcement learning
* How multi-arm bandits 'home-in' on the best solution over time.
Notes:
This python file has been setup to run in Spyter using its 'cell' approach
to execution. Click your cursor anywhere in a cell (indicated by the
dividing lines) and press Shift-Return.
"""
#%%
# Standard Imports
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
#%%
#import multi-arm bandit agents
from relearn.bandit_world.agents import (EpsilonGreedy,
AnnealingEpsilonGreedy)
#
from relearn.bandit_world.environments import (custom_bandit_problem,
BernoulliCasino)
#%%
# Step 1: Create some bandits
bandit_arms = custom_bandit_problem(0.2, 0.5, 0.3, 0.75, 0.3)
#how many bandit arms to we have?
print(f'No. of bandits: {len(bandit_arms)}')
#%%
# Step 2: pull a bandit arm
chosen_arm = 3
#bandits return either a 0 or 1.
result = bandit_arms[chosen_arm].play()
if result == 1:
print('you won!')
else:
print('you lost')
#%%
# Step 3: Create a casino environment for an agent to interact with
environment = BernoulliCasino(bandits=bandit_arms)
#%%
# Step 4: Create an instance of an EpsilonGreedy Agent and solve!
agent = EpsilonGreedy(epsilon=0.2, budget=50, environment=environment)
agent.solve()
#%%
print(f'Total reward: {agent.total_reward}')
print('Action taken:')
print(agent.actions)
#%%
# Let's look at how we would organise our code to that we can run experiments
# First some simple utility functions to help us print out results...
def print_reward(agent):
'''
Utility function to print formatted results
Parameters
----------
agent : object
Multi arm bandit agent.
Returns
-------
None.
'''
print(f'Total reward: {agent.total_reward}')
print('\nFinal Model:\n------')
for bandit_index in range(len(agent._means)):
print(f'Bandit {bandit_index + 1}:\t{agent._means[bandit_index]:.2f}')
def visualise_agent_actions(agent):
'''
Visualise the actions taken in a bar chart
Params:
-----
agent : object
Multi arm bandit agent.
'''
actions = agent.actions
x = [i + 1 for i in range(actions.shape[0])]
plt.bar(x, actions)
plt.title('Histogram of Actions Taken by Algorithm')
plt.xlabel('Arm')
plt.ylabel('Number of times each arm was selected')
plt.show()
#%%
def epsilon_greedy_experiment(epsilon=0.1, budget=1000, random_seed=None):
'''
Simple example experiment of the MAB
Params:
-------
epsilon: float
exploration parameter. percentage of bandit rounds that are random
budget: int
Total number of rounds the agent will play
random_seed: int
Parameter to control random sampling to ensure you get a repeated
result
Returns:
-------
None.
'''
print('------\nAgent: Epsilon-Greedy')
#to reproduce the result set a random seed
np.random.seed(seed=random_seed)
#create environment
bandit_arms = custom_bandit_problem(0.2, 0.5, 0.3, 0.75, 0.3)
environment = BernoulliCasino(bandits=bandit_arms)
#create agent and solve
agent = EpsilonGreedy(epsilon=0.1, budget=budget, environment=environment)
agent.solve()
#print out formatted results
print_reward(agent)
visualise_agent_actions(agent)
#%%
#run out experiment
epsilon_greedy_experiment(random_seed=42)
#%%
#Let's try a different experiment using an annealing epsilon greedy agent
#This type of agent starts of very exploratative, but gradually starts to
#exploit more as the budget runs out.
def anneal_experiment(budget=1000, random_seed=None):
'''
simple example experiment of the MAB
using AnnealingEpsilonGreedy
Params:
-------
budget: int
Total number of rounds the agent will play
random_seed: int
Parameter to control random sampling to ensure you get a repeated
result
Returns:
-------
None.
'''
print('--------\nAgent:\tAnnealing Epsilon-Greedy')
#to reproduce the result set a random seed
np.random.seed(seed=random_seed)
bandit_arms = custom_bandit_problem(0.2, 0.5, 0.3, 0.75, 0.3)
environment = BernoulliCasino(bandits=bandit_arms)
agent = AnnealingEpsilonGreedy(budget=budget, environment=environment)
agent.solve()
print_reward(agent)
visualise_agent_actions(agent)
#%%
#run an annealing experiment.
anneal_experiment(random_seed=42)
#%% | 22.579439 | 78 | 0.659975 |
ace2d2016827787ffee475a229d88fe426335a04 | 813 | py | Python | tests/test_import.py | bourbaki-py/introspection | f7e3d4e85188c9725f906b11eb59c3d2487c6f9d | [
"Apache-2.0"
] | null | null | null | tests/test_import.py | bourbaki-py/introspection | f7e3d4e85188c9725f906b11eb59c3d2487c6f9d | [
"Apache-2.0"
] | 6 | 2021-10-30T13:38:47.000Z | 2021-10-30T14:12:34.000Z | tests/test_import.py | bourbaki-py/introspection | f7e3d4e85188c9725f906b11eb59c3d2487c6f9d | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
import os
from itertools import chain
from pathlib import Path
from setuptools import find_packages
import pytest
module = type(os)
top_dir = Path(__file__).parent.parent
def all_module_paths(rootdir):
parents = find_packages(rootdir)
return list(chain(parents, chain.from_iterable(map(submodule_paths, parents))))
def submodule_paths(parent_module_path: str):
paths = Path(parent_module_path.replace(".", os.path.sep)).glob("*.py")
return (
parent_module_path + "." + os.path.splitext(p.name)[0].replace(os.path.sep, ".")
for p in paths
if not p.name.startswith("_")
)
@pytest.mark.parametrize("module_name", all_module_paths(top_dir))
def test_submodule_import(module_name):
mod = __import__(module_name)
assert isinstance(mod, module)
| 26.225806 | 88 | 0.723247 |
ace2d251ae85e55cde81411eae9d1cf41da739ae | 5,811 | py | Python | faster_rcnn/mkl.py | Intervideo/TFFRCNN | 26f6b12cf1265296cb2c80c6ff28ebf008ec46c0 | [
"MIT"
] | null | null | null | faster_rcnn/mkl.py | Intervideo/TFFRCNN | 26f6b12cf1265296cb2c80c6ff28ebf008ec46c0 | [
"MIT"
] | null | null | null | faster_rcnn/mkl.py | Intervideo/TFFRCNN | 26f6b12cf1265296cb2c80c6ff28ebf008ec46c0 | [
"MIT"
] | null | null | null | import numpy as np
import pylab as pl
from scikits.learn import svm
from scikits.learn.base import BaseEstimator, ClassifierMixin
class RBF(object):
"""docstring for RBF"""
def __init__(self, gamma):
self.gamma = gamma
def __call__(self, X, Y=None):
XX = np.sum(X * X, axis=1)[:,np.newaxis]
if Y is None:
Y = X
YY = XX.T
else:
YY = np.sum(Y * Y, axis=1)[np.newaxis,:]
distances = XX + YY # Using broadcasting
distances -= 2 * np.dot(X, Y.T)
distances = np.maximum(distances, 0)
return np.exp(- self.gamma * distances)
def linear(X, Y=None):
"""Linear kernel"""
if Y is None:
Y = X
return np.dot(X, Y.T)
class MultiKernel(object):
def __init__(self, kernels, gammas, X=None):
self.kernels = kernels
self.gammas = gammas
self.X = X
self.Ks = None
if X is not None: # Precompute kernels
self.Ks = [kernel(X) for kernel in kernels]
def __call__(self, X, Y=None):
"""Construct kernel by linear combination"""
K = 0
if X is self.X and (Y is X or Y is None):
for gamma, Ki in zip(self.gammas, self.Ks):
if gamma > 0.0:
K += gamma * Ki
else:
for gamma, kernel in zip(self.gammas, self.kernels):
if gamma > 0.0:
K += gamma * kernel(X, Y)
return K
###############################################################################
# Multi Kernel SVC (2 classes only)
class MultiKernelSVC(BaseEstimator, ClassifierMixin):
"""Lp - Multiple Kernel Learning (MKL)
2 classes only
Parameters
----------
kernel : string, optional
List of precomputed kernels.
p : float, optional
???
C : float, optional (default=1.0)
Penalty parameter C of the error term.
Notes
-----
Add ref Szafranski 2010 Machine Learning Journal
"""
def __init__(self, kernels, p=1, maxit=10, C=1, verbose=False, tol=1e-5,
store_objective=False):
self.kernels = kernels
self.p = p
self.maxit = maxit
self.C = C
self.verbose = verbose
self.tol = tol
self.store_objective = store_objective
def fit(self, X, y, **params):
"""fit the MKL and learn the kernel"""
self._set_params(**params)
X = np.atleast_2d(X)
y = y.ravel()
classes = np.unique(y)
n_classes = classes.size
assert n_classes == 2
y = np.array(y, dtype=np.int)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
p = float(self.p)
kernels = self.kernels
C = self.C
n_kernels = len(self.kernels)
# kernel weights
gammas = (1.0 / n_kernels) ** (1.0 / p) * np.ones(n_kernels)
# Construct kernel by linear combination
multi_kernel = MultiKernel(kernels, gammas, X)
Ks = multi_kernel.Ks
norms = np.empty(n_kernels)
maxit = self.maxit
objective = []
for it in range(maxit):
if self.verbose:
print "Gammas : %s" % multi_kernel.gammas
svc = svm.SVC(kernel=multi_kernel, C=C)
svc.fit(X, y)
dual_coef_ = svc.dual_coef_.ravel()
support_ = np.array(svc.support_, dtype=np.int).ravel() - 1
# Update kernel weights
for i, (gamma, K) in enumerate(zip(multi_kernel.gammas, Ks)):
norms[i] = (gamma**2) * np.dot(dual_coef_,
np.dot(K[support_][:,support_], dual_coef_))
if self.store_objective:
dual_obj = -0.5 * np.dot(dual_coef_,
np.dot(multi_kernel(X[support_]), dual_coef_)) + \
(dual_coef_ * y[support_]).sum()
objective.append(dual_obj)
# print norms
norms = norms ** (1.0 / (1.0 + p))
scaling = np.sum(norms ** p ) ** (1.0 / p)
gammas_ = norms / scaling
gammas_[gammas_ < 1e-6 * gammas_.max()] = 0.0
if (gammas_ - multi_kernel.gammas).max() < self.tol:
if self.verbose:
print "Converged after %d interations" % it
break
multi_kernel.gammas = gammas_
else:
if self.verbose:
print "Did NOT converge after %d interations" % it
self._svc = svc
self.gammas_ = multi_kernel.gammas
self.objective = objective
return self
def predict(self, X):
return self._svc.predict(X)
if __name__ == '__main__':
xx, yy = np.meshgrid(np.linspace(-5, 5, 40), np.linspace(-5, 5, 40))
np.random.seed(0)
X = np.random.randn(300, 2)
y = np.logical_xor(X[:,0]>0, X[:,1]>0)
# y = X[:,0]>0
y = np.array(y, dtype=np.int)
y[y==0] = -1
# Define the kernels
kernels = [RBF(10 ** k) for k in range(-5, 0)] # some RBF kernels
kernels.append(linear) # Add linear kernel
# fit the model
clf = MultiKernelSVC(kernels=kernels, C=1e6, verbose=True, maxit=100,
tol=1e-5, p=1, store_objective=True)
clf.fit(X, y)
objective = clf.objective
# pl.close('all')
# pl.figure()
# pl.plot(objective)
# pl.xlabel('Iterations')
# pl.ylabel('Dual objective')
# # plot the line, the points, and the nearest vectors to the plane
# Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Z = Z.reshape(xx.shape)
# # pl.close('all')
# pl.figure()
# pl.set_cmap(pl.cm.Paired)
# pl.pcolormesh(xx, yy, Z)
# pl.scatter(X[:,0], X[:,1], c=y)
# pl.axis('tight')
# pl.show()
| 29.348485 | 87 | 0.524867 |
ace2d359647b4a4ec33cd8db0c101e6894923b8c | 11,081 | py | Python | tests/callbacks/test_stochastic_weight_avg.py | HabanaAI/pytorch-lightning | 07b4452b71dc7397fefb35477f922eff096752ad | [
"Apache-2.0"
] | 15,666 | 2020-01-14T07:16:15.000Z | 2022-03-31T23:22:26.000Z | tests/callbacks/test_stochastic_weight_avg.py | HabanaAI/pytorch-lightning | 07b4452b71dc7397fefb35477f922eff096752ad | [
"Apache-2.0"
] | 9,140 | 2020-01-14T03:10:42.000Z | 2022-03-31T19:57:09.000Z | tests/callbacks/test_stochastic_weight_avg.py | HabanaAI/pytorch-lightning | 07b4452b71dc7397fefb35477f922eff096752ad | [
"Apache-2.0"
] | 2,340 | 2020-01-14T06:45:32.000Z | 2022-03-31T22:57:07.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from unittest import mock
import pytest
import torch
from torch import nn
from torch.optim.swa_utils import SWALR
from torch.utils.data import DataLoader
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import StochasticWeightAveraging
from pytorch_lightning.strategies import DDPSpawnStrategy, Strategy
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset
from tests.helpers.runif import RunIf
class SwaTestModel(BoringModel):
def __init__(self, batchnorm: bool = True, interval: str = "epoch", iterable_dataset: bool = False):
super().__init__()
layers = [nn.Linear(32, 32)]
if batchnorm:
layers.append(nn.BatchNorm1d(32))
layers += [nn.ReLU(), nn.Linear(32, 2)]
self.layer = nn.Sequential(*layers)
self.interval = interval
self.iterable_dataset = iterable_dataset
def training_step(self, batch, batch_idx):
output = self.forward(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def train_dataloader(self):
dset_cls = RandomIterableDataset if self.iterable_dataset else RandomDataset
dset = dset_cls(32, 64)
return DataLoader(dset, batch_size=2)
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": torch.optim.lr_scheduler.StepLR(optimizer, step_size=1),
"interval": self.interval,
},
}
class SwaTestCallback(StochasticWeightAveraging):
update_parameters_calls: int = 0
transfer_weights_calls: int = 0
def update_parameters(self, *args, **kwargs):
self.update_parameters_calls += 1
return StochasticWeightAveraging.update_parameters(*args, **kwargs)
def transfer_weights(self, *args, **kwargs):
self.transfer_weights_calls += 1
return StochasticWeightAveraging.transfer_weights(*args, **kwargs)
def on_train_epoch_start(self, trainer, *args):
super().on_train_epoch_start(trainer, *args)
assert trainer.fit_loop._skip_backward == (trainer.current_epoch > self.swa_end)
if self.swa_start <= trainer.current_epoch:
assert isinstance(trainer.lr_schedulers[0]["scheduler"], SWALR)
assert trainer.lr_schedulers[0]["interval"] == "epoch"
assert trainer.lr_schedulers[0]["frequency"] == 1
def on_train_epoch_end(self, trainer, *args):
super().on_train_epoch_end(trainer, *args)
if self.swa_start <= trainer.current_epoch <= self.swa_end:
swa_epoch = trainer.current_epoch - self.swa_start
assert self.n_averaged == swa_epoch + 1
elif trainer.current_epoch > self.swa_end:
assert self.n_averaged == self._max_epochs - self.swa_start
def on_train_end(self, trainer, pl_module):
super().on_train_end(trainer, pl_module)
# make sure these are correctly set again
assert not trainer.fit_loop._skip_backward
assert trainer.accumulate_grad_batches == 2
assert trainer.num_training_batches == 5
if not isinstance(trainer.strategy, DDPSpawnStrategy):
# check backward call count. the batchnorm update epoch should not backward
assert trainer.strategy.backward.call_count == trainer.max_epochs * trainer.limit_train_batches
# check call counts
assert self.update_parameters_calls == trainer.max_epochs - (self._swa_epoch_start - 1)
assert self.transfer_weights_calls == 1
def train_with_swa(
tmpdir, batchnorm=True, strategy=None, gpus=None, num_processes=1, interval="epoch", iterable_dataset=False
):
model = SwaTestModel(batchnorm=batchnorm, interval=interval, iterable_dataset=iterable_dataset)
swa_start = 2
max_epochs = 5
swa_callback = SwaTestCallback(swa_epoch_start=swa_start, swa_lrs=0.1)
assert swa_callback.update_parameters_calls == 0
assert swa_callback.transfer_weights_calls == 0
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
max_epochs=max_epochs,
limit_train_batches=5,
limit_val_batches=0,
callbacks=[swa_callback],
accumulate_grad_batches=2,
strategy=strategy,
gpus=gpus,
num_processes=num_processes,
)
with mock.patch.object(Strategy, "backward", wraps=trainer.strategy.backward):
trainer.fit(model)
# check the model is the expected
assert trainer.lightning_module == model
@RunIf(min_gpus=2, standalone=True)
def test_swa_callback_ddp(tmpdir):
train_with_swa(tmpdir, strategy="ddp", gpus=2)
@RunIf(min_gpus=2)
def test_swa_callback_ddp_spawn(tmpdir):
train_with_swa(tmpdir, strategy="ddp_spawn", gpus=2)
@RunIf(skip_windows=True, skip_49370=True)
def test_swa_callback_ddp_cpu(tmpdir):
train_with_swa(tmpdir, strategy="ddp_spawn", num_processes=2)
@RunIf(min_gpus=1)
def test_swa_callback_1_gpu(tmpdir):
train_with_swa(tmpdir, gpus=1)
@pytest.mark.parametrize("batchnorm", (True, False))
@pytest.mark.parametrize("iterable_dataset", (True, False))
def test_swa_callback(tmpdir, batchnorm: bool, iterable_dataset: bool):
train_with_swa(tmpdir, batchnorm=batchnorm, iterable_dataset=iterable_dataset)
@pytest.mark.parametrize("interval", ("epoch", "step"))
def test_swa_callback_scheduler_step(tmpdir, interval: str):
train_with_swa(tmpdir, interval=interval)
def test_swa_warns(tmpdir, caplog):
model = SwaTestModel(interval="step")
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, callbacks=StochasticWeightAveraging())
with caplog.at_level(level=logging.INFO), pytest.warns(UserWarning, match="SWA is currently only supported"):
trainer.fit(model)
assert "Swapping scheduler `StepLR` for `SWALR`" in caplog.text
def test_swa_raises():
    """Constructor validation: invalid `swa_epoch_start` values and an
    invalid `swa_lrs` list must raise `MisconfigurationException`."""
    epoch_start_match = ">0 integer or a float between 0 and 1"
    for invalid_start in (0, 1.5, -1):
        with pytest.raises(MisconfigurationException, match=epoch_start_match):
            StochasticWeightAveraging(swa_epoch_start=invalid_start, swa_lrs=0.1)
    with pytest.raises(MisconfigurationException, match="positive float, or a list of positive floats"):
        StochasticWeightAveraging(swa_epoch_start=5, swa_lrs=[0.2, 1])
@pytest.mark.parametrize("stochastic_weight_avg", [False, True])
@pytest.mark.parametrize("use_callbacks", [False, True])
def test_trainer_and_stochastic_weight_avg(tmpdir, use_callbacks: bool, stochastic_weight_avg: bool):
    """Test to ensure SWA Callback is injected when `stochastic_weight_avg` is provided to the Trainer."""
    class TestModel(BoringModel):
        def configure_optimizers(self):
            # plain SGD so the lr derived by the Trainer flag (0.1) is known below
            optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            return optimizer
    model = TestModel()
    kwargs = {
        "default_root_dir": tmpdir,
        "callbacks": StochasticWeightAveraging(swa_lrs=1e-3) if use_callbacks else None,
        "stochastic_weight_avg": stochastic_weight_avg,
        "limit_train_batches": 4,
        "limit_val_batches": 4,
        "max_epochs": 2,
    }
    if stochastic_weight_avg:
        # the Trainer flag is deprecated; constructing with it must warn
        with pytest.deprecated_call(match=r"stochastic_weight_avg=True\)` is deprecated in v1.5"):
            trainer = Trainer(**kwargs)
    else:
        trainer = Trainer(**kwargs)
    trainer.fit(model)
    if use_callbacks or stochastic_weight_avg:
        # exactly one SWA callback regardless of how it was requested; an explicit
        # callback keeps its lr (1e-3), the flag derives 0.1 from the optimizer
        assert sum(1 for cb in trainer.callbacks if isinstance(cb, StochasticWeightAveraging)) == 1
        assert trainer.callbacks[0]._swa_lrs == [1e-3 if use_callbacks else 0.1]
    else:
        assert all(not isinstance(cb, StochasticWeightAveraging) for cb in trainer.callbacks)
def test_swa_deepcopy(tmpdir):
    """Test to ensure SWA Callback doesn't deepcopy dataloaders and datamodule potentially leading to OOM."""
    class TestSWA(StochasticWeightAveraging):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # flipped to True once the hook below has actually run
            self.on_before_accelerator_backend_setup_called = False
        def on_before_accelerator_backend_setup(self, trainer: "Trainer", pl_module: "LightningModule"):
            super().on_before_accelerator_backend_setup(trainer, pl_module)
            # the averaged copy must have its own bound `train_dataloader` and no
            # trainer reference, i.e. the copy did not drag the original's
            # dataloaders/trainer along
            assert self._average_model.train_dataloader is not pl_module.train_dataloader
            assert self._average_model.train_dataloader.__self__ == self._average_model
            assert self._average_model.trainer is None
            self.on_before_accelerator_backend_setup_called = True
    model = BoringModel()
    swa = TestSWA()
    trainer = Trainer(default_root_dir=tmpdir, callbacks=swa, fast_dev_run=True)
    trainer.fit(model, train_dataloaders=DataLoader(RandomDataset(32, 2)))
    # guard against the hook silently not firing
    assert swa.on_before_accelerator_backend_setup_called
def test_swa_multiple_lrs(tmpdir):
    # one SWA lr per optimizer param group
    swa_lrs = [0.123, 0.321]
    class TestModel(BoringModel):
        def __init__(self):
            # NOTE(review): deliberately calls the grandparent __init__ (skipping
            # BoringModel.__init__), so only the two layers below are registered —
            # confirm this is intended
            super(BoringModel, self).__init__()
            self.layer1 = torch.nn.Linear(32, 32)
            self.layer2 = torch.nn.Linear(32, 2)
        def forward(self, x):
            x = self.layer1(x)
            x = self.layer2(x)
            return x
        def configure_optimizers(self):
            # two param groups with distinct base lrs, one per layer
            params = [{"params": self.layer1.parameters(), "lr": 0.1}, {"params": self.layer2.parameters(), "lr": 0.2}]
            return torch.optim.Adam(params)
        def on_train_epoch_start(self):
            # `trainer` is captured from the enclosing test function's scope; it is
            # assigned below before fit() triggers this hook
            optimizer = trainer.optimizers[0]
            assert [pg["lr"] for pg in optimizer.param_groups] == [0.1, 0.2]
            assert [pg["initial_lr"] for pg in optimizer.param_groups] == swa_lrs
            assert [pg["swa_lr"] for pg in optimizer.param_groups] == swa_lrs
            self.on_train_epoch_start_called = True
    model = TestModel()
    swa_callback = StochasticWeightAveraging(swa_lrs=swa_lrs)
    trainer = Trainer(
        default_root_dir=tmpdir,
        callbacks=swa_callback,
        fast_dev_run=1,
    )
    trainer.fit(model)
    # guard against the hook silently not firing
    assert model.on_train_epoch_start_called
| 39.859712 | 119 | 0.706615 |
ace2d3e616512775ebe90b1821f9cff82ed1c373 | 1,453 | py | Python | gitfire.py | rohit3463/gitfire | a25d113870fbc6b9efb53a55bea8d06f143940c8 | [
"MIT"
] | null | null | null | gitfire.py | rohit3463/gitfire | a25d113870fbc6b9efb53a55bea8d06f143940c8 | [
"MIT"
] | null | null | null | gitfire.py | rohit3463/gitfire | a25d113870fbc6b9efb53a55bea8d06f143940c8 | [
"MIT"
] | null | null | null | import os
import sys
import configparser
import git
from datetime import datetime
from getpass import getuser
from serial import Serial
from serial.serialutil import SerialException
def get_serial_port():
    """Return the device path of the most recently attached ttyACM device,
    discovered by grepping the kernel log (Linux-only)."""
    cmd = "dmesg | egrep ttyACM | cut -f3 -d: | tail -n1"
    device_name = os.popen(cmd).read().strip()
    return "/dev/" + device_name
def getGitRepo():
    """Read the target repository path from the user's gitfire config file.

    Raises KeyError if the GIT_FIRE_REPO option is missing.
    """
    parser = configparser.ConfigParser()
    conf_path = "/home/{0}/.config/gitfire.conf".format(getuser())
    parser.read(conf_path)
    return parser['DEFAULT']['GIT_FIRE_REPO']
def FireInTheHole():
    """Commit and push everything in the configured repo to an emergency branch.

    The branch name is derived from the current timestamp (spaces and colons
    replaced so the name is a valid git ref). On a missing config key an error
    is written to stderr. The repo handle is always closed once it was opened.
    """
    repo = None
    try:
        gitRepo = getGitRepo()
        sys.stdout.write(gitRepo)
        repo = git.Repo(gitRepo)
        branch = 'fire' + str(datetime.now())
        branch = branch.replace(" ", "+")
        branch = branch.replace(":", "-")
        repo.git.checkout('-b', branch)
        repo.git.add('-A')
        repo.git.commit('-m', 'Fire! Adding commiting all files')
        repo.git.push('origin', branch)
        sys.stdout.write("Pushed to remote branch: {0}".format(branch))
    except KeyError:
        # getGitRepo raises KeyError when the config option is absent
        sys.stderr.write("Error in reading config file, Please Check")
    finally:
        # The original only closed the repo on full success, leaking the handle
        # whenever checkout/commit/push raised; close it whenever it was opened.
        if repo is not None:
            repo.close()
# Entry point: poll the Arduino fire sensor over serial once; any non-'0'
# reading triggers the emergency commit-and-push.
try:
    with Serial(get_serial_port(), 9600) as ser:
        fire_state = '0'
        # readline() blocks until the Arduino sends a line
        fire_state = ser.readline().decode("utf-8")
        if fire_state != '0':
            # acknowledge to the board, then push everything
            ser.write(b'1')
            sys.stdout.write("FIRE!!")
            FireInTheHole()
except SerialException:
    sys.stderr.write("Arduino not connected to serial port")
ace2d457f28a3bba4cdb29b35bc7502a5c9644ee | 6,362 | py | Python | Lib/site-packages/tensorflow_probability/python/internal/_numpy/tensor_util.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/site-packages/tensorflow_probability/python/internal/_numpy/tensor_util.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/site-packages/tensorflow_probability/python/internal/_numpy/tensor_util.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for processing Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.internal.backend.numpy.compat import v2 as tf
from tensorflow_probability.python.internal._numpy import dtype_util
__all__ = [
'convert_nonref_to_tensor',
'discover_trainable_variables',
'discover_variables',
'is_module',
'is_ref',
'is_trainable_variable',
'is_variable',
]
def convert_nonref_to_tensor(value, dtype=None, dtype_hint=None, name=None):
  """Converts `value` to a `Tensor` unless it has reference semantics.

  Reference-type inputs -- `tf.Variable`s, or `tf.Module`s exposing `dtype`
  and `shape` (see `is_ref`) -- are passed through untouched so later reads
  observe their current value. Everything else is handed to
  `tf.convert_to_tensor`, so any input that function accepts is accepted here.

  Args:
    value: An object with a registered `Tensor` conversion function, or a
      reference-type object, or `None`.
    dtype: Optional element type for the returned tensor. For reference
      inputs the base dtype must match the input's base dtype.
    dtype_hint: Optional soft preference for the element type, used when
      `dtype` is None; ignored if the conversion is not possible.
    name: Optional name to use if a new `Tensor` is created.

  Returns:
    `None` if `value` is `None`; `value` itself when it is a reference type;
    otherwise a `Tensor` built from `value`.

  Raises:
    TypeError: If a reference-type `value` does not match the requested
      `dtype`, or if no conversion function is registered for `value`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
    ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
  """
  # We explicitly do not use a tf.name_scope to avoid graph clutter.
  if value is None:
    return None
  if not is_ref(value):
    return tf.convert_to_tensor(
        value, dtype=dtype, dtype_hint=dtype_hint, name=name)
  # Reference types pass through; only validate the dtype when one was asked.
  if dtype is None:
    return value
  dtype_base = dtype_util.base_dtype(dtype)
  value_dtype_base = dtype_util.base_dtype(value.dtype)
  if dtype_base != value_dtype_base:
    raise TypeError('Mutable type must be of dtype "{}" but is "{}".'.format(
        dtype_util.name(dtype_base), dtype_util.name(value_dtype_base)))
  return value
def is_ref(x):
  """Evaluates if the object has reference semantics.

  An object is deemed "reference" if it is a `tf.Variable` instance or is
  derived from a `tf.Module` which exposes both `dtype` and `shape`
  properties.

  Args:
    x: Any object.

  Returns:
    is_ref: Python `bool`, `True` iff `x` has reference semantics.
  """
  # TODO(b/134430874): Consider making this recurse through nests, e.g.,
  # `tensor_util.is_ref([tf.Variable(0.), np.array(1.)])`
  # returns True. Note: we'd need to actually create a tf.Module on user's
  # behalf and it would need a `dtype` and `shape`. (I.e., there would be some
  # work to support this.)
  if is_variable(x):
    return True
  return is_module(x) and hasattr(x, 'dtype') and hasattr(x, 'shape')
def is_variable(x):
  """Returns `True` when input is a `tf.Variable`, otherwise `False`."""
  # Instance check against the `tf.Variable` base class.
  return isinstance(x, tf.Variable)
def is_trainable_variable(x):
  """Returns `True` when input is trainable `tf.Variable`, otherwise `False`."""
  if not is_variable(x):
    return False
  return getattr(x, 'trainable', False)
def is_module(x):
  """Returns `True` when input is a `tf.Module`, otherwise `False`."""
  # Instance check against the `tf.Module` base class.
  return isinstance(x, tf.Module)
class _Track(tf.Module):
  """Bridge to create functional interface for variable tracking."""

  def __init__(self, *args, **kwargs):
    # Storing the inputs as attributes lets `tf.Module`'s automatic dependency
    # tracking discover any variables reachable from them.
    self._args = args
    self._kwargs = kwargs
def discover_trainable_variables(x):
  """Returns `tuple` of all trainable `tf.Variables` discoverable in input.

  Warning: unlike possibly `tf.Module`, use of this function only does a static,
  "one-time" discovery. (This is self-evidently true from its functional
  nature.)

  Args:
    x: An object to inspected for `tf.Variable` dependencies.

  Returns:
    trainable_vars: A Python `tuple` of `tf.Variable`s with `trainable=True`.
  """
  # Wrapping in a throwaway module performs the one-time dependency traversal.
  return _Track(x).trainable_variables
def discover_variables(x):
  """Returns `tuple` of all `tf.Variables` discoverable in input.

  Warning: unlike possibly `tf.Module`, use of this function only does a static,
  "one-time" discovery. (This is self-evidently true from its functional
  nature.)

  Args:
    x: An object to inspected for `tf.Variable` dependencies.

  Returns:
    vars: A Python `tuple` of `tf.Variable`s, regardless of their value of
      `trainable`.
  """
  # Wrapping in a throwaway module performs the one-time dependency traversal.
  return _Track(x).variables
| 32.294416 | 80 | 0.702295 |
ace2d49be517717f20f4985b0e149b72518433f1 | 293 | py | Python | docker/1.4/config/create_user.py | w-michal/papermerge | 14703c3316deea06696da041b7adc4bd0b15270b | [
"Apache-2.0"
] | 1,586 | 2020-01-07T10:53:31.000Z | 2022-03-31T16:15:42.000Z | docker/1.4/config/create_user.py | w-michal/papermerge | 14703c3316deea06696da041b7adc4bd0b15270b | [
"Apache-2.0"
] | 398 | 2020-01-29T16:31:44.000Z | 2022-03-11T21:22:10.000Z | docker/1.4/config/create_user.py | w-michal/papermerge | 14703c3316deea06696da041b7adc4bd0b15270b | [
"Apache-2.0"
] | 196 | 2020-01-29T09:25:16.000Z | 2022-03-29T11:08:35.000Z | from django.contrib.auth import get_user_model
# see ref. below
# Idempotent bootstrap: create the default superuser only if it does not exist.
UserModel = get_user_model()
if not UserModel.objects.filter(username='admin').exists():
    # NOTE(review): hard-coded admin/admin credentials — intended for local
    # docker bootstrapping only; must not be used as-is in production
    user = UserModel.objects.create_user('admin', password='admin')
    user.is_superuser = True
    user.is_staff = True
    user.save()
| 26.636364 | 67 | 0.730375 |
ace2d53b27e9d51d6487dd3435e3d9f1579b9b4b | 1,425 | py | Python | src/c3nav/mapdata/migrations/0081_auto_20191225_1015.py | johnjohndoe/c3nav | a17f863a3512e305595c16b0300796b6bae81241 | [
"Apache-2.0"
] | 132 | 2016-11-12T01:45:23.000Z | 2022-03-08T15:17:10.000Z | src/c3nav/mapdata/migrations/0081_auto_20191225_1015.py | johnjohndoe/c3nav | a17f863a3512e305595c16b0300796b6bae81241 | [
"Apache-2.0"
] | 66 | 2016-09-29T09:46:19.000Z | 2022-03-11T23:26:18.000Z | src/c3nav/mapdata/migrations/0081_auto_20191225_1015.py | johnjohndoe/c3nav | a17f863a3512e305595c16b0300796b6bae81241 | [
"Apache-2.0"
] | 42 | 2016-09-29T08:34:57.000Z | 2022-03-08T15:17:15.000Z | # Generated by Django 2.2.8 on 2019-12-25 09:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: reworks the ReportUpdate model's options and
    # several of its fields. Schema intent lives in models.py; edits here
    # should normally be re-generated rather than hand-written.

    dependencies = [
        ('mapdata', '0080_auto_20191224_2203'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='reportupdate',
            options={'default_related_name': 'reportupdates', 'ordering': ('datetime',), 'verbose_name': 'Report update', 'verbose_name_plural': 'Report updates'},
        ),
        migrations.AlterField(
            model_name='reportupdate',
            name='author',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='reportupdates', to=settings.AUTH_USER_MODEL, verbose_name='author'),
        ),
        migrations.AlterField(
            model_name='reportupdate',
            name='comment',
            field=models.TextField(blank=True, verbose_name='comment'),
        ),
        migrations.AlterField(
            model_name='reportupdate',
            name='public',
            field=models.BooleanField(verbose_name='comment is public'),
        ),
        migrations.AlterField(
            model_name='reportupdate',
            name='report',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='updates', to='mapdata.Report'),
        ),
    ]
| 35.625 | 174 | 0.630877 |
ace2d6194196f1ef28a225ac4ca3a591c6e79624 | 8,152 | py | Python | pytorch_toolkit/object_detection/tests/train_tests_custom_object_detection.py | noodik/training_extensions | f52547f9b343280a81fc6cac93d38dbffee441ac | [
"Apache-2.0"
] | null | null | null | pytorch_toolkit/object_detection/tests/train_tests_custom_object_detection.py | noodik/training_extensions | f52547f9b343280a81fc6cac93d38dbffee441ac | [
"Apache-2.0"
] | null | null | null | pytorch_toolkit/object_detection/tests/train_tests_custom_object_detection.py | noodik/training_extensions | f52547f9b343280a81fc6cac93d38dbffee441ac | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import json
import os
import unittest
import yaml
from ote.tests.test_case import (
skip_if_cpu_is_not_supported,
skip_if_cuda_not_available,
skip_non_instantiated_template_if_its_allowed,
get_dependencies,
download_snapshot_if_not_yet
)
from ote.tests.utils import collect_ap
from ote.utils.misc import run_through_shell
def create_custom_object_detection_test_case(model_name):
    """Build a unittest.TestCase subclass that exercises train/eval/export
    for the given custom-object-detection model template.

    The returned class shells out to the template's train.py/eval.py/export.py
    scripts against the bundled airport sample dataset.
    """
    problem_name = 'custom-object-detection'
    domain_name = 'object_detection'
    metric_keys = ['bbox']
    ann_file=os.path.dirname(__file__) + '/../../../data/airport/annotation_example_train.json'
    img_root=os.path.dirname(__file__) + '/../../../data/airport/train'
    class TestCase(unittest.TestCase):
        domain = domain_name
        problem = problem_name
        model = model_name
        metrics = metric_keys
        topic = 'train'
        @classmethod
        def setUpClass(cls):
            # locate the instantiated template, fetch its snapshot, install deps
            cls.templates_folder = os.environ['MODEL_TEMPLATES']
            cls.template_folder = os.path.join(cls.templates_folder, domain_name, problem_name, model_name)
            skip_non_instantiated_template_if_its_allowed(cls.template_folder, problem_name, model_name)
            cls.template_file = os.path.join(cls.template_folder, 'template.yaml')
            cls.ann_file = ann_file
            cls.img_root = img_root
            cls.dependencies = get_dependencies(cls.template_file)
            cls.total_epochs = 1
            download_snapshot_if_not_yet(cls.template_file, cls.template_folder)
            run_through_shell(
                f'cd {cls.template_folder};'
                f'pip install -r requirements.txt;'
            )
        def setUp(self):
            # per-test output folder keyed by the test id
            self.output_folder = os.path.join(self.template_folder, f'output_{self.id()}')
            os.makedirs(self.output_folder, exist_ok=True)
        def do_evaluation(self, on_gpu):
            # run eval.py on the fine-tuned checkpoint and check every metric >= 0
            initial_command = 'export CUDA_VISIBLE_DEVICES=;' if not on_gpu else ''
            metrics_path = os.path.join(self.output_folder, "metrics.yaml")
            run_through_shell(
                f'{initial_command}'
                f'cd {self.template_folder};'
                f'python eval.py'
                f' --test-ann-files {self.ann_file}'
                f' --test-data-roots {self.img_root}'
                f' --save-metrics-to {metrics_path}'
                f' --load-weights {os.path.join(self.output_folder, "latest.pth")}'
            )
            with open(metrics_path) as read_file:
                content = yaml.safe_load(read_file)
            for metric_key in metric_keys:
                value = [metrics['value'] for metrics in content['metrics'] if metrics['key'] == metric_key][0]
                self.assertGreaterEqual(value, 0.0)
        def do_finetuning(self, on_gpu):
            # resume training from the downloaded snapshot for one short epoch
            log_file = os.path.join(self.output_folder, 'test_finetuning.log')
            initial_command = 'export CUDA_VISIBLE_DEVICES=;' if not on_gpu else ''
            run_through_shell(
                f'{initial_command}'
                f'cd {self.template_folder};'
                f'python train.py'
                f' --train-ann-files {self.ann_file}'
                f' --train-data-roots {self.img_root}'
                f' --val-ann-files {self.ann_file}'
                f' --val-data-roots {self.img_root}'
                f' --resume-from snapshot.pth'
                f' --save-checkpoints-to {self.output_folder}'
                f' --gpu-num 1'
                f' --batch-size 1'
                f' --epochs {self.total_epochs}'
                f' | tee {log_file}')
            self.assertTrue(os.path.exists(os.path.join(self.output_folder, 'latest.pth')))
        def do_finetuning_with_classes(self, on_gpu):
            # same as do_finetuning, but restricts training to an explicit class list
            log_file = os.path.join(self.output_folder, 'test_finetuning.log')
            initial_command = 'export CUDA_VISIBLE_DEVICES=;' if not on_gpu else ''
            run_through_shell(
                f'{initial_command}'
                f'cd {self.template_folder};'
                f'python train.py'
                f' --train-ann-files {self.ann_file}'
                f' --train-data-roots {self.img_root}'
                f' --val-ann-files {self.ann_file}'
                f' --val-data-roots {self.img_root}'
                f' --resume-from snapshot.pth'
                f' --save-checkpoints-to {self.output_folder}'
                f' --gpu-num 1'
                f' --batch-size 1'
                f' --epochs {self.total_epochs}'
                f' --classes vehicle,person,non-vehicle'
                f' | tee {log_file}')
            self.assertTrue(os.path.exists(os.path.join(self.output_folder, 'latest.pth')))
        def do_export(self, on_gpu):
            # export the fine-tuned checkpoint (e.g. to an OpenVINO IR)
            initial_command = 'export CUDA_VISIBLE_DEVICES=;' if not on_gpu else ''
            run_through_shell(
                f'{initial_command}'
                f'cd {os.path.dirname(self.template_file)};'
                f'pip install -r requirements.txt;'
                f'python export.py'
                f' --load-weights {os.path.join(self.output_folder, "latest.pth")}'
                f' --save-model-to {self.output_folder}'
            )
            with open(metrics_path) as read_file:
                content = yaml.safe_load(read_file)
        def do_evaluation_of_exported_model(self):
            # evaluate the exported model.bin and check every metric >= 0
            metrics_path = os.path.join(self.output_folder, "metrics_exported.yaml")
            run_through_shell(
                f'cd {os.path.dirname(self.template_file)};'
                f'python eval.py'
                f' --test-ann-files {self.ann_file}'
                f' --test-data-roots {self.img_root}'
                f' --load-weights {os.path.join(self.output_folder, "model.bin")}'
                f' --save-metrics-to {metrics_path}'
            )
            with open(metrics_path) as read_file:
                content = yaml.safe_load(read_file)
            for metric_key in self.metrics:
                value = [metrics['value'] for metrics in content['metrics'] if metrics['key'] == metric_key][0]
                self.assertGreaterEqual(value, 0.0)
        def test_e2e_on_gpu(self):
            # full pipeline on GPU: finetune -> eval -> export -> eval exported
            skip_if_cuda_not_available()
            self.do_finetuning(on_gpu=True)
            self.do_evaluation(on_gpu=True)
            self.do_export(on_gpu=True)
            self.do_evaluation_of_exported_model()
        def test_e2e_on_cpu(self):
            # full pipeline with CUDA hidden from the subprocesses
            skip_if_cpu_is_not_supported(self.template_file)
            self.do_finetuning(on_gpu=False)
            self.do_evaluation(on_gpu=False)
            self.do_export(on_gpu=False)
            self.do_evaluation_of_exported_model()
        def test_finetuning_with_classes_on_gpu(self):
            skip_if_cuda_not_available()
            self.do_finetuning_with_classes(on_gpu=True)
        def test_finetuning_with_classes_on_cpu(self):
            skip_if_cpu_is_not_supported(self.template_file)
            self.do_finetuning_with_classes(on_gpu=False)
    return TestCase
# Concrete suite bound to the 256x256 SSD template via the factory above.
class MobilenetV2_2S_SSD_256x256_TestCase(
    create_custom_object_detection_test_case(
        model_name='mobilenet_v2-2s_ssd-256x256',
    )
):
    """ Test case for mobilenet_v2-2s_ssd-256x256 model. """
# Concrete suite bound to the 384x384 SSD template via the factory above.
class MobilenetV2_2S_SSD_384x384_TestCase(
    create_custom_object_detection_test_case(
        model_name='mobilenet_v2-2s_ssd-384x384',
    )
):
    """ Test case for mobilenet_v2-2s_ssd-384x384 model. """
# Concrete suite bound to the 512x512 SSD template via the factory above.
class MobilenetV2_2S_SSD_512x512_TestCase(
    create_custom_object_detection_test_case(
        model_name='mobilenet_v2-2s_ssd-512x512',
    )
):
    """ Test case for mobilenet_v2-2s_ssd-512x512 model. """
ace2d68b3c23984bb5aac21e268da2f0d75f7f8f | 1,185 | py | Python | pyvisdk/do/vm_failed_to_power_off_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/vm_failed_to_power_off_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/vm_failed_to_power_off_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VmFailedToPowerOffEvent(vim, *args, **kwargs):
    '''This event records a failure to power off a virtual machine.

    :param vim: connected vim service; its client factory builds the bare
        data object.
    :param args: positional property values, filling `required` first, then
        `optional` in order.
    :param kwargs: property values by name; an unknown name raises.
    :return: the populated data object.
    :raise IndexError: if fewer than the 6 required values are supplied.
    :raise InvalidArgumentError: if a keyword does not name a known property.
    '''
    obj = vim.client.factory.create('{urn:vim25}VmFailedToPowerOffEvent')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 6:
        # there are 6 required properties; the old message wrongly said 7
        raise IndexError('Expected at least 6 arguments got: %d' % len(args))

    required = [ 'reason', 'template', 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]

    # positional values map onto required properties first, then optional ones
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| 34.852941 | 124 | 0.608439 |
ace2d79afb81ee1e9b452ed3f0f3fd3e0508a663 | 205 | py | Python | employee_management/employee_management/doctype/payments/test_payments.py | Vivekananthan112599/Frappe-Vivek | 6a2b70c736e17e9748c6a30e5722341acfb3b5c5 | [
"MIT"
] | null | null | null | employee_management/employee_management/doctype/payments/test_payments.py | Vivekananthan112599/Frappe-Vivek | 6a2b70c736e17e9748c6a30e5722341acfb3b5c5 | [
"MIT"
] | null | null | null | employee_management/employee_management/doctype/payments/test_payments.py | Vivekananthan112599/Frappe-Vivek | 6a2b70c736e17e9748c6a30e5722341acfb3b5c5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Gopi and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestPayments(unittest.TestCase):
    # Placeholder suite generated by Frappe's doctype scaffolding; no
    # behavior for the Payments doctype is verified yet.
    pass
| 18.636364 | 43 | 0.756098 |
ace2d970736763291a1e6e4380ddc82e33561fb2 | 349 | py | Python | documenters_aggregator/utils.py | easherma/city-scrapers | e61237765bb9ef091c954fc3e8334ceeba73d4e4 | [
"MIT"
] | null | null | null | documenters_aggregator/utils.py | easherma/city-scrapers | e61237765bb9ef091c954fc3e8334ceeba73d4e4 | [
"MIT"
] | null | null | null | documenters_aggregator/utils.py | easherma/city-scrapers | e61237765bb9ef091c954fc3e8334ceeba73d4e4 | [
"MIT"
] | null | null | null | from functools import reduce
from operator import getitem
def get_key(the_dict, location_string):
    """
    Get nested dict key using dot notation.

    ```
    get_key(mydict, 'key1.key2')
    ```

    Returns `None` when a key is missing (or an intermediate value is not
    subscriptable by key), and `''` when the lookup succeeds but yields a
    falsy value.
    """
    keys = location_string.split('.')
    try:
        value = reduce(getitem, keys, the_dict)
    except (KeyError, TypeError):
        return None
    return value or ''
| 19.388889 | 74 | 0.633238 |
ace2d9b707d96c0a665fa012ef389ef4f9067afa | 906 | py | Python | src/anyio/abc/_testing.py | cjmartian/anyio | 512b1e62933124648ccd4d760174905a82d4593d | [
"MIT"
] | 3 | 2021-04-03T18:20:56.000Z | 2021-09-09T17:44:46.000Z | src/anyio/abc/_testing.py | cjmartian/anyio | 512b1e62933124648ccd4d760174905a82d4593d | [
"MIT"
] | null | null | null | src/anyio/abc/_testing.py | cjmartian/anyio | 512b1e62933124648ccd4d760174905a82d4593d | [
"MIT"
] | 3 | 2021-01-31T16:40:52.000Z | 2021-08-29T18:32:34.000Z | from abc import ABCMeta, abstractmethod
from typing import Any, Awaitable, Callable, Dict
class TestRunner(metaclass=ABCMeta):
    """
    Encapsulates a running event loop. Every call made through this object will use the same event
    loop.
    """

    def __enter__(self) -> 'TestRunner':
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Always closes the loop; returning None means exceptions propagate.
        self.close()

    @abstractmethod
    def close(self) -> None:
        """Close the event loop."""

    @abstractmethod
    def call(self, func: Callable[..., Awaitable], *args: tuple, **kwargs: Dict[str, Any]):
        """
        Call the given function within the backend's event loop.

        :param func: a callable returning an awaitable
        :param args: positional arguments to call ``func`` with
        :param kwargs: keyword arguments to call ``func`` with
        :return: the return value of ``func``
        """
| 29.225806 | 98 | 0.639073 |
ace2d9e5133882a9ef40fd441f9b95ba30dc72b6 | 4,372 | py | Python | tests/test_movie_review.py | chakki-works/chazutsu | 811dedb19e3d670011da3679dc3befe4f1f2c41c | [
"Apache-2.0"
] | 250 | 2017-05-12T07:48:04.000Z | 2022-03-08T02:14:26.000Z | tests/test_movie_review.py | chakki-works/chazutsu | 811dedb19e3d670011da3679dc3befe4f1f2c41c | [
"Apache-2.0"
] | 10 | 2017-05-26T09:14:59.000Z | 2021-11-15T21:31:20.000Z | tests/test_movie_review.py | chakki-works/chazutsu | 811dedb19e3d670011da3679dc3befe4f1f2c41c | [
"Apache-2.0"
] | 28 | 2017-05-16T12:58:15.000Z | 2021-10-05T05:26:13.000Z | import os
import sys
import shutil
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import chazutsu.datasets
from tests.dataset_base_test import DatasetTestCase
class TestMovieReview(DatasetTestCase):
    """Download/extract/format checks for chazutsu's Movie Review datasets.

    Each test downloads real data into self.test_dir, verifies the prepared
    tab-separated "<label>\\t<text>" layout and label counts, then cleans up.
    """
    def test_prepare_polarity(self):
        d = chazutsu.datasets.MovieReview.polarity()
        dataset_root, extracted = d.save_and_extract(self.test_dir)
        path = d.prepare(dataset_root, extracted)
        pos = 0
        neg = 0
        try:
            with open(path, encoding="utf-8") as f:
                for ln in f:
                    els = ln.strip().split("\t")
                    if len(els) != 2:
                        raise Exception("data file is not constructed by label and text.")
                    if els[0] == "1":
                        pos += 1
                    else:
                        neg += 1
        except Exception as ex:
            # remove downloaded artifacts before reporting the failure
            d.clear_trush()
            self.fail(ex)
        count = d.get_line_count(path)
        d.clear_trush()
        os.remove(path)
        # pos=1000, neg=1000
        self.assertEqual(count, 2000)
        self.assertEqual(pos, 1000)
        self.assertEqual(neg, 1000)
    def test_extract_polarity_v1(self):
        # polarity v1 ships 5331 positive and 5331 negative snippets
        d = chazutsu.datasets.MovieReview.polarity_v1()
        dataset_root, extracted = d.save_and_extract(self.test_dir)
        path = d.prepare(dataset_root, extracted)
        pos = 0
        neg = 0
        try:
            with open(path, encoding="utf-8") as f:
                for ln in f:
                    els = ln.strip().split("\t")
                    if len(els) != 2:
                        raise Exception("data file is not constructed by label and text.")
                    if els[0] == "1":
                        pos += 1
                    else:
                        neg += 1
        except Exception as ex:
            d.clear_trush()
            self.fail(ex)
        count = d.get_line_count(path)
        d.clear_trush()
        # pos=1000, neg=1000
        self.assertEqual(count, 5331 + 5331)
        self.assertEqual(pos, 5331)
        self.assertEqual(neg, 5331)
    def test_extract_rating(self):
        # rating data: only the two-column layout is checked, not the counts
        d = chazutsu.datasets.MovieReview.rating()
        dataset_root, extracted = d.save_and_extract(self.test_dir)
        path = d.prepare(dataset_root, extracted)
        try:
            with open(path, encoding="utf-8") as f:
                for ln in f:
                    els = ln.strip().split("\t")
                    if len(els) != 2:
                        raise Exception("data file is not constructed by label and text.")
        except Exception as ex:
            d.clear_trush()
            self.fail(ex)
        count = d.get_line_count(path)
        d.clear_trush()
        self.assertTrue(count > 0)
    def test_extract_subjectivity(self):
        # subjectivity: 5000 subjective + 5000 objective sentences
        d = chazutsu.datasets.MovieReview.subjectivity()
        dataset_root, extracted = d.save_and_extract(self.test_dir)
        path = d.prepare(dataset_root, extracted)
        sub = 0
        obj = 0
        try:
            with open(path, encoding="utf-8") as f:
                for ln in f:
                    els = ln.strip().split("\t")
                    if len(els) != 2:
                        raise Exception("data file is not constructed by label and text.")
                    if els[0] == "1":
                        sub += 1
                    else:
                        obj += 1
        except Exception as ex:
            d.clear_trush()
            self.fail(ex)
        count = d.get_line_count(path)
        d.clear_trush()
        # sub=5000, obj=5000
        self.assertEqual(count, 5000*2)
        self.assertEqual(sub, 5000)
        self.assertEqual(obj, 5000)
    def test_download(self):
        # end-to-end resource API: vocab building, batching and round-tripping
        r = chazutsu.datasets.MovieReview.subjectivity().download(self.test_dir)
        target, data = r.test_data(split_target=True)
        self.assertEqual(target.shape[0], data.shape[0])
        r.make_vocab(vocab_size=1000)
        X, y = r.column("review").as_word_seq(fixed_len=20).to_batch("train", with_target=True)
        self.assertEqual(y.shape, (len(y), 1))
        self.assertEqual(X.shape, (len(y), 20, len(r.vocab)))
        backed = r.column("review").back(X)
        print(backed[:3])
        shutil.rmtree(r.root)
# Allow running this suite directly: `python test_movie_review.py`.
if __name__ == "__main__":
    unittest.main()
| 31.681159 | 95 | 0.528134 |
ace2da4e1b918e0e0c52c49551dc7323ca51aad8 | 916 | py | Python | tests/test_register.py | Sleepychord/cogdata | 529892512b11bac3e765490e46876bea88c14179 | [
"MIT"
] | 3 | 2021-08-13T14:22:30.000Z | 2021-12-08T09:54:31.000Z | tests/test_register.py | Sleepychord/cogdata | 529892512b11bac3e765490e46876bea88c14179 | [
"MIT"
] | 1 | 2021-12-09T03:14:05.000Z | 2021-12-11T10:07:14.000Z | tests/test_register.py | Sleepychord/cogdata | 529892512b11bac3e765490e46876bea88c14179 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
'''
@File : test_register.py
@Time : 2021/07/15 21:05:15
@Author : Ming Ding
@Contact : dm18@mail.tsinghua.edu.cn
'''
# here put the import lib
import os
import sys
import math
import shutil
import random
import torch
from cogdata.utils.helpers import get_registered_cls
def test_register():
    """Round-trip a tensor through registry-resolved saver/dataset classes."""
    work_dir = 'tmp/test_register'
    if os.path.exists(work_dir):
        shutil.rmtree(work_dir)
    os.makedirs(work_dir)
    # Resolve the classes through the registry rather than importing them
    # directly; this is the behavior under test.
    saver_cls = get_registered_cls('BinarySaver')
    dataset_cls = get_registered_cls('BinaryDataset')
    bin_path = os.path.join(work_dir, 'fake_register.bin')
    data = torch.arange(100)
    writer = saver_cls(bin_path, dtype='int32')
    writer.save(data)
    writer.commit()
    loaded = dataset_cls(bin_path, length_per_sample=5, dtype='int32', preload=True)
    # Sample 3, element 2 corresponds to flat index 3*5 + 2 == 17.
    assert loaded[3][2] == 17
    # The last sample must equal the final 5 elements written.
    assert (loaded[-1] == data[-5:]).all()
| 24.756757 | 87 | 0.682314 |
ace2db319172287eb2f68da7224f9894bff7db4f | 3,361 | py | Python | test/integration/smoke/test_global_settings.py | redbridge/cloudstack | 2218053fb11d501950e4beb80e9bee4ae472b5b4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/smoke/test_global_settings.py | redbridge/cloudstack | 2218053fb11d501950e4beb80e9bee4ae472b5b4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/smoke/test_global_settings.py | redbridge/cloudstack | 2218053fb11d501950e4beb80e9bee4ae472b5b4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for updating the granular Configuration parameter with scope and resource id provided.
"""
#Import Local Modules
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
#Import System modules
class TestUpdateConfigWithScope(cloudstackTestCase):
    """
    Test updating a configuration (global setting) at zone scope and
    verifying the change through the list API.
    """
    def setUp(self):
        # Fresh API client per test case.
        self.apiClient = self.testClient.getApiClient()
    @attr(tags=["simulator", "devcloud", "basic", "advanced", "selfservice"])
    def test_UpdateConfigParamWithScope(self):
        """
        test update configuration setting at zone level scope
        @return:
        """
        # Set "use.external.dns" to true, scoped to zone id 1 only.
        updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()
        updateConfigurationCmd.name = "use.external.dns"
        updateConfigurationCmd.value = "true"
        updateConfigurationCmd.scopename = "zone"
        updateConfigurationCmd.scopeid = 1
        updateConfigurationResponse = self.apiClient.updateConfiguration(updateConfigurationCmd)
        self.debug("updated the parameter %s with value %s"%(updateConfigurationResponse.name, updateConfigurationResponse.value))
        # Read the value back with the same scope to confirm persistence.
        listConfigurationsCmd = listConfigurations.listConfigurationsCmd()
        listConfigurationsCmd.cfgName = updateConfigurationResponse.name
        listConfigurationsCmd.scopename = "zone"
        listConfigurationsCmd.scopeid = 1
        listConfigurationsResponse = self.apiClient.listConfigurations(listConfigurationsCmd)
        self.assertNotEqual(len(listConfigurationsResponse), 0, "Check if the list API \
                            returns a non-empty response")
        # NOTE(review): if no entry matches, configParam is never bound and
        # the assertion below raises NameError rather than a clean failure.
        for item in listConfigurationsResponse:
            if item.name == updateConfigurationResponse.name:
                configParam = item
        self.assertEqual(configParam.value, updateConfigurationResponse.value, "Check if the update API returned \
                         is the same as the one we got in the list API")
    def tearDown(self):
        """
        Reset the configuration back to false
        @return:
        """
        updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()
        updateConfigurationCmd.name = "use.external.dns"
        updateConfigurationCmd.value = "false"
        updateConfigurationCmd.scopename = "zone"
        updateConfigurationCmd.scopeid = 1
| 43.089744 | 130 | 0.724784 |
ace2dc030b8408c9408256efb08d78e271cd7a36 | 6,107 | py | Python | gapid_tests/resource_creation_tests/CreateDestroyImageView_test/CreateDestroyImageView_test.py | RenfengLiu/vulkan_test_applications | 04359b7184ad94659810213ff63ae71296426182 | [
"Apache-2.0"
] | 55 | 2017-06-20T13:54:31.000Z | 2022-02-08T23:58:11.000Z | gapid_tests/resource_creation_tests/CreateDestroyImageView_test/CreateDestroyImageView_test.py | RenfengLiu/vulkan_test_applications | 04359b7184ad94659810213ff63ae71296426182 | [
"Apache-2.0"
] | 53 | 2017-06-15T19:23:07.000Z | 2022-03-30T19:56:30.000Z | gapid_tests/resource_creation_tests/CreateDestroyImageView_test/CreateDestroyImageView_test.py | RenfengLiu/vulkan_test_applications | 04359b7184ad94659810213ff63ae71296426182 | [
"Apache-2.0"
] | 42 | 2017-06-15T19:05:40.000Z | 2022-03-30T14:15:25.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal
from gapit_test_framework import require_not_equal, little_endian_bytes_to_int
from gapit_test_framework import GapitTest
from gapit_test_framework import get_read_offset_function
from vulkan_constants import *
from struct_offsets import VulkanStruct, UINT32_T, POINTER, HANDLE, BOOL32
# Field layout (name, type) of VkSwapchainCreateInfoKHR, used to decode the
# raw create-info bytes observed in the captured trace.
SWAPCHAIN_CREATE_INFO_ELEMENTS = [
    ("stype", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("surface", HANDLE),
    ("minImageCount", UINT32_T),
    ("imageFormat", UINT32_T),
    ("imageColorSpace", UINT32_T),
    ("imageExtent_width", UINT32_T),
    ("imageExtent_height", UINT32_T),
    ("imageArrayLayers", UINT32_T),
    ("imageUsage", UINT32_T),
    ("imageSharingMode", UINT32_T),
    ("queueFamilyIndexCount", UINT32_T),
    ("pQueueFamilyIndices", POINTER),
    ("preTransform", UINT32_T),
    ("compositeAlpha", UINT32_T),
    ("presentMode", UINT32_T),
    ("clipped", BOOL32),
    ("oldSwapchain", HANDLE),
]
# Field layout of VkImageViewCreateInfo; nested VkComponentMapping and
# VkImageSubresourceRange members are flattened with underscore-joined names.
IMAGE_VIEW_CREATE_INFO_ELEMENTS = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("image", HANDLE),
    ("viewType", UINT32_T),
    ("format", UINT32_T),
    # These nested struct offsets are correct because all of the alignments
    # are trivially satisfied.
    ("components_r", UINT32_T),
    ("components_g", UINT32_T),
    ("components_b", UINT32_T),
    ("components_a", UINT32_T),
    ("subresourceRange_aspectMask", UINT32_T),
    ("subresourceRange_baseMipLevel", UINT32_T),
    ("subresourceRange_levelCount", UINT32_T),
    ("subresourceRange_baseArrayLayer", UINT32_T),
    ("subresourceRange_layerCount", UINT32_T),
]
def check_create_image_view(test, index):
    """Fetch and validate the |index|'th vkCreateImageView call atom.

    Verifies the VK_SUCCESS return code, a non-null device handle, a
    non-null create-info pointer and view pointer, then reads back the
    image view handle written through pView. The contents of the
    VkImageViewCreateInfo struct are NOT inspected here.

    Returns (atom, device handle, image view handle).
    """
    atom = require(test.nth_call_of("vkCreateImageView", index))
    require_equal(VK_SUCCESS, int(atom.return_val))
    device_handle = atom.int_device
    require_not_equal(0, device_handle)
    create_info_ptr = atom.hex_pCreateInfo
    require_not_equal(0, create_info_ptr)
    view_ptr = atom.hex_pView
    require_not_equal(0, view_ptr)
    # The driver writes the new handle through pView; decode it from the
    # trace's write observations.
    view_bytes = require(
        atom.get_write_data(view_ptr, NON_DISPATCHABLE_HANDLE_SIZE))
    view_handle = little_endian_bytes_to_int(view_bytes)
    require_not_equal(0, view_handle)
    return atom, device_handle, view_handle
def check_destroy_image_view(test, device, image_view, device_properties):
    """Checks the next vkDestroyImageView command call atom, verifying it
    destroys |image_view| on |device|.

    NOTE(review): there is no |index| parameter despite the original
    wording -- the next matching call is consumed. |device_properties| is
    accepted but unused; presumably kept for signature parity with sibling
    checkers -- confirm before removing.
    """
    destroy_image_view = require(test.next_call_of("vkDestroyImageView"))
    require_equal(device, destroy_image_view.int_device)
    require_equal(image_view, destroy_image_view.int_imageView)
def get_image_view_create_info(create_image_view, architecture):
    """Decode the VkImageViewCreateInfo referenced by |create_image_view|.

    Builds a VulkanStruct populated from the trace's read observations at
    the command's pCreateInfo address.
    """
    base_address = create_image_view.hex_pCreateInfo
    def read_field(offset, size):
        raw = require(create_image_view.get_read_data(
            base_address + offset, size))
        return little_endian_bytes_to_int(raw)
    return VulkanStruct(
        architecture, IMAGE_VIEW_CREATE_INFO_ELEMENTS, read_field)
@gapit_test("CreateDestroyImageView_test")
class ColorAttachmentImage(GapitTest):
    def expect(self):
        """1. Expects a normal image view for a normal 2D color attachment
        image created in swapchain."""
        architecture = self.architecture
        device_properties = require(self.next_call_of(
            "vkGetPhysicalDeviceProperties"))
        # The swapchain create info supplies the image format that the
        # created view is expected to mirror.
        create_swapchain = require(self.next_call_of("vkCreateSwapchainKHR"))
        image_format = VulkanStruct(architecture, SWAPCHAIN_CREATE_INFO_ELEMENTS,
            get_read_offset_function(
                create_swapchain, create_swapchain.hex_pCreateInfo)).imageFormat
        create_image_view, device, image = check_create_image_view(self, 1)
        info = get_image_view_create_info(create_image_view, architecture)
        check_destroy_image_view(self, device, image, device_properties)
        # The view must be a plain 2D view of the swapchain format with
        # identity swizzles and a single-mip, single-layer color range.
        require_equal(info.sType, VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO)
        require_equal(info.pNext, 0)
        require_equal(info.flags, 0)
        require_equal(info.viewType, VK_IMAGE_VIEW_TYPE_2D)
        require_equal(info.format, image_format)
        require_equal(info.components_r, VK_COMPONENT_SWIZZLE_IDENTITY)
        require_equal(info.components_g, VK_COMPONENT_SWIZZLE_IDENTITY)
        require_equal(info.components_b, VK_COMPONENT_SWIZZLE_IDENTITY)
        require_equal(info.components_a, VK_COMPONENT_SWIZZLE_IDENTITY)
        require_equal(info.subresourceRange_aspectMask,
                      VK_IMAGE_ASPECT_COLOR_BIT)
        require_equal(info.subresourceRange_baseMipLevel, 0)
        require_equal(info.subresourceRange_levelCount, 1)
        require_equal(info.subresourceRange_baseArrayLayer, 0)
        require_equal(info.subresourceRange_layerCount, 1)
ace2deacc2ddc0aceadb730042ad50f48f156eb8 | 2,252 | py | Python | test/functional/rpc_getchaintips.py | cpass78/adultchain | 85416c12536e8b5340cb57ba18941772c280757d | [
"MIT"
] | null | null | null | test/functional/rpc_getchaintips.py | cpass78/adultchain | 85416c12536e8b5340cb57ba18941772c280757d | [
"MIT"
] | null | null | null | test/functional/rpc_getchaintips.py | cpass78/adultchain | 85416c12536e8b5340cb57ba18941772c280757d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import AdultChainTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (AdultChainTestFramework):
    """Exercise getchaintips across a network split and rejoin."""
    def set_test_params(self):
        self.num_nodes = 4
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        # Initially every node agrees on a single active tip at height 200.
        initial_tips = self.nodes[0].getchaintips()
        assert_equal(len(initial_tips), 1)
        assert_equal(initial_tips[0]['branchlen'], 0)
        assert_equal(initial_tips[0]['height'], 200)
        assert_equal(initial_tips[0]['status'], 'active')
        # Split the network and build two chains of different lengths.
        self.split_network()
        self.nodes[0].generate(10)
        self.nodes[2].generate(20)
        self.sync_all([self.nodes[:2], self.nodes[2:]])
        # The first half sees only its 10-block extension (height 210).
        short_side = self.nodes[1].getchaintips()
        assert_equal(len(short_side), 1)
        short_tip = short_side[0]
        assert_equal(short_tip['branchlen'], 0)
        assert_equal(short_tip['height'], 210)
        assert_equal(short_tip['status'], 'active')
        # The second half sees only its 20-block extension (height 220).
        long_side = self.nodes[3].getchaintips()
        assert_equal(len(long_side), 1)
        long_tip = long_side[0]
        assert_equal(long_tip['branchlen'], 0)
        assert_equal(long_tip['height'], 220)
        assert_equal(long_tip['status'], 'active')
        # Join the halves: the short-chain nodes should now report two
        # tips -- the winning long chain and their stale 10-block fork.
        self.join_network()
        tips = self.nodes[0].getchaintips()
        assert_equal(len(tips), 2)
        assert_equal(tips[0], long_tip)
        assert_equal(tips[1]['branchlen'], 10)
        assert_equal(tips[1]['status'], 'valid-fork')
        # After normalizing the branch fields, the fork entry must match
        # the tip the short side had before the join.
        tips[1]['branchlen'] = 0
        tips[1]['status'] = 'active'
        assert_equal(tips[1], short_tip)
# Allow standalone execution of this functional test.
if __name__ == '__main__':
    GetChainTipsTest ().main ()
| 34.121212 | 70 | 0.64698 |
ace2df4d1edc3ee0eda80d3a5f365163d8b3a57a | 727 | py | Python | web/app/__init__.py | NekodRider/test-site | a179854f17bfc05f9fea8dcc3607b9c5d56cfdd6 | [
"MIT"
] | 2 | 2017-03-09T14:42:38.000Z | 2017-04-01T13:02:27.000Z | web/app/__init__.py | NekodRider/test-site | a179854f17bfc05f9fea8dcc3607b9c5d56cfdd6 | [
"MIT"
] | null | null | null | web/app/__init__.py | NekodRider/test-site | a179854f17bfc05f9fea8dcc3607b9c5d56cfdd6 | [
"MIT"
] | 2 | 2017-10-14T02:59:02.000Z | 2019-03-23T13:38:09.000Z | from flask import Flask, request, redirect, url_for
from dotenv import load_dotenv
import os
# Directory layout used by the app; each is created on startup if missing.
UPLOAD_FOLDER = './uploads/'
DOWNLOAD_FOLDER = './test'
STORAGE_FOLDER = './storage'
# Pull environment variables from a local .env file before reading them.
load_dotenv()
if not os.path.exists(UPLOAD_FOLDER):
    os.mkdir(UPLOAD_FOLDER)
if not os.path.exists(DOWNLOAD_FOLDER):
    os.mkdir(DOWNLOAD_FOLDER)
if not os.path.exists(STORAGE_FOLDER):
    os.mkdir(STORAGE_FOLDER)
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER
app.config['STORAGE_FOLDER'] = STORAGE_FOLDER
# NOTE(review): hard-coded secret key -- for production this should come
# from the environment (e.g. os.getenv('SECRET_KEY')) like ADMIN_PSWD below.
app.config['SECRET_KEY'] = 'unique-studio'
# Cap uploads at 256 MiB.
app.config['MAX_CONTENT_LENGTH'] = 256 * 1024 * 1024
app.config['ADMIN_PSWD'] = os.getenv('ADMIN_PSWD')
from app import views | 30.291667 | 52 | 0.763411 |
ace2e09898212f277eb7aa0ce4be6b850fab49b2 | 27,734 | py | Python | Framework/Common/Settings.py | bvbohnen/X4_Customizer | 6f865008690916a66a44c97331d9a2692baedb35 | [
"MIT"
] | 25 | 2018-12-10T12:52:11.000Z | 2022-01-29T14:42:57.000Z | Framework/Common/Settings.py | bvbohnen/X4_Customizer | 6f865008690916a66a44c97331d9a2692baedb35 | [
"MIT"
] | 4 | 2019-08-01T19:09:11.000Z | 2022-01-02T01:47:42.000Z | Framework/Common/Settings.py | bvbohnen/X4_Customizer | 6f865008690916a66a44c97331d9a2692baedb35 | [
"MIT"
] | 6 | 2019-02-16T08:39:04.000Z | 2021-12-21T06:11:58.000Z | '''
Container for general customize settings.
Import as:
from Settings import Settings
'''
import os
from pathlib import Path
import json
from collections import OrderedDict
from functools import wraps
from .Home_Path import home_path
from .Print import Print
class Settings_class:
'''
This holds general settings and paths to control the customizer.
Adjust these settings as needed prior to running the first plugin,
using direct writes to attributes.
Settings may be updated individually, or as arguments of
a call to Settings, or through a "settings.json" file in the
top X4 Customizer folder (eg. where documentation resides).
Any json settings will overwrite defaults, and be overwritten by
settings in the control script. Changes made using the GUI
will be applied to the json settings.
Examples:
* In the control script (prefix paths with 'r' to support backslashes):
<code>
Settings.path_to_x4_folder = r'C:\...'
Settings.path_to_user_folder = r'C:\...'
Settings(
path_to_x4_folder = r'C:\...',
path_to_user_folder = r'C:\...'
)
</code>
* In settings.json (sets defaults for all scripts):
<code>
{
"path_to_x4_folder" : "C:\...",
"path_to_user_folder" : "C:\...",
"output_to_user_extensions": "true"
}
</code>
Paths:
* path_to_x4_folder
- Path to the main x4 folder.
- Defaults to HOMEDRIVE/"Steam/steamapps/common/X4 Foundations"
* path_to_user_folder
- Path to the folder where user files are located.
- Should include config.xml, content.xml, etc.
- Defaults to HOMEPATH/"Documents/Egosoft/X4" or a subfolder
with an 8-digit name.
* path_to_source_folder
- Optional path to a source folder that holds high priority source
files, which will be used instead of reading the x4 cat/dat files.
- For use when running plugins on manually edited files.
- Not needed in general use.
- All files from the source folder will be copied into the extension.
- Defaults to None
* allow_path_error
- Bool, if True and the x4 or user folder path looks wrong, the
customizer will still attempt to run (with a warning).
- Defaults to False
Input:
* prefer_single_files
- Bool, if True then loose files will be used before those in cat/dat
files, otherwise cat/dat takes precedence.
- Only applies within a single search location, eg. within an
extension, within the source folder, or within the base X4 folder;
a loose file in the source folder will still be used over those
in the X4 folder regardless of setting.
- Defaults to False
* ignore_extensions
- Bool, if True then all extensions will be ignored, and files are
only sourced from the source_folder or x4_folder.
- Defaults to False
* extension_whitelist
- String, optional, semicolon separated list of lowercase extension
folder names to consider loading (if found and enabled).
- If not given, all extension folders are checked, except those in
the blacklist.
* extension_blacklist
- String, optional, semicolon separated list of lowercase extension
folder names to always ignore.
* allow_cat_md5_errors
- Bool, if True then when files extracted from cat/dat fail
to verify their md5 hash, no exception will be thrown.
- Defaults to False; consider setting True if needing to
unpack incorrectly assembled catalogs.
* ignore_output_extension
- Bool, if True, the target extension being generated will have
its prior content ignored (this run works on the original files,
and not those changes made last run).
- Defaults to True; should only be set False if not running
transforms and wanting to analyse prior output.
* X4_exe_name
- String, name of the X4.exe file, to be used when sourcing the file
for any exe transforms (if used), assumed to be in the x4 folder.
- Defaults to "X4.exe", but may be useful to change based on the
source exe file for transforms, eg. "X4_nonsteam.exe",
"X4_steam.exe", or similar.
- Note: the modified exe is written to the x4 folder with a ".mod.exe"
extension, and will not be removed on subsequent runs even
if they do not select any exe editing transforms. If wanting this
to work with steam, then the X4.exe may need to be replaced with
this modified exe manually.
Output:
* extension_name
- String, name of the extension being generated.
- Spaces will be replaced with underscores for the extension id.
- A lowercase version of this will be used for the output folder
name.
- Defaults to 'x4_customizer'
* output_to_user_extensions
- Bool, if True then the generated extension holding output files
will be under <path_to_user_folder/extensions>.
- Warning: any prior output on the original path will still exist,
and is not cleaned out automatically at the time of this note.
- Defaults to False, writing to <path_to_x4_folder/extensions>
* path_to_output_folder
- Optional, Path to the location to write the extension files to,
instead of the usual X4 or user documents extensions folders.
- This is the parent directory to the extension_name folder.
* output_to_catalog
- Bool, if True then the modified files will be written to a single
cat/dat pair, otherwise they are written as loose files.
- Defaults to False
* generate_sigs
- Bool, if True then dummy signature files will be created.
- Defaults to True.
* make_maximal_diffs
- Bool, if True then generated xml diff patches will do the
maximum full tree replacement instead of using the algorithm
to find and patch only edited nodes.
- Turn on to more easily view xml changes.
- Defaults to False.
* forced_xpath_attributes
- String, optional comma separate list of XML node attributes which,
if found when constructing xpaths for output diffs, will be
included in the xpath regardless of if they are needed.
- Example: "id,name" will always include "id" and "name" attributes
of elements in the xpath.
- Also supports child node or attributes referenced using a relative
xpath. Example: "parts/part/uv_animations/uv_animation" to require
a uv_animation great-great-grandchild element, or
"component/@ref" to include the "ref" attribute of a "component"
child.
- Can be used to make xpaths more specific, and more likely to break
if an unknown extension is applied before the output extension
(eg. when the customizer output is distributed to other users).
* root_file_tag
- String, extra tag added to names of modified files in the root folder
and not placed in an extension, eg. X4.exe, to avoid overwriting the
originals.
- Defaults to ".mod", eg. "X4.mod.exe".
Logging:
* live_editor_log_file_name
- String, name a json file which the live editor (tracking hand
edits in the gui) will save patches to, and reload from.
- Patches will capture any hand edits made by the user.
- File is located in the output extension folder.
- Defaults to 'live_editor_log.json'
* plugin_log_file_name
- String, name of a text file to write plugin output messages to;
content depends on plugins run.
- File is located in the output extension folder.
- Defaults to 'plugin_log.txt'
* customizer_log_file_name
- String, name a json file to write customizer log information to,
including a list of files written, information that will be loaded
on the next run to guide the file handling logic.
- File is located in the output extension folder.
- Defaults to 'customizer_log.json'
* log_source_paths
- Bool, if True then the path for any source files read will be
printed in the plugin log.
- Defaults to False
* verbose
- Bool, if True some extra status messages may be printed to the
console.
- Defaults to True
Behavior:
* show_tab_close_button
- Bool, if True then a gui tab close button will be shown.
* disable_cleanup_and_writeback
- Bool, if True then cleanup from a prior run and any final
writes will be skipped.
- For use when testing plugins without modifying files.
- Defaults to False
* skip_all_plugins
- Bool, if True all plugins will be skipped.
- For use during cleaning mode.
- Defaults to False
* developer
- Bool, if True then enable some behavior meant just for development,
such as leaving exceptions uncaught.
- Defaults to False
* profile
- Bool, if True then some extra profiling of customizer operations
is performed, and times printed. For use during development.
- Defaults to False.
* disable_threading
- Bool, if True then threads will not be used in the gui to
call scripts and plugins. Will cause the gui to lock up
during processing.
- Intended for development use, to enable breakpoints during calls.
- Defaults to False
* use_scipy_for_scaling_equations
- Bool, if True then scipy will be used to optimize scaling
equations, for smoother curves between the boundaries.
- If False or scipy is not found, then a simple linear scaling
will be used instead.
- May be unused currently.
- Defaults to True
* show_scaling_plots
- Bool, if True and matplotlib and numpy are available, any
generated scaling equations will be plotted (and their
x and y vectors printed for reference). Close the plot window
manually to continue plugin processing.
- Primarily for development use.
- May be unused currently.
- Defaults to False
'''
'''
TODO:
- This was moved to an input arg of Write_To_Extension.
* generate_content_xml
- Bool, when True a new content.xml will be generated for the
extension, overwriting any that already exists.
- Set False if wishing to reuse a custom content.xml, eg. one with
custom description. The existing file may be modified to
fill in dependencies.
- Defaults True.
'''
# TODO: language selection for modifying t files.
    def __init__(self):
        '''
        Initialize every settings field to its default, then apply any
        overrides found in settings.json. Path validation is deferred
        until Delayed_Init runs on first verified access.
        '''
        # Fill in initial defaults.
        for field, default in self.Get_Defaults().items():
            setattr(self, field, default)
        # Very early call to look for a json file to overwrite defaults.
        self.Load_Json()
        # Flag to track if delayed init has completed.
        self._init_complete = False
        return
    def _Verify_Init(func):
        '''
        Decorator for Settings methods: ensures Delayed_Init (path
        validation and cleanup) has run before the wrapped method
        executes.
        '''
        '''
        Note: this cannot be set as a staticmethod since that
        delays its creation until after it is needed (presumably);
        also, it does not take 'self' on its own, just the
        wrapped func.
        '''
        # Use functools wraps to preserve the docstring and such.
        @wraps(func)
        # All wrapped functions will have self.
        def func_wrapper(self, *args, **kwargs):
            # Run delayed init if needed.
            if not self._init_complete:
                self.Delayed_Init()
            # Run the func as normal.
            return func(self, *args, **kwargs)
        return func_wrapper
    def Reset(self):
        '''
        Resets the settings, such that Delayed_Init will be run
        again. For use when paths may be changed since a prior run.
        '''
        # Clearing this flag causes the next _Verify_Init-wrapped call
        # to redo path validation and setup.
        self._init_complete = False
        return
def Get_Categorized_Fields(self):
'''
Returns an OrderedDict, keyed by category, with a list of fields in
their preferred display order. Parses the docstring to determine
this ordering.
'''
# TODO: maybe cache if this will be called often, but probably
# doesn't matter.
category_list_dict = OrderedDict()
category = None
# Work through the docstring.
for line in self.__doc__.splitlines():
# Category titles are single words with an ending :, no
# prefix.
strip_line = line.strip()
if strip_line.endswith(':') and strip_line[0] not in ['-','*']:
category = strip_line.replace(':','')
# Fields are recognized names after a *.
elif strip_line.startswith('*'):
field = strip_line.replace('*','').strip()
# Check that the doc term maches a local attribute.
if hasattr(self, field):
# A category should have been found at this point.
assert category != None
# Record the new category if needed.
# Note: cannot use defaultdict for this since already
# using an OrderedDict.
if category not in category_list_dict:
category_list_dict[category] = []
category_list_dict[category].append(field)
return category_list_dict
def Get_Defaults(self):
'''
Returns a dict holding fields and their default values.
Does some dynamic compute to determine default paths, so
this could potentially change across calls.
'''
defaults = {}
# For the path lookups, use os.environ to look up some windows
# path terms, but in case they aren't found just use '.' so
# this doesn't error out here.
# Add '/' after the drive letter, else it gets ignored and the path
# is treated as relative.
# TODO: some sort of smart but fast folder search.
# TODO: consider placing default settings overrides in a json file,
# that will work on all called scripts.
defaults['path_to_x4_folder'] = (Path(os.environ.get('HOMEDRIVE','.') + '/')
/ 'Steam/steamapps/common/X4 Foundations')
defaults['path_to_user_folder'] = (Path(os.environ.get('HOMEPATH','.'))
/ 'Documents/Egosoft/X4')
# If the user folder exists but has no uidata.xml, check an id folder.
# Note: while content.xml is wanted, it apparently not always
# created (maybe only made the first time a mod gets enabled/disabled
# in the menu?).
# Note: on a new profile, only uidata.xml and pipelinecache.bin
# are created, so can only rely on one of those being present.
if (defaults['path_to_user_folder'].exists()
and not (defaults['path_to_user_folder'] / 'uidata.xml').exists()):
# Iterate through all files and dirs.
for dir in defaults['path_to_user_folder'].iterdir():
# Skip non-dirs.
if not dir.is_dir():
continue
# Check for the uidata.xml.
# Probably don't need to check folder name for digits;
# common case just has one folder.
if (dir / 'uidata.xml').exists():
# Record it and stop looping.
defaults['path_to_user_folder'] = dir
break
defaults['extension_name'] = 'x4_customizer'
defaults['output_to_user_extensions'] = False
defaults['path_to_output_folder'] = None
defaults['path_to_source_folder'] = None
defaults['prefer_single_files'] = False
defaults['ignore_extensions'] = False
defaults['extension_whitelist'] = ''
defaults['extension_blacklist'] = ''
defaults['allow_cat_md5_errors'] = False
defaults['ignore_output_extension'] = True
defaults['X4_exe_name'] = 'X4.exe'
defaults['root_file_tag'] = '.mod'
defaults['make_maximal_diffs'] = False
defaults['forced_xpath_attributes'] = ''
defaults['plugin_log_file_name'] = 'plugin_log.txt'
defaults['live_editor_log_file_name'] = 'live_editor_log.json'
defaults['customizer_log_file_name'] = 'customizer_log.json'
defaults['show_tab_close_button'] = True
defaults['disable_cleanup_and_writeback'] = False
defaults['log_source_paths'] = False
defaults['skip_all_plugins'] = False
defaults['use_scipy_for_scaling_equations'] = True
defaults['show_scaling_plots'] = False
defaults['developer'] = False
defaults['profile'] = False
defaults['disable_threading'] = False
defaults['verbose'] = True
defaults['allow_path_error'] = False
defaults['output_to_catalog'] = False
defaults['generate_sigs'] = False
return defaults
def Load_Json(self):
'''
Look for a "settings.json" file in the main x4 customizer directory,
and load defaults from it.
Returns a list of field names updated.
'''
fields_updated = []
# Try the home_path and the call directory to find this.
for json_path in [Path('settings.json'), home_path / 'settings.json']:
if not json_path.exists():
continue
# If the json is malformed, json.load with toss an exception.
# In that case, just ignore it.
try:
with open(json_path, 'r') as file:
json_dict = json.load(file)
except Exception as ex:
Print(('Skipping load of "settings.json" due to {}.'
).format(type(ex).__name__))
# Don't continue; just return. Avoids repeating errors
# if the cwd is the home_path.
return fields_updated
# Do some replacements of strings for normal types;
# unfortunately json.load doesn't do this automatically.
replacements_dict = {
'true' : True,
'True' : True,
'1' : True,
'false': False,
'False': False,
'0' : False,
}
# Note: this is unsafe if a path is given that matches one
# of these strings, so only apply these replacements for
# select fields. This will be based on the defaults, and
# which are bools.
defaults = self.Get_Defaults()
for key, value in json_dict.items():
if isinstance(defaults[key], bool):
# Convert to python bools.
value = replacements_dict.get(value, value)
elif defaults[key] == None:
# Convert none to None for paths.
if value == 'none':
value = None
# Paths and string names will be left alone.
if hasattr(self, key):
# This should always be a bool or a string.
setattr(self, key, value)
fields_updated.append(key)
else:
Print(('Entry "{}" in settings.json not recognized; skipping.'
).format(key))
# Don't want to check other json files.
break
return fields_updated
def Save_Json(self, fields_to_save):
'''
Save the given settings fields to settings.json.
This should preferably only save non-default settings.
'''
json_dict = OrderedDict()
# Field replacements going to json.
replacements_dict = {
True : 'true',
False : 'false',
None : 'none',
}
# Can follow the preferred field order, for readability.
for category, field_list in self.Get_Categorized_Fields().items():
for field in field_list:
# Skip if unwanted.
if field not in fields_to_save:
continue
value = getattr(self, field)
# Get any json suitable replacements.
if value in replacements_dict:
value = replacements_dict[value]
# Stringify, in case it is a Path.
json_dict[field] = str(value)
# Always save to the home_path for now.
with open(home_path / 'settings.json', 'w') as file:
json.dump(json_dict, file, indent = 2)
return
def __call__(self, *args, **kwargs):
'''
Convenience function for applying settings by calling
the settings object with fields to set.
'''
# Ignore args; just grab kwargs.
for name, value in kwargs.items():
# Warn on unexpected names.
if not hasattr(self, name):
Print('Warning: setting "{}" not recognized'.format(name))
else:
setattr(self, name, value)
# Reset to pre-init state, so the new paths and such
# will get cleaned up and checked.
self.Reset()
return
# TODO: with _Verify_Init in place, wherever this was called
# originally may no longer need to call it.
def Delayed_Init(self):
    '''
    Checks the current paths for errors (not existing, etc.), converts
    them to Path objects, creates the output extension folder, etc.
    Raises AssertionError on any critical problem, and may raise
    other exceptions on misc problems.
    Sets _init_complete if no errors are found.
    '''
    # Limit to running just once, though this might get called
    # on every plugin. Note: this flag will only get set after
    # all error checks are passed.
    if self._init_complete:
        return
    # Note: some problems can occur if the user input paths are
    # weird (one guy tried a newline). This code may trigger
    # other exceptions than those listed.
    # Start with conversions to full Paths, since the user
    # may have written these with strings.
    self.path_to_x4_folder = Path(self.path_to_x4_folder).resolve()
    self.path_to_user_folder = Path(self.path_to_user_folder).resolve()
    # NOTE(review): prefer "is not None" over "!= None" below.
    if self.path_to_source_folder != None:
        self.path_to_source_folder = Path(self.path_to_source_folder).resolve()
    if self.path_to_output_folder != None:
        self.path_to_output_folder = Path(self.path_to_output_folder).resolve()
    # Verify the X4 path looks correct.
    if not self.path_to_x4_folder.exists():
        raise AssertionError(
            'Path to the X4 folder appears to not exist.'
            +'\n (x4 path: {})'.format(self.path_to_x4_folder)
            )
    # Check for 01.cat.
    # Print a warning but continue if anything looks wrong; the user
    # may wish to have this tool generate files to a separate
    # directory first.
    if not (self.path_to_x4_folder / '01.cat').exists():
        message = ('Warning: Path to the X4 folder appears incorrect.'
                   '\n (x4 path: {})').format(self.path_to_x4_folder)
        if self.allow_path_error:
            Print(message)
        else:
            # Hard error.
            raise AssertionError(message + '\nEnable "allow_path_error" to bypass this check.')
    # Check the user folder for uidata.xml.
    if not (self.path_to_user_folder / 'uidata.xml').exists():
        message = ('Path to the user folder appears incorrect, lacking'
                   ' uidata.xml.\n (path: {})').format(self.path_to_user_folder)
        if self.allow_path_error:
            Print(message)
        else:
            # Hard error.
            raise AssertionError(message + '\nEnable "allow_path_error" to bypass this check.')
    # Check that root_file_tag is not empty, to avoid overwrites.
    if not self.root_file_tag:
        raise AssertionError('Empty Settings.root_file_tag disallowed')
    # Check that file names are given, and not blank.
    # TODO
    # If here, can continue with source file processing.
    self._init_complete = True
    return
def Paths_Are_Valid(self):
    '''
    Returns True if all paths appear to be valid and ready
    for game file reading and output writing, else False.
    '''
    # Delayed_Init does the full validation work (and is a cheap no-op
    # once _init_complete is set); any exception it raises indicates
    # a path problem.
    try:
        self.Delayed_Init()
    except Exception:
        return False
    else:
        return True
# The following functions return paths that might be unsafe
# if delayed init wasn't run yet.
@_Verify_Init
def Get_X4_Folder(self):
    'Returns the path to the X4 base folder. Validated by _Verify_Init.'
    return self.path_to_x4_folder
@_Verify_Init
def Get_User_Folder(self):
    'Returns the path to the user folder. Validated by _Verify_Init.'
    return self.path_to_user_folder
@_Verify_Init
def Get_Output_Folder(self):
    '''
    Returns the path to the output extension folder.
    Creates it if it does not exist.
    '''
    if self.path_to_output_folder:
        # An explicit override takes precedence over the computed path.
        path = self.path_to_output_folder
    else:
        # Build <user-or-x4 folder>/extensions/<lowercased extension name>.
        base = (self.path_to_user_folder
                if self.output_to_user_extensions
                else self.path_to_x4_folder)
        # Use a lowercase name to improve portability, as it may
        # be required for reliable operation on linux.
        path = base / 'extensions' / self.extension_name.lower()
    # Create the output folder (and any missing parents) on first use.
    if not path.exists():
        path.mkdir(parents = True)
    return path
@_Verify_Init
def Get_Source_Folder(self):
    'Returns the path to the Source folder. Validated by _Verify_Init.'
    return self.path_to_source_folder
@_Verify_Init
def Get_Plugin_Log_Path(self):
    'Returns the path to the plugin log file, inside the output folder.'
    return self.Get_Output_Folder() / self.plugin_log_file_name
@_Verify_Init
def Get_Customizer_Log_Path(self):
    'Returns the path to the customizer log file, inside the output folder.'
    return self.Get_Output_Folder() / self.customizer_log_file_name
@_Verify_Init
def Get_User_Content_XML_Path(self):
    'Returns the path to the user content.xml file, in the user folder.'
    return self.path_to_user_folder / 'content.xml'
@_Verify_Init
def Get_Live_Editor_Log_Path(self):
    'Returns the path to the live editor log file, inside the output folder.'
    return self.Get_Output_Folder() / self.live_editor_log_file_name
# General settings object, to be referenced by any place so interested.
# Module-level singleton: all importers share this one instance.
Settings = Settings_class()
| 40.845361 | 99 | 0.617653 |
ace2e0c9542f2d77be56381a27f07dc86bc551d9 | 1,067 | py | Python | py-recipes/functional_factorial.py | beenorgone-notebook/python-notebook | 48e42b5be70e7ca06e8440a75516c055c961371c | [
"MIT"
] | null | null | null | py-recipes/functional_factorial.py | beenorgone-notebook/python-notebook | 48e42b5be70e7ca06e8440a75516c055c961371c | [
"MIT"
] | null | null | null | py-recipes/functional_factorial.py | beenorgone-notebook/python-notebook | 48e42b5be70e7ca06e8440a75516c055c961371c | [
"MIT"
] | 1 | 2018-04-08T13:24:39.000Z | 2018-04-08T13:24:39.000Z | import functools
from operator import mul
def medium_runtime(n):
    '''Decorator factory: run the wrapped function n times and print
    the average runtime per call. Returns the last call's result.'''
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            from timeit import default_timer as timer
            start = timer()
            for _ in range(n):
                result = func(*args, **kwargs)
            elapsed = timer() - start
            print("Medium Runtime Is: {}".format(elapsed / n))
            return result
        return wrapper
    return decorator
"Apply memoization technique for obj."
cache = obj.cache = {}
def _memoize(*args, **kwargs):
arg_str = str(*args) + str(**kwargs)
if arg_str not in cache:
cache[arg_str] = obj(*args, **kwargs)
return cache[arg_str]
return _memoize
@medium_runtime(50)
@memoize
def factorial(n):
return functools.reduce(mul, range(1, n + 1), 1)
print(factorial(3000))
print(factorial(1000))
print(factorial(100))
print(factorial(202))
| 25.404762 | 68 | 0.606373 |
ace2e13c3c21e27189b8422ae74e851ce029e323 | 271 | py | Python | bfm/views/index.py | gadoor/bot-for-messenger | ecf6529a0f8358ad2747b6ed218c06063704f084 | [
"MIT"
] | null | null | null | bfm/views/index.py | gadoor/bot-for-messenger | ecf6529a0f8358ad2747b6ed218c06063704f084 | [
"MIT"
] | null | null | null | bfm/views/index.py | gadoor/bot-for-messenger | ecf6529a0f8358ad2747b6ed218c06063704f084 | [
"MIT"
] | null | null | null | """
@Author: Hizaoui Mohamed Abdelkader
@Email-1: hizaoui.ma@gmail.com
@Email-2: hizaoui.mohamed.abdelkader@gmail.com
@created on: '3/7/17'
"""
from bfm.views import CustomRequestHandler
class Index(CustomRequestHandler):
def get(self):
self.write("Index")
| 20.846154 | 46 | 0.723247 |
ace2e19d68a87b58ba6c9a89e3aa743858dcfe31 | 7,756 | py | Python | test/Base/ParseXML.py | PLOS/content-repo | dc1206a756fcbd9f971ee703ae39950b2de3e726 | [
"MIT"
] | 3 | 2017-02-17T19:15:19.000Z | 2018-10-30T20:50:32.000Z | test/Base/ParseXML.py | PLOS/content-repo | dc1206a756fcbd9f971ee703ae39950b2de3e726 | [
"MIT"
] | 38 | 2017-02-17T18:19:09.000Z | 2021-12-14T21:12:55.000Z | test/Base/ParseXML.py | PLOS/content-repo | dc1206a756fcbd9f971ee703ae39950b2de3e726 | [
"MIT"
] | 1 | 2017-02-10T00:29:55.000Z | 2017-02-10T00:29:55.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Public Library of Science
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
Class for accessing XML data, returning a dom representation
"""
__author__ = 'jgray@plos.org'
import logging
import urllib
import urllib.request
import xml.etree.ElementTree as ET

from .Config import repo_config
# Base URL for fetching stored objects from the content-repo service,
# assembled from the deployment config.
xmlurl = str(repo_config['transport']) + '://' + str(repo_config['host']) + ':' + \
    str(repo_config['port']) + str(repo_config['path']) + '/objects/'
class ParseXML(object):
    """Extracts author lists and section titles from article XML fetched
    from the content repo.

    Fixes relative to the previous revision:
    * ``urllib.urlopen`` does not exist on Python 3 (the file's shebang
      is python3); use ``urllib.request.urlopen``.
    * Several fallback paths concatenated ``Element`` objects with
      strings (TypeError, silently swallowed by the broad except),
      and returned Elements instead of name strings; names are now
      always extracted as text.
    All public methods keep their best-effort contract: any failure is
    logged and a (possibly partial) list is returned.
    """

    @staticmethod
    def _fetch_root(bucket, article):
        """Download the article XML and return the parsed root element."""
        xmlpath = xmlurl + bucket + '?key=' + article + '.XML'
        with urllib.request.urlopen(xmlpath) as article_xml:
            return ET.parse(article_xml).getroot()

    @staticmethod
    def _contrib_name(contrib):
        """Return the display name for a <contrib> element.

        Prefers "given-names surname"; falls back to the <collab> text,
        or an empty string when neither is present.
        """
        fname = contrib.find('./name/given-names')
        lname = contrib.find('./name/surname')
        if fname is not None and lname is not None:
            return fname.text + ' ' + lname.text
        collab = contrib.find('./collab')
        fullname = collab.text if collab is not None else ''
        logging.info("Author with no first or last name: %s", fullname)
        return fullname

    def _collect_authors(self, bucket, article, predicate):
        """Collect names of author <contrib> elements matching *predicate*."""
        authors = []
        try:
            root = self._fetch_root(bucket, article)
            for contrib in root.findall(".//contrib[@contrib-type='author']"):
                if predicate(contrib):
                    authors.append(self._contrib_name(contrib))
        except Exception as e:
            logging.error(e)
        return authors

    def get_auths(self, bucket, article):
        """Return the names of all authors of the article."""
        return self._collect_authors(bucket, article, lambda c: True)

    def get_corresp_auths(self, bucket, article):
        """Return the names of corresponding authors."""
        return self._collect_authors(
            bucket, article,
            lambda c: c.find("./xref[@ref-type='corresp']") is not None)

    def get_cocontributing_auths(self, bucket, article):
        """Return the names of equally-contributing authors."""
        authors = []
        try:
            root = self._fetch_root(bucket, article)
            for contrib in root.findall(".//contrib[@equal-contrib='yes']"):
                authors.append(self._contrib_name(contrib))
        except Exception as e:
            logging.error(e)
        return authors

    def get_customfootnote_auths(self, bucket, article):
        """Return the names of authors that carry a custom footnote."""
        return self._collect_authors(
            bucket, article,
            lambda c: c.find("./xref[@ref-type='fn']") is not None)

    def get_article_sections(self, bucket, article):
        """Return the ordered list of section titles for the article."""
        article_sections = []
        patient_summary = False
        try:
            root = self._fetch_root(bucket, article)
            for abstract in root.findall(".//front/article-meta/abstract"):
                abstract_type = abstract.attrib.get('abstract-type')
                if not abstract.attrib:
                    article_sections.append('Abstract')
                elif abstract_type == 'toc':
                    # Table-of-contents abstracts are not sections.
                    continue
                elif abstract_type == 'patient':
                    # Appended at the very end to preserve section order.
                    patient_summary = True
                elif abstract_type is not None:
                    article_sections.append(abstract.find("./title").text)
            for section in root.findall(".//body/sec"):
                title = section.find("./title")
                if title is not None and title.text is not None:
                    article_sections.append(title.text)
            if root.findall(".//back/ack"):
                article_sections.append('Acknowledgments')
            if root.findall(
                    ".//front/article-meta/author-notes/fn[@fn-type='con']"):
                article_sections.append('Author Contributions')
            for refs in root.findall(".//back/ref-list"):
                article_sections.append(refs.find("./title").text)
            if patient_summary:
                article_sections.append('Patient Summary')
        except Exception as e:
            logging.error(e)
        return article_sections
ace2e3166933e7e50991f17e75344772ee19d524 | 1,296 | py | Python | Advent-Of-Code-2016/html_scraper.py | adriano-arce/Interview-Problems | a29767ba9ececfe1209fd6cc2153eb342d57fc23 | [
"MIT"
] | 1 | 2015-10-16T17:35:12.000Z | 2015-10-16T17:35:12.000Z | Advent-Of-Code-2016/html_scraper.py | adriano-arce/Interview-Problems | a29767ba9ececfe1209fd6cc2153eb342d57fc23 | [
"MIT"
] | null | null | null | Advent-Of-Code-2016/html_scraper.py | adriano-arce/Interview-Problems | a29767ba9ececfe1209fd6cc2153eb342d57fc23 | [
"MIT"
] | null | null | null | from pyquery import PyQuery
from textwrap import wrap
source = 'http://adventofcode.com/2016/day/10'
d = PyQuery(source)
for tag in ('code', 'em'):
for node in d(tag).items():
node.replace_with(node.text())
for node in d('p').items():
content = node.text()
if node.next() and node.next()[0].tag != 'ul':
content += '\n'
node.replace_with(content)
for node in d('li').items():
node.replace_with('- ' + node.text())
for node in d('ul').items():
node.replace_with(node.text() + '\n')
for node in d('h2').items():
header = node.text()[4:-4]
node.replace_with(header + '\n' + ('=' * len(header)) + '\n\n')
for node in d('pre').items():
content = '\n'.join(' ' + row for row in node.text().split('\n'))
node.replace_with(content + '\n')
final = []
margin = 80
for row in d('article').text().split('\n'):
if row == '':
final.append(row)
elif row.startswith('- '):
sub_rows = wrap(row, margin - 2)
for i in range(1, len(sub_rows)):
sub_rows[i] = ' ' + sub_rows[i]
final.extend(sub_rows)
else:
final.extend(wrap(row, margin))
final.append('')
final.append('Part Two')
final.append('========')
final.append('')
final.append('Source: %s\n' % source)
print('\n'.join(final))
| 25.92 | 72 | 0.574074 |
ace2e35e5bef48e095664fdc34c45d57ccae87e4 | 3,062 | py | Python | scrapeTileColors.py | ThomasSelvig/Python-Minecraft-DJ | 9dc998651600637547f1689dd2e8deea12be9315 | [
"MIT"
] | 1 | 2020-08-18T23:22:28.000Z | 2020-08-18T23:22:28.000Z | scrapeTileColors.py | ThomasSelvig/Python-Minecraft-DJ | 9dc998651600637547f1689dd2e8deea12be9315 | [
"MIT"
] | null | null | null | scrapeTileColors.py | ThomasSelvig/Python-Minecraft-DJ | 9dc998651600637547f1689dd2e8deea12be9315 | [
"MIT"
] | null | null | null | from PIL import Image
import json, os
aliases = {
"bone_block_side": "bone_block",
"hay_block_side": "hay_block",
"quartz_block_side": "quartz_block",
"melon_side": "melon"
}
whitelist = [
"log",
"planks",
"concrete",
"terracotta",
"wool",
"wood",
"ice",
"bricks",
"clay",
"stone",
"dirt",
"andesite",
"granite",
"block",
"sponge",
"melon",
"prismarine",
"quartz",
"bedrock"
]
blacklist = [
"frosted",
"mcmeta",
"door",
"grass_block",
"stairs",
"slab",
"stage",
"dust",
"glazed",
"command",
"powder",
"coral",
"grindstone",
"honey",
"mushroom",
"_bottom",
"_top",
"_on",
"torch",
"stonecutter",
"structure",
"glowstone",
"campfire",
"wall",
"stem",
"quartz_ore",
"birch"
]
path = f"{os.path.dirname(os.path.abspath(__file__))}/1.15.2 assets/minecraft/textures/block"
approved = []
# approval process
for file in os.listdir(path):
blacklisted = any([term in file for term in blacklist])
whitelisted = any([term in file for term in whitelist])
if whitelisted and not blacklisted:
approved.append(file)
colors = {}
for file in approved:
im = Image.open(path+"/"+file)
px = im.load()
# if not grayscale
if type(px[0, 0]) != int and len(px[0, 0]) >= 3:
r, g, b = 0, 0, 0
for y in range(im.size[1]):
for x in range(im.size[0]):
p = px[x, y]
r += p[0]
g += p[1]
b += p[2]
pxCount = im.size[0]*im.size[1]
name = file.replace(".png", "")
if name in aliases:
name = aliases[name]
colors[name] = (int(r/pxCount), int(g/pxCount), int(b/pxCount))
with open(f"{os.path.dirname(os.path.abspath(__file__))}/tileColors.json", "w+", encoding="utf8") as fs:
json.dump(colors, fs, indent=4)
print(len(colors)) # 102
class DEPRECATED:
    """
    This is a totally unfeacible way to look up colors
    i don't even know why i thought this would be a good idea
    results generated from this class are stored in allColors.txt which i did you the favor of adding to .gitignore
    """
    # NOTE(review): dead code — references PATH, time, Pool and PixelArt
    # that are not imported/defined in this file; it cannot run as-is.
    class StoreAllTileMaps:
        # this can be optimized by skipping half of all colors: won't make much difference
        def run():
            print(time.asctime())
            with open(PATH+"/allColors.txt", "a+") as fs:
                with Pool(10) as p: # multiprocessing
                    labelList = p.map(StoreAllTileMaps.getBlockUsingMP, StoreAllTileMaps.getCol())
                print(time.asctime())
                last = None
                # Walk all colors and write only label transitions.
                for color, label in zip(StoreAllTileMaps.getCol(), labelList):
                    if label != last: # if the label changes, write the new thing to file
                        # store hex values
                        r, g, b = tuple(map(lambda i: hex(i)[2:], color))
                        # format hex values: 6 -> 06, cd -> cd, f -> 0f
                        r, g, b = tuple(map(lambda i: "0"+i if len(i) < 2 else i, (r, g, b)))
                        fs.write(f"{r}{g}{b}:{label}" + "\n")
                        last = label
        def getBlockUsingMP(rgb):
            # Thin wrapper so Pool.map can call PixelArt.getBlock per color.
            return PixelArt.getBlock(*rgb)
        def getCol():
            # Generator over every 24-bit RGB color.
            for r in range(256):
                for g in range(256):
                    for b in range(256):
                        yield r, g, b
ace2e591e02982f38b230b2091cf044cabaa39ed | 9,734 | py | Python | lektor/packages.py | Sapphire64/lektor | 097bb899fc551e5312a89071136cd5507acdac17 | [
"BSD-3-Clause"
] | null | null | null | lektor/packages.py | Sapphire64/lektor | 097bb899fc551e5312a89071136cd5507acdac17 | [
"BSD-3-Clause"
] | null | null | null | lektor/packages.py | Sapphire64/lektor | 097bb899fc551e5312a89071136cd5507acdac17 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import site
import errno
import click
import shutil
import tempfile
import requests
import pkg_resources
from subprocess import PIPE
from .utils import portable_popen
class PackageException(Exception):
    """Base exception for package-management errors."""
    pass
def _get_package_version_from_project(cfg, name):
choices = (name.lower(), 'lektor-' + name.lower())
for pkg, version in cfg.section_as_dict('packages').iteritems():
if pkg.lower() in choices:
return {
'name': pkg,
'version': version
}
def add_package_to_project(project, req):
    """Given a pacakge requirement this returns the information about this
    plugin.

    *req* is either ``name`` (latest release) or ``name@version``. The
    package is looked up on PyPI (also trying the ``lektor-`` prefix),
    pinned in the project config, and a dict with its canonical name and
    version is returned. Raises RuntimeError if already added, if the
    requested version does not exist, or if the package is not on PyPI.
    """
    if '@' in req:
        name, version = req.split('@', 1)
        version_hint = version
    else:
        name = req
        version = None
        version_hint = 'latest release'

    cfg = project.open_config()
    info = _get_package_version_from_project(cfg, name)
    if info is not None:
        raise RuntimeError('The package was already added to the project.')

    # Try the name as given, then with the conventional plugin prefix.
    for choice in name, 'lektor-' + name:
        # PyPI JSON API: metadata plus per-version release listing.
        rv = requests.get('https://pypi.python.org/pypi/%s/json' % choice)
        if rv.status_code != 200:
            continue

        data = rv.json()
        canonical_name = data['info']['name']
        if version is None:
            version = data['info']['version']
        version_info = data['releases'].get(version)
        if version_info is None:
            raise RuntimeError('Latest requested version (%s) could not '
                               'be found' % version_hint)

        cfg['packages.%s' % canonical_name] = version
        cfg.save()
        return {
            'name': canonical_name,
            'version': version
        }

    raise RuntimeError('The package could not be found on PyPI')
def remove_package_from_project(project, name):
    """Remove *name* from the project's ``packages`` config section.

    Matches case-insensitively, with or without the ``lektor-`` prefix.
    Returns a dict with the removed package's name and version, or None
    (implicitly) if the package was not listed.
    """
    cfg = project.open_config()
    choices = (name.lower(), 'lektor-' + name.lower())
    for pkg, version in cfg.section_as_dict('packages').iteritems():
        if pkg.lower() in choices:
            del cfg['packages.%s' % pkg]
            cfg.save()
            return {
                'name': pkg,
                'version': version
            }
def download_and_install_package(package_root, package=None, version=None,
                                 requirements_file=None):
    """This downloads and installs a specific version of a package.

    Runs ``pip install --target package_root`` via the current Python,
    either for a single ``package`` (optionally pinned with ``==version``)
    or for a ``requirements_file``. Raises RuntimeError on pip failure.
    """
    # XXX: windows
    env = dict(os.environ)
    args = [
        sys.executable,
        '-m', 'pip', 'install', '--target', package_root,
    ]
    if package is not None:
        # Build the pip requirement spec: "name" or "name==version".
        args.append('%s%s%s' % (package, version and '==' or '',
                                version or ''))
    if requirements_file is not None:
        args.extend(('-r', requirements_file))
    rv = portable_popen(args, env=env).wait()
    if rv != 0:
        raise RuntimeError('Failed to install dependency package.')
def install_local_package(package_root, path):
    """This installs a local dependency of a package.

    Installs the package at *path* in editable mode into *package_root*,
    then generates its egg-info in a temp dir to discover and install its
    declared dependencies. Raises RuntimeError on failure.
    """
    # XXX: windows
    env = dict(os.environ)
    env['PYTHONPATH'] = package_root

    # Step 1: generate egg info and link us into the target folder.
    rv = portable_popen([
        sys.executable,
        '-m', 'pip',
        'install', '--editable', path,
        '--install-option=--install-dir=%s' % package_root,
        '--no-deps'
    ], env=env).wait()
    if rv != 0:
        raise RuntimeError('Failed to install local package')

    # Step 2: generate the egg info into a temp folder to find the
    # requirements.
    tmp = tempfile.mkdtemp()
    try:
        rv = portable_popen([
            sys.executable,
            'setup.py', '--quiet', 'egg_info', '--quiet',
            '--egg-base', tmp
        ], cwd=path).wait()
        dirs = os.listdir(tmp)
        # Exactly one egg-info directory is expected.
        if rv != 0 or len(dirs) != 1:
            raise RuntimeError('Failed to create egg info for local package.')
        requires = os.path.join(tmp, dirs[0], 'requires.txt')

        # We have dependencies, install them!
        if os.path.isfile(requires):
            download_and_install_package(package_root,
                                         requirements_file=requires)
    finally:
        shutil.rmtree(tmp)
def get_package_info(path):
    """Returns the name of a package at a path.

    Queries ``setup.py`` for name/author/author-email/license/url and
    returns them in a dict (plus ``path``); setuptools' placeholder
    'UNKNOWN' values are mapped to None.
    """
    rv = portable_popen([
        sys.executable,
        'setup.py', '--quiet', '--name', '--author', '--author-email',
        '--license', '--url',
    ], cwd=path, stdout=PIPE).communicate()[0].splitlines()

    def _process(value):
        # Normalize one output line from setup.py.
        value = value.strip()
        if value == 'UNKNOWN':
            return None
        return value.decode('utf-8', 'replace')

    return {
        'name': _process(rv[0]),
        'author': _process(rv[1]),
        'author_email': _process(rv[2]),
        'license': _process(rv[3]),
        'url': _process(rv[4]),
        'path': path,
    }
def register_package(path):
    """Registers the plugin at the given path with PyPI
    (``setup.py register``)."""
    portable_popen([
        sys.executable,
        'setup.py', 'register'
    ], cwd=path).wait()
def publish_package(path):
    """Builds sdist/wheel distributions of the plugin at the given path
    and uploads them (``setup.py sdist bdist_wheel upload``)."""
    portable_popen([
        sys.executable,
        'setup.py', 'sdist', 'bdist_wheel', 'upload'
    ], cwd=path).wait()
def load_manifest(filename):
    """Parse a package manifest into a dict.

    Lines starting with ``@`` are local packages and map to None;
    ``key=value`` lines map key to value. Unparseable lines are skipped.
    A missing file yields an empty dict; other I/O errors propagate.
    """
    packages = {}
    try:
        with open(filename) as f:
            for line in f:
                if line.startswith('@'):
                    packages[line.strip()] = None
                    continue
                parts = line.strip().split('=', 1)
                if len(parts) == 2:
                    packages[parts[0].strip()] = parts[1].strip()
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
    return packages
def write_manifest(filename, packages):
    """Serialize a package->version mapping to a manifest file.

    Entries are written sorted by name; ``@``-prefixed local packages
    are written bare, others as ``name=version``.
    """
    lines = []
    for name, version in sorted(packages.items()):
        if name.startswith('@'):
            lines.append('%s\n' % name)
        else:
            lines.append('%s=%s\n' % (name, version))
    with open(filename, 'w') as f:
        f.writelines(lines)
def list_local_packages(path):
    """Lists all local packages below a path that could be installed.

    A directory counts as a package if it contains a ``setup.py``.
    Returned names are prefixed with ``@``. A missing/unreadable *path*
    yields an empty list.
    """
    try:
        entries = os.listdir(path)
    except OSError:
        return []
    return ['@' + entry for entry in entries
            if os.path.isfile(os.path.join(path, entry, 'setup.py'))]
def update_cache(package_root, remote_packages, local_package_path,
                 refresh=False):
    """Updates the package cache at package_root for the given dictionary
    of packages as well as packages in the given local package path.

    * remote_packages: mapping of package name -> pinned version.
    * local_package_path: folder whose subdirectories (with setup.py)
      are installed as '@'-prefixed local packages.
    * refresh: force a full wipe and reinstall of the cache.

    The installed state is tracked in a lektor-packages.manifest file;
    any version change or removed package forces a full wipe.
    """
    requires_wipe = False
    if refresh:
        click.echo('Force package cache refresh.')
        requires_wipe = True

    manifest_file = os.path.join(package_root, 'lektor-packages.manifest')
    local_packages = list_local_packages(local_package_path)

    old_manifest = load_manifest(manifest_file)
    to_install = []

    all_packages = dict(remote_packages)
    all_packages.update((x, None) for x in local_packages)

    # step 1: figure out which remote packages to install.
    for package, version in remote_packages.iteritems():
        old_version = old_manifest.pop(package, None)
        if old_version is None:
            to_install.append((package, version))
        elif old_version != version:
            # Version changed: pip --target cannot upgrade in place.
            requires_wipe = True

    # step 2: figure out which local packages to install
    for package in local_packages:
        if old_manifest.pop(package, False) is False:
            to_install.append((package, None))

    # Bad news, we need to wipe everything.
    # Anything left in old_manifest was removed from the project.
    if requires_wipe or old_manifest:
        try:
            shutil.rmtree(package_root)
        except OSError:
            pass
        to_install = all_packages.items()

    if to_install:
        click.echo('Updating packages in %s for project' % package_root)
        try:
            os.makedirs(package_root)
        except OSError:
            pass
        for package, version in to_install:
            if package[:1] == '@':
                install_local_package(package_root,
                                      os.path.join(local_package_path, package[1:]))
            else:
                download_and_install_package(package_root, package, version)
        write_manifest(manifest_file, all_packages)
def add_site(path):
    """This adds a path to as proper site packages to all associated import
    systems.  Primarily it invokes `site.addsitedir` and also configures
    pkg_resources' metadata accordingly.
    """
    site.addsitedir(path)
    ws = pkg_resources.working_set
    # Register the path with the working set so distributions found
    # there can contribute entry points.
    ws.entry_keys.setdefault(path, [])
    ws.entries.append(path)
    for dist in pkg_resources.find_distributions(path, False):
        ws.add(dist, path, insert=True)
def load_packages(env, reinstall=False):
    """This loads all the packages of a project.  What this does is updating
    the current cache in ``root/package-cache`` and then add the Python
    modules there to the load path as a site directory and register it
    appropriately with pkg_resource's workingset.

    Afterwards all entry points should function as expected and imports
    should be possible.

    * reinstall: when True, the package cache is wiped and rebuilt.
    """
    config = env.load_config()
    package_root = env.project.get_package_cache_path()
    update_cache(package_root, config['PACKAGES'],
                 os.path.join(env.root_path, 'packages'),
                 refresh=reinstall)
    add_site(package_root)
def wipe_package_cache(env):
    """Delete the project's entire package cache directory.

    Missing or non-removable caches are silently ignored.
    """
    cache_path = env.project.get_package_cache_path()
    try:
        shutil.rmtree(cache_path)
    except (OSError, IOError):
        # Cache may not exist yet; that is fine.
        pass
| 31 | 78 | 0.595439 |
ace2e65172275a29c93d754cf3ba93cd7d9d5443 | 21,399 | py | Python | results-processor/wptreport.py | machogallo/wpt.fyi | 1068a6b1397a0aa74ecbf9d7ce697d2194304fa5 | [
"BSD-3-Clause"
] | null | null | null | results-processor/wptreport.py | machogallo/wpt.fyi | 1068a6b1397a0aa74ecbf9d7ce697d2194304fa5 | [
"BSD-3-Clause"
] | null | null | null | results-processor/wptreport.py | machogallo/wpt.fyi | 1068a6b1397a0aa74ecbf9d7ce697d2194304fa5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2018 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import gzip
import hashlib
import io
import json
import logging
import os
import re
import tempfile
from datetime import datetime, timezone
from typing import (
Any,
Callable,
Dict,
IO,
Iterator,
List,
Optional,
Set,
Union,
cast,
)
import requests
from mypy_extensions import TypedDict
import config
DEFAULT_PROJECT = 'wptdashboard'
# Maps browser release-channel names (as reported in run_info) onto the
# dashboard's three-label taxonomy.
CHANNEL_TO_LABEL = {
    'release': 'stable',
    'stable': 'stable',
    'beta': 'beta',
    'dev': 'experimental',
    'experimental': 'experimental',
    'nightly': 'experimental',
    'preview': 'experimental',
}
# Ignore inconsistent browser minor versions for now.
# TODO(Hexcles): Remove this when the TC decision task is implemented.
IGNORED_CONFLICTS = {'browser_build_id', 'browser_changeset'}

_log = logging.getLogger(__name__)
class RunInfo(TypedDict, total=False):
    # Subset of the wptreport run_info fields consumed here.
    # total=False: any field may be absent from a given report.
    product: str
    browser_version: str
    browser_channel: str
    revision: str
    os: str
    os_version: str
class RawWPTReport(TypedDict, total=False):
    # Shape of a (possibly partial) parsed wptreport.json payload.
    results: List[Dict]
    run_info: RunInfo
    time_start: float
    time_end: float
class WPTReportError(Exception):
    """Base class for all input-related exceptions."""

    def __init__(self, message: str,
                 path: Optional[Union[str, List[str]]] = None) -> None:
        self.message = message
        self.path = path

    def __str__(self) -> str:
        # Append the offending path(s), when provided, in parentheses.
        suffix = " (%s)" % (self.path,) if self.path else ""
        return self.message + suffix
class InvalidJSONError(WPTReportError):
    """Raised when a report file cannot be parsed as JSON."""
    def __init__(self) -> None:
        super(InvalidJSONError, self).__init__("Invalid JSON")
class MissingMetadataError(WPTReportError):
    """Raised when a required run_info/metadata key is absent."""
    def __init__(self, key: str) -> None:
        super(MissingMetadataError, self).__init__(
            "Missing required metadata '%s'" %
            (key,)
        )
class InsufficientDataError(WPTReportError):
    """Raised when a report chunk lacks the 'results' field."""
    def __init__(self) -> None:
        super(InsufficientDataError, self).__init__("Missing 'results' field")
class ConflictingDataError(WPTReportError):
    """Raised when merged report chunks disagree on a metadata key."""
    def __init__(self, key: str) -> None:
        super(ConflictingDataError, self).__init__(
            "Conflicting '%s' found in the merged report" % (key,)
        )
class BufferedHashsum(object):
    """A simple buffered hash calculator."""

    def __init__(self,
                 hash_ctor: Callable = hashlib.sha1,
                 block_size: int = 1024 * 1024) -> None:
        assert block_size > 0
        self._hash = hash_ctor()
        self._block_size = block_size

    def hash_file(self, fileobj: IO[bytes]) -> None:
        """Feeds a file's contents into the running hash.

        Calling this on multiple files is equivalent to hashing their
        concatenation.

        Args:
            fileobj: A file object to hash (must be in binary mode).
        """
        assert not isinstance(fileobj, io.TextIOBase)
        while True:
            block = fileobj.read(self._block_size)
            if not block:
                break
            self._hash.update(block)

    def hashsum(self) -> str:
        """Returns the hexadecimal digest of the current hash."""
        return cast(str, self._hash.hexdigest())
class WPTReport(object):
"""An abstraction of wptreport.json with some transformation features."""
def __init__(self) -> None:
    # Running hash over all raw input files loaded into this report.
    self._hash = BufferedHashsum()
    # Merged wptreport payload; chunks accumulate into these fields.
    self._report: RawWPTReport = {
        'results': [],
        'run_info': {},
    }
    # Lazily-populated summary (presumably test path -> counts — confirm).
    self._summary: Dict[str, List[int]] = {}
def _add_chunk(self, chunk: RawWPTReport) -> None:
    # Merge one parsed wptreport chunk into the accumulated report.
    self._report['results'].extend(chunk['results'])

    def update_property(key: str, source: Dict, target: Dict,
                        conflict_func: Optional[Callable] = None) -> None:
        """Updates target[key] if source[key] is set.

        If target[key] is already set, use conflict_func to resolve the
        conflict or raise an exception if conflict_func is None.
        """
        if key not in source:
            return
        if key in target and source[key] != target[key]:
            if conflict_func:
                target[key] = conflict_func(source[key], target[key])
            else:
                raise ConflictingDataError(key)
        else:
            target[key] = source[key]

    if 'run_info' in chunk:
        def ignore_conflict(a, b):
            # For IGNORED_CONFLICTS keys, disagreement collapses to None.
            return a if a == b else None

        for key in chunk['run_info']:
            update_property(
                key,
                cast(Dict, chunk['run_info']),
                cast(Dict, self._report['run_info']),
                ignore_conflict if key in IGNORED_CONFLICTS else None,
            )
    # The merged time range spans the earliest start and latest end.
    update_property(
        'time_start', cast(Dict, chunk), cast(Dict, self._report), min)
    update_property(
        'time_end', cast(Dict, chunk), cast(Dict, self._report), max)
def load_file(self, filename: str) -> None:
    """Loads wptreport from a local path.

    Dispatches on the extension: ".gz" files are gunzipped first,
    everything else is treated as plain JSON.

    Args:
        filename: Filename of the report (the file can be gzipped if
            the extension is ".gz").
    """
    with open(filename, mode='rb') as f:
        if filename.endswith('.gz'):
            self.load_gzip_json(f)
        else:
            self.load_json(f)
def load_json(self, fileobj: IO[bytes]) -> None:
"""Loads wptreport from a JSON file.
This method can be called multiple times to load and merge new chunks.
Args:
fileobj: A JSON file object (must be in binary mode).
Raises:
InsufficientDataError if the file does not contain a results field;
ConflictingDataError if the current file contains information
conflicting with existing data (from previous files).
"""
assert not isinstance(fileobj, io.TextIOBase)
self._hash.hash_file(fileobj)
fileobj.seek(0)
# JSON files are always encoded in UTF-8 (RFC 8529).
with io.TextIOWrapper(fileobj, encoding='utf-8') as text_file:
try:
report = json.load(text_file, strict=False)
except json.JSONDecodeError as e:
raise InvalidJSONError from e
# Raise when 'results' is either not found or empty.
if 'results' not in report:
raise InsufficientDataError
self._add_chunk(report)
def load_gzip_json(self, fileobj: IO[bytes]) -> None:
"""Loads wptreport from a gzipped JSON file.
Args:
fileobj: A gzip file object.
"""
# Gzip is always opened in binary mode (in fact, r == rb for gzip).
with gzip.GzipFile(fileobj=fileobj, mode='rb') as gzip_file:
self.load_json(cast(IO[bytes], gzip_file))
def update_metadata(self, revision: str = '',
browser_name: str = '', browser_version: str = '',
os_name: str = '', os_version: str = '') -> None:
"""Overwrites metadata of the report."""
# Unfortunately, the names of the keys don't exactly match.
if revision:
self._report['run_info']['revision'] = revision
if browser_name:
self._report['run_info']['product'] = browser_name
if browser_version:
self._report['run_info']['browser_version'] = browser_version
if os_name:
self._report['run_info']['os'] = os_name
if os_version:
self._report['run_info']['os_version'] = os_version
@staticmethod
def write_json(fileobj: IO[bytes], payload: Any) -> None:
"""Encode an object to JSON and writes it to disk.
Args:
fileobj: A file object to write to.
payload: An object that can be JSON encoded.
"""
# json.dump only produces ASCII characters by default.
if isinstance(fileobj, io.TextIOBase):
json.dump(payload, fileobj)
else:
with io.TextIOWrapper(fileobj, encoding='ascii') as text_file:
json.dump(payload, text_file)
@staticmethod
def write_gzip_json(filepath: str, payload: Any) -> None:
"""Encode an object to JSON and writes it to disk.
Args:
filepath: A file path to write to. All intermediate directories
in the path will be automatically created.
payload: An object that can be JSON encoded.
"""
if os.path.dirname(filepath):
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, 'wb') as f:
with gzip.GzipFile(fileobj=f, mode='wb') as gz:
WPTReport.write_json(cast(IO[bytes], gz), payload)
@property
def results(self) -> List[Dict]:
"""The 'results' field of the report."""
return self._report['results']
@property
def run_info(self) -> RunInfo:
"""The 'run_info' field of the report."""
return self._report['run_info']
def hashsum(self) -> str:
"""Hex checksum of the decompressed, concatenated report."""
return self._hash.hashsum()
def summarize(self) -> Dict[str, List[int]]:
"""Creates a summary of all the test results.
The summary will be cached after the first call to this method.
Returns:
A summary dictionary.
Raises:
ConflictingDataError if a test appears multiple times in results.
MissingMetadataError if any required metadata is missing.
"""
if self._summary:
return self._summary
for result in self.results:
test_file = result['test'].strip()
if test_file in self._summary:
raise ConflictingDataError(test_file)
if result['status'] in ('OK', 'PASS'):
self._summary[test_file] = [1, 1]
else:
self._summary[test_file] = [0, 1]
for subtest in result['subtests']:
if subtest['status'] == 'PASS':
self._summary[test_file][0] += 1
self._summary[test_file][1] += 1
return self._summary
def each_result(self) -> Iterator[Any]:
"""Iterates over all the individual test results.
Returns:
A generator.
"""
return (result for result in self.results)
def write_summary(self, filepath: str) -> None:
"""Writes the summary JSON file to disk.
Args:
filepath: A file path to write to.
"""
self.write_gzip_json(filepath, self.summarize())
def write_result_directory(self, directory: str) -> None:
"""Writes individual test results to a directory.
Args:
directory: The base directory to write to.
"""
if directory.endswith('/'):
directory = directory[:-1]
for result in self.each_result():
test_file = result['test'].strip()
assert test_file.startswith('/')
filepath = directory + test_file
self.write_gzip_json(filepath, result)
def product_id(self, separator: str = '-', sanitize: bool = False) -> str:
"""Returns an ID string for the product configuration.
Args:
separator: A character to separate fields in the ID string.
sanitize: Whether to sanitize (replace them with underscores)
characters in the product ID that are not URL-safe.
Returns:
A string, the product ID of this run.
"""
name = separator.join([self.run_info['product'],
self.run_info['browser_version'],
self.run_info['os']])
# os_version isn't required.
if self.run_info.get('os_version'):
name += separator + self.run_info['os_version']
hashsum = self.hashsum()
assert len(hashsum) > 0, 'Missing hashsum of the report'
name += separator + hashsum[:10]
if sanitize:
name = re.sub('[^A-Za-z0-9._-]', '_', name)
return name
def populate_upload_directory(self,
output_dir: Optional[str] = None) -> str:
"""Populates a directory suitable for uploading to GCS.
The directory structure is as follows:
[output_dir]:
- [sha][:10]:
- [product]-summary.json.gz
- [product]:
- (per-test results produced by write_result_directory)
Args:
output_dir: A given output directory instead of a temporary one.
Returns:
The output directory.
"""
if not output_dir:
output_dir = tempfile.mkdtemp()
self.write_summary(os.path.join(output_dir, self.sha_summary_path))
self.write_result_directory(
os.path.join(output_dir, self.sha_product_path))
return output_dir
@property
def sha_product_path(self) -> str:
"""A relative path: sha/product_id"""
try:
return os.path.join(self.run_info['revision'],
self.product_id(separator='-', sanitize=True))
except KeyError as e:
# str(e) gives the name of the key.
raise MissingMetadataError(str(e)) from e
@property
def sha_summary_path(self) -> str:
"""A relative path: sha/product_id-summary.json.gz"""
return self.sha_product_path + '-summary.json.gz'
@property
def test_run_metadata(self) -> Dict[str, str]:
"""Returns a dict of metadata.
The dict can be used as the payload for the test run creation API.
Raises:
MissingMetadataError if any required metadata is missing.
"""
# Required fields:
try:
payload = {
'browser_name': self.run_info['product'],
'browser_version': self.run_info['browser_version'],
'os_name': self.run_info['os'],
'revision': self.run_info['revision'][:10],
'full_revision_hash': self.run_info['revision'],
}
except KeyError as e:
# str(e) gives the name of the key.
raise MissingMetadataError(str(e)) from e
# Optional fields:
if self.run_info.get('os_version'):
payload['os_version'] = self.run_info['os_version']
def microseconds_to_iso(ms_since_epoch: float) -> str:
dt = datetime.fromtimestamp(ms_since_epoch / 1000, timezone.utc)
return dt.isoformat()
if self._report.get('time_start'):
payload['time_start'] = microseconds_to_iso(
self._report['time_start'])
if self._report.get('time_end'):
payload['time_end'] = microseconds_to_iso(
self._report['time_end'])
return payload
def normalize_version(self) -> None:
m = re.match(r'Technology Preview \(Release (\d+), (.*)\)',
self.run_info.get('browser_version', ''))
if m:
self.run_info['browser_version'] = m.group(1) + ' preview'
def finalize(self):
"""Checks and finalizes the report.
Populates all in-memory states (summary & metadata) and raises
exceptions if any check fails.
Raises:
Exceptions inherited from WPTReportError.
"""
self.summarize()
# Additonal final fixup:
self.normalize_version()
# Access two property methods which will raise exceptions if any
# required field is missing.
self.sha_product_path
self.test_run_metadata
def serialize_gzip(self, filepath):
"""Serializes and gzips the in-memory report to a file.
Args:
filepath: A file path to write to.
"""
self.write_gzip_json(filepath, self._report)
def prepare_labels(report: WPTReport,
                   labels_str: str,
                   uploader: str) -> Set[str]:
    """Prepares the list of labels for a test run.
    The following labels will be automatically added:
    * The name of the uploader
    * The name of the browser
    * The release channel of the browser (if the uploader doesn't provide one)
    Args:
        report: A WPTReport.
        labels_str: A comma-separated string of labels from the uploader.
        uploader: The name of the uploader.
    Returns:
        A set of strings.
    """
    labels = {report.run_info['product'], uploader}
    # Empty labels may be generated here, but they are removed at the end.
    labels.update(label.strip() for label in labels_str.split(','))
    # Add the release channel label.
    channel = report.run_info.get('browser_channel')
    if channel:
        labels.add(channel)
        if channel in CHANNEL_TO_LABEL:
            labels.add(CHANNEL_TO_LABEL[channel])
    elif not any(i in labels for i in set(CHANNEL_TO_LABEL.values())):
        # Default to "stable" when neither the report nor the uploader
        # provides any channel information.
        labels.add('stable')
    # Remove any empty labels.
    labels.discard('')
    return labels
def normalize_product(report: "WPTReport") -> Set[str]:
    """Normalizes the product identifier in the report.
    In addition to modifying the 'product' of the report, this function also
    returns a set of labels that need to be added.
    Args:
        report: A WPTReport
    Returns:
        A set of strings (empty if the product needs no normalization).
    """
    # Raw product identifier -> (normalized product, labels to add).
    # A data-driven table keeps new aliases a one-line change.
    aliases = {
        'edge_webdriver': ('edge', {'edge', 'webdriver', 'edge_webdriver'}),
        'edgechromium': ('edge', {'edge', 'edgechromium'}),
        'webkitgtk_minibrowser': ('webkitgtk', {'webkitgtk', 'minibrowser'}),
    }
    product = report.run_info['product']
    if product in aliases:
        normalized, labels = aliases[product]
        report.run_info['product'] = normalized
        # Return a copy so callers can mutate the result freely.
        return set(labels)
    return set()
def create_test_run(report, run_id, labels_str, uploader, auth,
                    results_url, raw_results_url, callback_url=None):
    """Creates a TestRun on the dashboard.
    By posting to the /api/results/create endpoint.
    Args:
        report: A WPTReport.
        run_id: The pre-allocated Datastore ID for this run.
        labels_str: A comma-separated string of labels from the uploader.
        uploader: The name of the uploader.
        auth: A (username, password) tuple for HTTP basic auth.
        results_url: URL of the gzipped summary file. (e.g.
            'https://.../wptd/0123456789/chrome-62.0-linux-summary.json.gz')
        raw_results_url: URL of the raw full report. (e.g.
            'https://.../wptd-results/[FullSHA]/chrome-62.0-linux/report.json')
        callback_url: Optional override of the creation endpoint; defaults to
            the project's /api/results/create URL.
    Returns:
        The integral ID associated with the created test run.
    Raises:
        requests.HTTPError if the endpoint returns an error status.
    """
    if callback_url is None:
        callback_url = config.project_baseurl() + '/api/results/create'
    labels = prepare_labels(report, labels_str, uploader)
    assert len(labels) > 0
    # Product normalization may add extra labels (e.g. edgechromium -> edge).
    labels |= normalize_product(report)
    payload = report.test_run_metadata
    # run_id of 0 means "let the server allocate an ID".
    if int(run_id) != 0:
        payload['id'] = int(run_id)
    payload['results_url'] = results_url
    payload['raw_results_url'] = raw_results_url
    payload['labels'] = sorted(labels)
    response = requests.post(callback_url, auth=auth, json=payload)
    response.raise_for_status()
    response_data = response.json()
    return response_data['id']
def main() -> None:
    """CLI entry point: parse wptreport files and write summaries/results."""
    parser = argparse.ArgumentParser(
        description='Parse and transform JSON wptreport.')
    parser.add_argument('report', metavar='REPORT', type=str, nargs='+',
                        help='path to a JSON wptreport (gzipped files are '
                        'supported as long as the extension is .gz)')
    parser.add_argument('--summary', type=str,
                        help='if specified, write a gzipped JSON summary to '
                        'this file path')
    parser.add_argument('--output-dir', type=str,
                        help='if specified, write both the summary and '
                        'per-test results (all gzipped) to OUTPUT_DIR/SHA/ ,'
                        'suitable for uploading to GCS (please use an '
                        'empty directory)')
    args = parser.parse_args()
    report = WPTReport()
    # Multiple report chunks are merged into one in-memory report.
    for r in args.report:
        with open(r, 'rb') as f:
            if r.endswith('.gz'):
                report.load_gzip_json(f)
            else:
                report.load_json(f)
    if args.summary:
        report.write_summary(args.summary)
    if args.output_dir:
        upload_dir = report.populate_upload_directory(
            output_dir=args.output_dir)
        _log.info('Populated: %s', upload_dir)
if __name__ == '__main__':
    _log.setLevel(logging.INFO)
    main()
| 33.383775 | 79 | 0.59428 |
ace2e7514f49167e93ab2d8b48e87109ce6cbc23 | 39,483 | py | Python | planning_and_simulation_modules/Step0_dialog.py | Planheat/Planheat-Tool | 9764fcb86d3898b232c4cc333dab75ebe41cd421 | [
"MIT"
] | 2 | 2020-04-07T03:43:33.000Z | 2021-03-23T13:17:42.000Z | planning_and_simulation_modules/Step0_dialog.py | Planheat/Planheat-Tool | 9764fcb86d3898b232c4cc333dab75ebe41cd421 | [
"MIT"
] | 1 | 2020-07-20T09:56:13.000Z | 2020-07-22T10:26:06.000Z | planning_and_simulation_modules/Step0_dialog.py | Planheat/Planheat-Tool | 9764fcb86d3898b232c4cc333dab75ebe41cd421 | [
"MIT"
] | 1 | 2020-07-20T09:40:15.000Z | 2020-07-20T09:40:15.000Z | import os
import os.path
import csv
import pickle
import processing
from .Tjulia.test.MyLog import MyLog
import geopandas as gpd
import osmnx as ox
from PyQt5 import uic, QtWidgets
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSignal, QVariant
from PyQt5.QtWidgets import QFileDialog, QTreeWidgetItem, QTableWidget, QTableWidgetItem
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtWidgets import QMessageBox
from qgis.core import (QgsProject, QgsVectorLayer, QgsField, QgsRasterLayer,
QgsFeature, QgsVertexId, QgsMultiPoint, QgsGeometry, QgsCoordinateTransform)
from .dhcoptimizerplanheat.streets_downloader import streetsDownloader
from .layer_utils import load_file_as_layer, load_open_street_maps
from .dialogSources import CheckSourceDialog
from .utility.SourceAvailability import SourceAvailability
from .utility.SourceAvailabilityPostCalculation import SourceAvailabilityPostCalculation
from .utility.data_manager.DataTransfer import DataTransfer
from .city.src.FileManager import FileManager
from . import master_planning_config as mp_config
# Load the Qt Designer form for this dock widget at import time; FORM_CLASS
# becomes a mixin providing setupUi() and the widgets defined in the .ui file.
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'ui', 'Step0dockwidget.ui'))
class Step0Dialog(QtWidgets.QDockWidget, FORM_CLASS):
    """Dock widget for "Step 0": importing mapping-module outputs (SMM/DMM
    shapefiles, rasters, scenarios) and dispatching them to the other steps.
    """
    # Signals consumed inside this widget and by the other planning steps.
    district_shp_loaded = pyqtSignal()
    buildings_shp_loaded = pyqtSignal()
    buildings_shp_loaded2 = pyqtSignal()
    step0_closing_signal = pyqtSignal()
    file_removed = pyqtSignal()
    send_data_to_step2 = pyqtSignal(dict, dict)
    buildings_shp_loaded_step1signal = pyqtSignal(QgsVectorLayer)
    buildings_shp_loaded_step4signal = pyqtSignal(QgsVectorLayer)
    step0_all_import_complete = pyqtSignal(QgsVectorLayer, DataTransfer)
    # csv_loaded = pyqtSignal(QTableWidget)
    send_tab_sources = pyqtSignal(QTableWidget)
    # Hours in a year and in a day, used for hourly temperature profiles.
    h8760 = 8760
    h24 = 24
    # Month length (days) -> list of month numbers with that length.
    day_per_month = {28: [2], 30: [11, 4, 6, 9], 31: [1, 3, 5, 7, 8, 10, 12]}
    def __init__(self, iface, parent=None, work_folder=None):
        """Constructor.

        Args:
            iface: The QGIS interface object (used for the message bar).
            parent: Optional Qt parent widget.
            work_folder: Optional working directory for this planning session.
        """
        super(Step0Dialog, self).__init__(parent)
        # Set up the user interface from Designer through FORM_CLASS.
        # After self.setupUi() you can access any designer object by doing
        # self.<objectname>, and you can use autoconnect slots - see
        # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
        # #widgets-and-dialogs-with-auto-connect
        self.setupUi(self)
        self.work_folder = work_folder
        self.listWidget.hide()
        self.comboLayer.hide()
        self.iface = iface
        # Wire internal signals to the slots that refresh the UI tables.
        self.district_shp_loaded.connect(self.fill_district_menu)
        self.buildings_shp_loaded.connect(self.fill_buildings_table)
        self.buildings_shp_loaded2.connect(self.fill_buildings_table_future)
        self.listWidget.itemChanged.connect(self.list_district_select)
        # QgsProject.instance().layersAdded.connect(self.fill_layers_combobox)
        # QgsProject.instance().layerWasAdded.connect(self.fill_layers_combobox)
        # QgsProject.instance().layerRemoved.connect(self.fill_layers_combobox)
        self.btnSourcesAvailability.clicked.connect(self.source_availability)
        #self.pushButton_4.clicked.connect(self.download_streets_from_comboBox_selection)
        self.delete_file.clicked.connect(self.delete_import_file)
        self.ok2.clicked.connect(self.send_tab_to_stap3)
        self.baseline_scenario_layer = None
        self.phases.setTabEnabled(0, False)
        self.phases.setTabEnabled(2, False)
        self.all_import_completed = True
        self.geo_graph = None
        # Container handed over to later steps once imports finish.
        self.data_transfer = DataTransfer()
        self.data_transfer.geo_graph = self.geo_graph
        self.data_transfer.buildings = self.baseline_scenario_layer
        # QgsProject.layerRemoved.connect(self.update_source_combobox)
        # QgsProject.layerWasAdded.connect(self.update_source_combobox)
        self.dialog_source = CheckSourceDialog()
        self.fill_layers_combobox(1)
        self.pbar_Download.hide()
        self.label_3.setEnabled(False)
        self.label_9.setEnabled(False)
        self.layerPath2.setEnabled(False)
        self.layerPath3.setEnabled(False)
        self.load1.setEnabled(False)
        self.load2.setEnabled(False)
        # Assign toolbar-style icons to the load/delete/save/import buttons.
        icon = QIcon()
        icon_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icons",
                                 "open_file.png")
        icon.addPixmap(QPixmap(icon_path), QIcon.Normal, QIcon.Off)
        self.load.setIcon(icon)
        self.load1.setIcon(icon)
        self.load2.setIcon(icon)
        self.load3.setIcon(icon)
        self.load4.setIcon(icon)
        #self.load_streets.setIcon(icon)
        self.pushButton_5.setIcon(icon)
        icon = QIcon()
        icon_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icons",
                                 "untitled.png")
        icon.addPixmap(QPixmap(icon_path), QIcon.Normal, QIcon.Off)
        self.delete_file.setIcon(icon)
        icon = QIcon()
        icon_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icons",
                                 "save_as.png")
        icon.addPixmap(QPixmap(icon_path), QIcon.Normal, QIcon.Off)
        self.save_plugin.setIcon(icon)
        icon = QIcon()
        icon_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icons",
                                 "import.png")
        icon.addPixmap(QPixmap(icon_path), QIcon.Normal, QIcon.Off)
        self.pushButton_load_all_files.setIcon(icon)
        self.combo_box_layer = None
        self.future_scenario_layer = None
        # Pre-fill the default DMM/SMM file paths under the current mapping
        # directory; the path widgets stay disabled (read-only defaults).
        default_root = mp_config.CURRENT_MAPPING_DIRECTORY
        self.layerPath.setEnabled(False)#setDefaultRoot(default_root)
        self.layerPath.lineEdit().setText(os.path.join(default_root, "DMM",
                                                       mp_config.DMM_PREFIX+".shp"))
        self.layerPath1.setEnabled(False)#.setDefaultRoot(default_root)
        self.layerPath1.lineEdit().setText(os.path.join(default_root, "DMM",
                                                        mp_config.DMM_PREFIX+mp_config.DMM_FUTURE_SUFFIX+".shp"))
        self.layerPath2.setEnabled(False)#.setDefaultRoot(default_root)
        self.layerPath2.lineEdit().setText(os.path.join(default_root, "DMM",
                                                        mp_config.DMM_PREFIX+".scn"))
        self.layerPath3.setEnabled(False)#.setDefaultRoot(default_root)
        self.layerPath3.lineEdit().setText(os.path.join(default_root, "DMM",
                                                        mp_config.DMM_PREFIX+mp_config.DMM_FUTURE_SUFFIX+mp_config.DMM_HOURLY_SUFFIX+".csv"))
        #self.layerPath_streets.setDefaultRoot(os.path.expanduser("~"))
        self.folder.setEnabled(False)
        self.btnSmm.setEnabled(False)
        self.folder.setText(os.path.join(default_root, "SMM"))
        self.sources_availability = None
        self.sources_temperature = None
        self.cancel.hide()
        self.pushButton_2.hide()
        self.pushButton.hide()
        self.pushButton_3.hide()
    def load_all_files_from_folder(self):
        """Loads every .tif and .shp file found under the SMM folder as QGIS
        layers, skipping files whose layer name is already loaded, then
        refreshes the district list and the source temperatures.
        """
        folder = self.folder.text()
        if not os.path.exists(folder):
            QMessageBox.warning(None, "Warning", "Folder " + folder + " does not exist")
            return
        # Counting number of files to be open in the selected folder
        file_counter = 0
        for root, dirs, files in os.walk(folder):
            for file in files:
                if file.endswith('.tif') or file.endswith('.shp'):
                    file_counter += 1
        if file_counter==0:
            QMessageBox.information(None, "Warning", "Folder " + folder + " does not contain .tif or .shp files. There's nothing to load, here!")
            return
        # setting progress bar
        self.progressBar.setMaximum(file_counter)
        self.progressBar.setMinimum(0)
        self.progressBar.setValue(0)
        self.progressBar.show()
        progress = 0
        # loading all .tif and .shp files. Files already loaded are ignored.
        for root, dirs, files in os.walk(folder):
            for filename in files:
                if filename[-4:] == ".shp":
                    progress = progress + 1
                    file_path = folder + "\\" + filename
                    if not QgsProject.instance().mapLayersByName(filename):
                        load_file_as_layer(file_path, filename, 'Shapefiles from SMM', min_val=None,
                                           max_val=None, mean_val=None, value_color=None, area=None)
                    else:
                        print("File " + file_path + "seems to be already loaded! Skipping file...")
                if filename[-4:] == ".tif":
                    progress = progress + 1
                    file_path = folder + "\\" + filename
                    if not QgsProject.instance().mapLayersByName(filename):
                        load_file_as_layer(file_path, filename, 'Raster layers from SMM',
                                           min_val=None, max_val=None, mean_val=None, value_color=None, area=None)
                    else:
                        print("File " + file_path + "seems to be already loaded! Skipping file...")
                self.progressBar.setValue(progress)
        load_open_street_maps()
        # emit signal to activate and fill (reload) the district list in the district tab of step0
        self.district_shp_loaded.emit()
        self.progressBar.hide()
        self.get_temperature_from_mapping_module(folder)
def load_folder(self):
folder = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
self.folder.setText(folder)
    def load_shp_file4(self):
        """Loads (or reuses) the 'baseline scenario' buildings layer and
        notifies the listeners of this step and of step 1.
        """
        print("Downloading baseline scenario ...")
        layer_list = QgsProject.instance().mapLayersByName("baseline scenario")
        if len(layer_list)>0:
            # Layer already in the project: reuse it instead of reloading.
            self.baseline_scenario_layer = layer_list[0]
        else:
            layerShp = self.layerPath.filePath()
            self.baseline_scenario_layer = load_file_as_layer(layerShp, 'baseline scenario', None, min_val=None,
                                                              max_val=None, mean_val=None, value_color=None, area=None)
        self.buildings_shp_loaded.emit()
        self.buildings_shp_loaded_step1signal.emit(self.baseline_scenario_layer)
        print("End downloading baseline scenario")
    def load_shp_file3(self):
        """Loads the 'future scenario' buildings layer and notifies the
        listeners of this step and of step 4.
        """
        print("Downloading future scenario ...")
        Flayer = self.layerPath1.filePath()
        self.future_scenario_layer= load_file_as_layer(Flayer, 'future scenario', None, min_val=None,
                                                       max_val=None, mean_val=None, value_color=None, area=None)
        self.buildings_shp_loaded2.emit()
        self.buildings_shp_loaded_step4signal.emit(self.future_scenario_layer)
        print("End downloading future scenario ...")
    def fill_district_menu(self):
        """Repopulates the checkable district list from the
        'projection_helper.shp' layer (attribute index 2 holds the
        district code -- TODO confirm against the shapefile schema).
        """
        layer_list = QgsProject.instance().mapLayersByName("projection_helper.shp")
        if len(layer_list) > 0:
            layer = layer_list[0]
        else:
            layer = None
        if layer is not None:
            #retrieve district code list and update
            self.listWidget.clear()
            district_code_list = []
            features = layer.getFeatures()
            for feature in features:
                district_code_list.append(feature.attribute(2))
            self.listWidget.addItems(district_code_list)
            # Make every item user-checkable and initially unchecked.
            for i in range(self.listWidget.count()):
                self.listWidget.item(i).setFlags(self.listWidget.item(i).flags() | QtCore.Qt.ItemIsUserCheckable)
                self.listWidget.item(i).setCheckState(QtCore.Qt.Unchecked)
    def add_district_selection(self):
        """Checks, in the district list, every district currently selected
        on the 'projection_helper.shp' map layer (matched by the attribute
        at index 2). Existing checks are preserved.
        """
        layer_list = QgsProject.instance().mapLayersByName("projection_helper.shp")
        if len(layer_list) > 0:
            layer = layer_list[0]
        else:
            layer = None
        if layer is not None:
            features = layer.selectedFeatures()
            for i in range(self.listWidget.count()):
                for feature in features:
                    if self.listWidget.item(i).text() == feature.attribute(2):
                        self.listWidget.item(i).setCheckState(QtCore.Qt.Checked)
    def override_district_selection(self):
        """Replaces the current checks with the map selection: unchecks all
        districts, then re-applies add_district_selection().
        """
        # uncheck all the district and add the new selection
        for i in range(self.listWidget.count()):
            self.listWidget.item(i).setCheckState(QtCore.Qt.Unchecked)
        self.add_district_selection()
def reverse_district_selection(self):
# check the state of each item and change it
for i in range(self.listWidget.count()):
if self.listWidget.item(i).checkState():
self.listWidget.item(i).setCheckState(QtCore.Qt.Unchecked)
else:
self.listWidget.item(i).setCheckState(QtCore.Qt.Checked)
    def loadScenario(self):
        """Loads a pickled scenario (.scn) file and displays its name,
        version and country in the widget.

        NOTE(review): pickle.load executes arbitrary code from the file --
        only open .scn files produced by this tool / trusted sources.
        """
        fname = self.layerPath2.filePath()
        with open(fname, "rb") as fichero:
            load_data = pickle.load(fichero)
        self.pname.setText(load_data.scenarioName)
        self.pname.show()
        self.label_6.show()
        self.areaStudy.setText(str(load_data.scenarioVersion))
        self.areaStudy.show()
        self.label_7.show()
        self.country.setText(load_data.country)
        self.country.show()
        self.label_8.show()
    def fill_buildings_table(self):
        """Fills the baseline buildings tree widget from the
        'baseline scenario' layer; missing attribute fields are reported once
        via the QGIS message bar and shown as empty cells.
        """
        layer_list = QgsProject.instance().mapLayersByName("baseline scenario")
        if len(layer_list) > 0:
            layer = layer_list[0]
        else:
            layer = None
        if layer is not None:
            features = layer.getFeatures()
            building_item_list = []
            # Expected demand/geometry attributes of the DMM shapefile.
            fields = ["BuildingID", "AHeatDem", "AHeatDemM2", "ACoolDem", "ACoolDemM2", "ADHWDem",
                      "ADHWDemM2", "MaxHeatDem", "MaxCoolDem", "MaxDHWDem", "Use", "GrossFA"]
            missing_fields = set()
            for feature in features:
                if len(feature.attributes()) > 13:
                    # Field names
                    # BuildingID AHeatDem AHeatDemM2 ACoolDem ACoolDemM2 ADHWDem ADHWDemDM2 MaxHeatDem MaxCoolDem MaxDHWDem USE, GrossFloor area
                    #string_list = [str(feature.attribute(3)), str(feature.attribute(5)), str(feature.attribute(6)),
                    #               str(feature.attribute(7)), str(feature.attribute(8)), str(feature.attribute(9)),
                    #               str(feature.attribute(10)), str(feature.attribute(11)), str(feature.attribute(12)),
                    #               str(feature.attribute(13)), str(feature.attribute(14)), str(feature.attribute(18))]
                    string_list = []
                    for f in fields:
                        try:
                            string_list.append(str(feature[f]))
                        except:
                            # Field missing in this shapefile: remember it and
                            # leave the cell blank.
                            missing_fields.add(f)
                            string_list.append('')
                    building_item_list.append(QTreeWidgetItem(string_list))
            self.pmTree.addTopLevelItems(building_item_list)
            if len(missing_fields) > 0:
                self.iface.messageBar().pushMessage("Field {0} is missing in ths baseline shape file".format(missing_fields), level=1, duration=0)
    def fill_buildings_table_future(self):
        """Fills the future buildings tree widget from the 'future scenario'
        layer. NOTE(review): near-duplicate of fill_buildings_table --
        candidate for a shared helper.
        """
        layer_name = "future scenario"
        layer_list2 = QgsProject.instance().mapLayersByName(layer_name)
        flag = True
        if len(layer_list2) > 0:
            layer2 = layer_list2[0]
        else:
            layer2 = None
        if layer2 is not None:
            features2 = layer2.getFeatures()
            building_item_list2 = []
            # Expected demand/geometry attributes of the DMM shapefile.
            fields = ["BuildingID", "AHeatDem", "AHeatDemM2", "ACoolDem", "ACoolDemM2", "ADHWDem",
                      "ADHWDemM2", "MaxHeatDem", "MaxCoolDem", "MaxDHWDem", "Use", "GrossFA"]
            missing_fields = set()
            for feature in features2:
                if len(feature.attributes()) > 13:
                    # Field names
                    # BuildingID AHeatDem AHeatDemM2 ACoolDem ACoolDemM2 ADHWDem ADHWDemDM2 MaxHeatDem MaxCoolDem MaxDHWDem USE, GrossFloor area
                    #string_list = [str(feature.attribute(3)), str(feature.attribute(5)), str(feature.attribute(6)),
                    #               str(feature.attribute(7)), str(feature.attribute(8)), str(feature.attribute(9)),
                    #               str(feature.attribute(10)), str(feature.attribute(11)), str(feature.attribute(12)),
                    #               str(feature.attribute(13)), str(feature.attribute(14)), str(feature.attribute(18))]
                    string_list2 = []
                    for f in fields:
                        try:
                            string_list2.append(str(feature[f]))
                        except:
                            missing_fields.add(f)
                            string_list2.append('')
                    building_item_list2.append(QTreeWidgetItem(string_list2))
            self.pmTree2.addTopLevelItems(building_item_list2)
            if len(missing_fields) > 0:
                self.iface.messageBar().pushMessage("Field {0} is missing in ths future shape file".format(missing_fields), level=1, duration=0)
    def load_csv_file(self):
        """Reloads the hourly sources CSV for the currently checked districts."""
        self.list_district_select()
        # self.csv_loaded.emit(self.sources_available)
    # Compares the selected districts with the CSV columns and fills the
    # sources table accordingly.
    def list_district_select(self):
        """Rebuilds the `sources_available` table from the hourly sources CSV
        (tab-separated), keeping only the columns of the checked districts and
        appending a computed "Total" column per source row.
        """
        lista = self.list_district()
        csvFile = self.layerPath3.filePath()
        if not os.path.exists(csvFile):
            return
        data = []
        self.sources_available.clear()
        self.sources_available.insertRow(0)
        with open(csvFile)as inputFile:
            csvReader = csv.reader(inputFile, delimiter='\t')
            for row in csvReader:
                data.append(row)
        # Column indices (in the CSV header) of the checked districts.
        index_list = []
        for i in range(len(lista)):
            if lista[i] in data[0][:]:
                index_list.append(data[0][:].index(lista[i]))
        self.sources_available.clear()
        totalColumns = self.sources_available.columnCount()
        headerList = lista
        headerList.insert(0, "Source Description")
        headerList.append("Total")
        # Rebuild the table columns from scratch for the new header.
        self.sources_available.setRowCount(0)
        for i in range(totalColumns - 1, -1, -1):
            self.sources_available.removeColumn(i)
        for j in range(len(lista)):
            self.sources_available.insertColumn(j)
        self.sources_available.setHorizontalHeaderLabels(headerList)
        # data[row][column]
        for k in range(len(data) - 1):
            self.sources_available.insertRow(k)
            for z in range(len(lista)):
                if z == 0:
                    # First column: the source description.
                    self.sources_available.setItem(k, z, QTableWidgetItem(str(data[k + 1][0])))
                else:
                    if z < len(index_list) + 1:
                        self.sources_available.setItem(k, z, QTableWidgetItem(str(data[k + 1][index_list[z - 1]])))
                    else:
                        # Last column: sum across the district columns.
                        self.sources_available.setItem(k, z, QTableWidgetItem(self.somma(k)))
def somma(self, q):
total = 0
try:
column = self.sources_available.columnCount() - 2
for p in range(column):
num = float(self.sources_available.item(q, p + 1).text())
total = total + num
except:
pass
return str(total)
def list_district(self):
# return list of selected district
distretti = []
for i in range(self.listWidget.count()):
if self.listWidget.item(i).checkState():
# self.listWidget.item(i).setCheckState(list.addItem())
distretti.append(self.listWidget.item(i).text())
return distretti
    def reset_function(self):
        """Clears both the baseline and the future building trees."""
        self.pmTree.clear()
        self.pmTree2.clear()
    def closeEvent(self, event):
        """Qt close handler: hide the widget and notify listeners."""
        self.closeStep0()
        event.accept()
    def closeStep0(self):
        """Hides the dock widget and emits the closing signal."""
        self.hide()
        self.step0_closing_signal.emit()
def activate_visualization_tabs(self):
self.listWidget.clear()
self.pushButton_2.setEnabled(False)
self.pushButton_3.setEnabled(False)
self.pushButton.setEnabled(False)
# Districts layer it's expected to be called projection_helper.shp
layer_list = QgsProject.instance().mapLayersByName("projection_helper.shp")
if len(layer_list) > 0:
layer = layer_list[0]
else:
layer = None
if layer is not None and self.baseline_scenario_layer is not None:
if not layer.crs() == self.baseline_scenario_layer.crs():
parameter = {'INPUT': layer, 'TARGET_CRS': self.baseline_scenario_layer.crs().authid(),
'OUTPUT': 'memory:'}
p = processing.run('qgis:reprojectlayer', parameter)
layer = p['OUTPUT']
# do this only if all_import_completed flag it's true
if self.all_import_completed:
# setting progress bar
self.progressBar.setMaximum(self.baseline_scenario_layer.featureCount())
self.progressBar.setMinimum(0)
self.progressBar.setValue(0)
self.progressBar.show()
progress = 0
district_list = []
# for every buildings check in what district it is placed
for feature in self.baseline_scenario_layer.getFeatures():
self.progressBar.setValue(progress)
progress = progress + 1
centroid = feature.geometry().centroid()
for district in layer.getFeatures():
# if building is in a specific district, check that district in listWidget
if district.geometry().contains(centroid):
# add the district name (which contain the building) to a list
if district.id() not in district_list:
district_list.append(district.id())
break
area_analyzed = QgsVectorLayer('Polygon?crs=' + layer.crs().authid(), "", "memory")
area_analyzed.startEditing()
area_analyzed.dataProvider().addAttributes([f for f in layer.fields()])
area_analyzed.updateFields()
area_analyzed.commitChanges()
# Set message bar :
self.iface.messageBar().pushMessage("Loading OSM data", "Please wait...", level=0, duration=0)
self.iface.mainWindow().repaint()
self.progressBar.setMaximum(len(district_list)+1)
self.progressBar.setMinimum(0)
self.progressBar.setValue(0)
self.progressBar.show()
progress = 0
p = streetsDownloader()
streets = []
streets_merged = None
for i in district_list:
progress = progress + 1
# clear the layer
area_analyzed.startEditing()
for f in area_analyzed.getFeatures():
area_analyzed.deleteFeature(f.id())
area_analyzed.commitChanges()
# add next district
area_analyzed.startEditing()
dp = area_analyzed.dataProvider()
dp.addFeatures([layer.getFeature(i)])
area_analyzed.commitChanges()
# if streets_merged is None:
streets.append(p.download_streets_from_osm(area_analyzed))
# It assumes the widget display districts in the same order of .getFeatures()
if self.listWidget.item(i) is not None:
self.listWidget.item(i).setCheckState(QtCore.Qt.Checked)
self.progressBar.setValue(progress)
for i in range(len(streets)):
if streets_merged is None:
streets_merged = streets[i]
else:
streets_merged.startEditing()
dp = streets_merged.dataProvider()
dp.addFeatures([s for s in streets[i].getFeatures()])
streets_merged.commitChanges()
streets_merged.startEditing()
f1 = QgsField('ID', QVariant.Int, 'int', 10)
f2 = QgsField('OSM_origin', QVariant.Int, 'int', 1, 0, '', QVariant.Invalid)
f3 = QgsField('diameter', QVariant.Double, 'double', 16, 3, '', QVariant.Invalid)
streets_merged.dataProvider().addAttributes([f1, f2, f3])
streets_merged.commitChanges()
streets_merged.startEditing()
i = 0
for f in streets_merged.getFeatures():
f.setAttribute(f.fieldNameIndex('ID'), i)
f.setAttribute(f.fieldNameIndex('OSM_origin'), 1)
streets_merged.updateFeature(f)
i = i + 1
streets_merged.commitChanges()
progress = progress + 1
self.progressBar.setValue(progress)
QgsProject.instance().addMapLayer(streets_merged)
self.step0_all_import_complete.emit(streets_merged)
self.progressBar.hide()
self.phases.setTabEnabled(1, True)
self.phases.setTabEnabled(2, True)
def get_temperature_from_mapping_module(self, dr):
sources_temperature = {}
# sources_temperature = self.dialog_source.source_temperature
for mapped_source in self.dialog_source.mapped_sources:
temperature = self.dialog_source.source_temperature[mapped_source]
sources_temperature[self.dialog_source.MM_to_DPM_sources_dict[mapped_source]] = [temperature for i in
range(self.h8760)]
try:
file_start = self.dialog_source.file_name_mapping[mapped_source]
except:
continue
if self.dialog_source.monthly_temperature[mapped_source]:
suffixs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
else:
suffixs = [""]
for suffix in suffixs:
f = Step0Dialog.find_file(dr, file_start + str(suffix) + "_", ".tif")
if f is None:
continue
params = f.split("_")[1:-1]
for param in params:
try:
if param.startswith("t"):
sources_temperature = self.span_temperature(sources_temperature,
self.dialog_source.MM_to_DPM_sources_dict[mapped_source],
suffix, float(param.lstrip("t")))
if param.startswith("b"):
self.dialog_source.source_buffer[mapped_source] = float(param.lstrip("b"))
if param.startswith("e"):
self.dialog_source.source_efficiency[mapped_source] = float(param.lstrip("e"))
except:
print("Step0, get_temperature_from_mapping_module: failed to interpreter param", param,
"File:", f)
return sources_temperature
def span_temperature(self, sources_temperature, source, suffix, integral):
if suffix == "":
for i in range(len(sources_temperature[source])):
sources_temperature[source][i] = integral
else:
try:
suffix = int(suffix)
except ValueError:
return sources_temperature
for month_length in self.day_per_month.keys():
if int(suffix) in self.day_per_month[month_length]:
start, end = self.start_end_month(suffix)
for i in range(start, end):
sources_temperature[source][i] = integral
break
else:
print("Step0Dialog.py, span_integral, strange things are happening. suffix: ", suffix)
return sources_temperature
@staticmethod
def find_file(folder, start, end):
for root, dirs, files in os.walk(folder):
for f in files:
if f.endswith(end) and f.startswith(start):
return f
return None
def start_end_month(self, month):
if month is None:
return [1, 0]
try:
month = int(month)
except ValueError:
return [0, self.h8760]
if month < 1 or month > 12:
return [1, 0]
total_max = 0
for i in range(month):
for month_length in self.day_per_month.keys():
if i+1 in self.day_per_month[month_length]:
total_min = total_max
total_max = total_max + month_length*self.h24
# the interval is of kind [), first hour of year is 0
return [total_min, total_max]
    def update_source_combobox(self):
        # Placeholder: intentionally does nothing at the moment.
        # Presumably kept so existing signal connections to this slot stay valid - TODO confirm.
        pass
def fill_layers_combobox(self, a):
self.comboLayer.clear()
combo_list = []
for key in QgsProject.instance().mapLayers().keys():
if type(QgsProject.instance().mapLayers()[key]) == type(QgsVectorLayer()):
if QgsProject.instance().mapLayers()[key].geometryType() == 2:
if not QgsProject.instance().mapLayers()[key].name() in combo_list:
combo_list.append(QgsProject.instance().mapLayers()[key].name())
self.comboLayer.insertItems(0, combo_list)
# Master integration
for i in range(self.comboLayer.count()):
name = self.comboLayer.itemText(i)
if mp_config.BUILDING_SHAPE.lower().startswith(name.lower()):
self.comboLayer.setCurrentText(name)
    def source_availability(self):
        """Compute per-source energy availability via a SourceAvailability worker,
        fill the results table (source / MWh per year / mean temperature) and emit
        the data to step 2."""
        txt = self.comboLayer.currentText()
        worker = SourceAvailability()
        worker.txt = txt
        worker.pbar_Download = self.pbar_Download
        worker.cancel_button = self.cancel
        # wire the cancel button so self.interrupt() can flag the worker to stop
        worker.cancel_button.clicked.connect(self.interrupt)
        worker.day_per_month = self.day_per_month
        worker.dialog_source = self.dialog_source
        self.worker = worker
        sources_availability = worker.source_availability()
        # log = MyLog(None, "log_SMM_source_availability.txt")
        # log.log("sources_availability", sources_availability)
        self.sources_availability = sources_availability
        # None presumably means the worker was cancelled or failed - TODO confirm
        if sources_availability is None:
            return
        sources_temperature = self.get_temperature_from_mapping_module(self.folder.text())
        # log.log("sources_temperature", sources_temperature)
        self.sources_temperature = sources_temperature
        # Rebuild the availability table: one row per source, three columns.
        self.sources_available.clear()
        self.sources_available.setColumnCount(3)
        self.sources_available.setRowCount(len(sources_availability))
        self.sources_available.setHorizontalHeaderLabels(["Source", "Availability [MWh/y]", "Temperature [°C]"])
        i = 0
        for key in sources_availability.keys():
            self.sources_available.setItem(i, 0, QTableWidgetItem(str(key)))
            self.sources_available.setItem(i, 1, QTableWidgetItem(str(round(sum(sources_availability[key]), 2))))
            # yearly mean temperature: hourly sum divided by hours per year
            self.sources_available.setItem(i, 2, QTableWidgetItem(str(round(sum(sources_temperature[key])/self.h8760, 2))))
            i = i + 1
        self.sources_available.setColumnWidth(0, 320)
        self.sources_available.setColumnWidth(1, 140)
        self.sources_available.setColumnWidth(2, 140)
        # self.sources_available.horizontalHeader().setResizeMode(QHeaderView.Stretch)
        #self.send_tab_to_stap3()
        self.send_data_to_step2.emit(sources_availability, sources_temperature)
        # Post-process the displayed table (hide temperatures for some sources,
        # rename entries, reorder rows).
        source_availability_post_calculation = SourceAvailabilityPostCalculation(self.sources_available)
        source_availability_post_calculation.remove_temperature([self.dialog_source.sources[1],
                                                                 self.dialog_source.sources[0],
                                                                 self.dialog_source.sources[4],
                                                                 self.dialog_source.sources[20]])
        source_availability_post_calculation.set_new_text()
        source_availability_post_calculation.swap_position(19, 13)
    def send_tab_to_stap3(self):
        # Emit the availability table widget to step 3.
        # NOTE(review): "stap3" looks like a typo for "step3"; the name is kept
        # for backwards compatibility with existing connections.
        self.send_tab_sources.emit(self.sources_available)
    def interrupt(self):
        # Cancel-button slot: flags the running worker so it can stop gracefully.
        self.worker.cancel = True
def download_streets_from_comboBox_selection(self):
p = streetsDownloader()
streets_merged = p.download_streets_from_osm()
self.geo_graph = p.geo_graph
self.data_transfer.geo_graph = self.geo_graph
self.data_transfer.buildings = self.baseline_scenario_layer
streets_merged.startEditing()
f1 = QgsField('ID', QVariant.Int, 'int', 10)
f2 = QgsField('OSM_origin', QVariant.Int, 'int', 1, 0, '', QVariant.Invalid)
f3 = QgsField('diameter', QVariant.Double, 'double', 16, 3, '', QVariant.Invalid)
streets_merged.dataProvider().addAttributes([f1, f2, f3])
streets_merged.commitChanges()
streets_merged.startEditing()
i = 0
for f in streets_merged.getFeatures():
f.setAttribute(f.fieldNameIndex('ID'), i)
f.setAttribute(f.fieldNameIndex('OSM_origin'), 1)
streets_merged.updateFeature(f)
i = i + 1
streets_merged.commitChanges()
layers = QgsProject.instance().mapLayersByName(streets_merged.name())
for layer in layers:
QgsProject.instance().removeMapLayer(layer.id())
try:
del layer
except:
print("Step0_dialog.py, download_streets_from_comboBox_selection."
+" Unable to remove layer.")
QgsProject.instance().addMapLayer(streets_merged)
self.step0_all_import_complete.emit(streets_merged, self.data_transfer)
def get_layer_streets_old(self):
file_path = self.layerPath_streets.filePath()
if not QgsProject.instance().mapLayersByName(os.path.basename(file_path)):
load_file_as_layer(file_path, os.path.basename(file_path), 'Streets layer', min_val=None,
max_val=None, mean_val=None, value_color=None, area=None)
else:
print("File " + file_path + "seems to be already loaded! Skipping file...")
self.download_streets_from_comboBox_selection()
def get_layer_streets(self):
layer = self.create_shape_buildings(display=True, save_output=True)
self.download_streets_from_comboBox_selection()
def create_shape_buildings(self, to_crs=None, display=False, save_output=False):
building_layer_path = os.path.join(mp_config.CURRENT_MAPPING_DIRECTORY, mp_config.DMM_FOLDER,
mp_config.DMM_PREFIX+".shp")
if not os.path.exists(building_layer_path):
raise Exception("The shape file from the District Mapping doesn't exist. "
"Please run the District Mapping Module first.")
if to_crs is None:
to_crs = mp_config.WGS84_CRS
# Read buildings file as geopandas
buildings_gdf = gpd.read_file(building_layer_path)
if len(buildings_gdf) == 0:
raise Exception("The shape file from the District Mapping is empty.")
# Buffer 0.0 to remove invalid polygons, even if there is a loss
if any(not p.is_valid for p in buildings_gdf["geometry"]):
buildings_gdf["geometry"] = buildings_gdf["geometry"].buffer(0.0)
# Remove the empty polygons
buildings_gdf = buildings_gdf[buildings_gdf["geometry"].is_empty == False]
# Project gdf to WGS84
buildings_gdf.to_crs(mp_config.WGS84_CRS, inplace=True)
# Project gdf in UTM
projected_gdf = ox.project_gdf(buildings_gdf)
# ConveHull of all buildings (unary_union)
hull = projected_gdf["geometry"].unary_union.convex_hull
# Buffer around it
buffer_hull = hull.buffer(mp_config.BUFFER_HULL_FOR_STREETS)
# Reproject store it in geodataframe
buffer_hull_gdf = gpd.GeoDataFrame(columns=["geometry"], crs=projected_gdf.crs)
buffer_hull_gdf.loc[0] = [buffer_hull]
# Reproject
buffer_hull_gdf = ox.project_gdf(buffer_hull_gdf, to_crs=to_crs)
print("Save as shapefile ...")
output_folder = os.path.join(mp_config.CURRENT_PLANNING_DIRECTORY, mp_config.DISTRICT_FOLDER,
mp_config.BUILDING_SHAPE)
os.makedirs(output_folder, exist_ok=True)
output_file = os.path.join(output_folder, mp_config.BUILDING_SHAPE+".shp")
buffer_hull_gdf.to_file(output_file, driver='ESRI Shapefile')
if display:
print("Displaying ...")
output_layer = QgsProject.instance().addMapLayer(QgsVectorLayer(output_file, "building_shape", "ogr"))
crs = output_layer.crs()
crs.createFromId(4326)
output_layer.setCrs(crs)
lyr = QgsProject.instance().layerTreeRoot().findLayer(output_layer.id())
lyr.setItemVisibilityChecked(False)
self.iface.mapCanvas().refresh()
return output_layer
    def delete_import_file(self):
        """Delete the save file selected in the comboBox from disk.

        NOTE(review): this feature is deliberately disabled - the early return
        below makes everything after it unreachable. The original body is kept
        for reference in case the functionality is re-enabled.
        """
        print("STEP_0.delete_import_file(): this functionality has been disabled!")
        return
        # --- unreachable from here on (disabled implementation) ---
        msgBox = QMessageBox()
        msgBox.setIcon(QMessageBox.Warning)
        main_text = "Delete selected file?"
        msgBox.setText(main_text)
        msgBox.setWindowTitle("PlanHeat Planning and Simulation modules")
        msgBox.setInformativeText("Pressing Ok the file will be removed from your drive. Press Cancel to return.")
        msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
        msgBox.setDefaultButton(QMessageBox.Cancel)
        r = msgBox.exec()
        if r == QMessageBox.Ok:
            if self.work_folder is None:
                folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "save_utility", "DefaultSaveFolder")
            else:
                folder = self.work_folder
            try:
                os.path.exists(folder)
            except TypeError:
                folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "save_utility", "DefaultSaveFolder")
            file_manager = FileManager(work_folder=folder)
            file_manager.create_work_folder()
            file = self.comboBox.currentText()
            file_path = file_manager.get_path_from_file_name(file, end_char=-5, search_folders=[folder])
            file_manager.remove_shapefiles(file)
            file_manager.purge_unused_network_folder_and_shapefiles()
            file_manager.remove_file_from_file_path(file_path)
            self.file_removed.emit()
| 46.836299 | 146 | 0.606261 |
ace2e755e2d1ee45ab9677d315c92f59964d7d24 | 3,259 | py | Python | dns/unit_tests/test_resource_record_set.py | omaray/gcloud-python | 87a13aaa140842111df2f76529a1b9ce4b6d28a6 | [
"Apache-2.0"
] | null | null | null | dns/unit_tests/test_resource_record_set.py | omaray/gcloud-python | 87a13aaa140842111df2f76529a1b9ce4b6d28a6 | [
"Apache-2.0"
] | null | null | null | dns/unit_tests/test_resource_record_set.py | omaray/gcloud-python | 87a13aaa140842111df2f76529a1b9ce4b6d28a6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestResourceRecordSet(unittest.TestCase):
    """Unit tests for google.cloud.dns ResourceRecordSet construction."""

    def _getTargetClass(self):
        from google.cloud.dns.resource_record_set import ResourceRecordSet
        return ResourceRecordSet

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def _check_missing_key(self, resource):
        # from_api_repr must raise KeyError whenever a required field is absent.
        with self.assertRaises(KeyError):
            self._getTargetClass().from_api_repr(resource, zone=_Zone())

    def test_ctor(self):
        zone = _Zone()
        record = self._makeOne('test.example.com', 'CNAME', 3600,
                               ['www.example.com'], zone)
        self.assertEqual(record.name, 'test.example.com')
        self.assertEqual(record.record_type, 'CNAME')
        self.assertEqual(record.ttl, 3600)
        self.assertEqual(record.rrdatas, ['www.example.com'])
        self.assertIs(record.zone, zone)

    def test_from_api_repr_missing_rrdatas(self):
        self._check_missing_key({'name': 'test.example.com',
                                 'type': 'CNAME',
                                 'ttl': 3600})

    def test_from_api_repr_missing_ttl(self):
        self._check_missing_key({'name': 'test.example.com',
                                 'type': 'CNAME',
                                 'rrdatas': ['www.example.com']})

    def test_from_api_repr_missing_type(self):
        self._check_missing_key({'name': 'test.example.com',
                                 'ttl': 3600,
                                 'rrdatas': ['www.example.com']})

    def test_from_api_repr_missing_name(self):
        self._check_missing_key({'type': 'CNAME',
                                 'ttl': 3600,
                                 'rrdatas': ['www.example.com']})

    def test_from_api_repr_bare(self):
        zone = _Zone()
        resource = {
            'kind': 'dns#resourceRecordSet',
            'name': 'test.example.com',
            'type': 'CNAME',
            'ttl': '3600',
            'rrdatas': ['www.example.com'],
        }
        record = self._getTargetClass().from_api_repr(resource, zone=zone)
        self.assertEqual(record.name, 'test.example.com')
        self.assertEqual(record.record_type, 'CNAME')
        self.assertEqual(record.ttl, 3600)
        self.assertEqual(record.rrdatas, ['www.example.com'])
        self.assertIs(record.zone, zone)
class _Zone(object):
    # Minimal stand-in for a managed zone; the tests only check identity of rrs.zone.
    pass
| 34.305263 | 76 | 0.58699 |
ace2e80f0252fbf50c681bdfe3b3a0ad8af01c83 | 55,943 | py | Python | packages/Red9/startup/setup.py | jonntd/PipelineTools | 84412c2c2ce27de874afb6aa5d56fd94e12e4536 | [
"BSD-2-Clause"
] | 5 | 2019-07-19T22:11:07.000Z | 2022-02-15T02:02:51.000Z | packages/Red9/startup/setup.py | blueroseslol/PipelineTools | 6ba3da17dcc97b7ef0c99f9ebbbf4c41516b31c0 | [
"BSD-2-Clause"
] | null | null | null | packages/Red9/startup/setup.py | blueroseslol/PipelineTools | 6ba3da17dcc97b7ef0c99f9ebbbf4c41516b31c0 | [
"BSD-2-Clause"
] | 3 | 2018-06-05T09:00:13.000Z | 2020-04-27T14:13:44.000Z | '''
------------------------------------------
Red9 Studio Pack: Maya Pipeline Solutions
Author: Mark Jackson
email: rednineinfo@gmail.com
Red9 blog : http://red9-consultancy.blogspot.co.uk/
MarkJ blog: http://markj3d.blogspot.co.uk
------------------------------------------
This is the heart of the Red9 StudioPack's boot sequence, managing folder structures,
dependencies and menuItems.
######### THIS SHOULD NOT REQUIRE ANY OF THE RED9.core modules ##########
'''
__author__ = 'Mark Jackson'
__buildVersionID__ = 2.5
installedVersion= False
import sys
import os
import imp
import maya.cmds as cmds
import maya.mel as mel
from functools import partial
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
'''
Maya Version Mapping History:
====================================
Release -version -api python -qt prefs release extra info
-----------------------------------------------------------------------------------------
2008 . 2008 . ?????? . 2.5.1 na . 2008 . 2007-09-01
2009 . 2009 . ?????? . 2.5.1 na . 2009 . 2008-10-01
2010 . 2010 . 201000 . 2.6.1 na . 2010 . 2009-08-01
2011 Hotfix2 . 2011 . 201102 . 2.6.4 4.5.3 . 2011 .
2011 SAP . 2011 . 201104 . 2.6.4 4.5.3 . 2011.5 . 2010-09-29
2012 . 2012 . 201200 . 2.6.4 4.7.1 . 2012 . 2011-04-01
2012 SP1 . 2012 . ?????? . 2.6.4 4.7.1 . 2012 .
2012 SAP1 . 2012 . ?????? . 2.6.4 4.7.1 . 2012 . 2012-01-26
2012 SP2 . 2012 . 201217 . 2.6.4 4.7.1 . 2012 .
2013 SP1 . 2013 . 201301 . 2.6.4 4.7.1 . 2013 . 2012-07-00
2013 SP2 . 2013 . 201303 . 2.6.4 4.7.1 . 2013 . 2013-01-00
2013 EXT . 2013 . 201350? . 2.6.4 4.7.1 . 2013.5 . 2012-09-25 . 2013 binary incompatible
2013 EXT2 . 2013 . 201355 . 2.6.4 4.7.1 . 2013.5 . 2013-01-22 . 2013 binary incompatible
2014 . 2014 . 201400 . 2.6.4 4.8.2 . 2014 . 2013-04-10
2015 . 2015 . 201500 . 2.7 4.8.5 . 2015 . 2014-04-15
2015 SP6 . 2015 . 201516 . 2.7 4.8.5 . 2015
2016 . 2016 . 201600 . 2.7 4.8.6 . 2016 . 2015-04-15
2016 EXT1 SP6 . 2016 . 201614 . 2.7 4.8.6 . 2016
2016 EXT2 . 2016 . 201650 . 2.7 4.8.6 . 2016.5 . 2016-04-15 . 2016 binary incompatible
2017 . 2017 . 201700 . 2.7 5.6.1 . 2017 . 2016-05-15
------------------------------------------------------------------------------------------
'''
#=========================================================================================
# LANGUAGE MAPPING -----------------------------------------------------------------------
#=========================================================================================
#global LANGUAGE_MAP
import language_packs.language_english
LANGUAGE_MAP = language_packs.language_english
def get_language_maps():
    '''
    Return the module basenames (no .py extension) of all language packs found
    in the "language_packs" folder next to this module.

    Robustness fix: return an empty list instead of raising OSError when the
    language_packs folder is missing from the install.
    '''
    languages = []
    language_path = os.path.join(os.path.dirname(__file__), 'language_packs')
    if not os.path.isdir(language_path):
        return languages
    for p in os.listdir(language_path):
        if p.startswith('language_') and p.endswith('.py'):
            languages.append(p.split('.py')[0])
    return languages
def set_language(language='language_english', *args):
    # Load the requested language pack module and rebind the global LANGUAGE_MAP.
    # *args absorbs the extra boolean Maya passes from menuItem callbacks.
    global LANGUAGE_MAP
    language_path = os.path.join(os.path.dirname(__file__),'language_packs')
    packs = get_language_maps()
    # silently ignores unknown language names (LANGUAGE_MAP keeps its prior value)
    if language in packs:
        print 'Red9 : Importing Language Map : %s' % language
        LANGUAGE_MAP = imp.load_source('language', os.path.join(language_path, language+'.py'))
set_language()
# -----------------------------------------------------------------------------------------
# MAYA DATA ---
# -----------------------------------------------------------------------------------------
MAYA_INTERNAL_DATA = {} # cached Maya internal vars for speed
def mayaFullSpecs():
    # Debug helper: dump Maya version / API / release / QT / prefs / OS details
    # plus the cached MAYA_INTERNAL_DATA dict to the script editor.
    print 'Maya version : ', mayaVersion()
    print 'Maya API version: ', mayaVersionRelease()
    print 'Maya Release: ', cmds.about(v=True)
    print 'QT build: ', mayaVersionQT()
    print 'Prefs folder: ',mayaPrefs()
    print 'OS build: ', osBuild()
    print MAYA_INTERNAL_DATA
def mayaVersion():
    '''
    get the application version back, this doesn't track service packs or extensions
    TODO: need to manage this better and use the API version, eg: 2013.5 returns 2013
    '''
    cached = MAYA_INTERNAL_DATA.get('version')
    if cached:
        return cached
    MAYA_INTERNAL_DATA['version'] = mel.eval('getApplicationVersionAsFloat')
    return MAYA_INTERNAL_DATA['version']
def mayaInstallDir(version='2016'):
    '''
    Read the MAYA_INSTALL_LOCATION for the given Maya version from the
    Windows registry. Windows only (relies on _winreg).

    :param version: Maya version key to look up, eg '2016'
    :raises StandardError: when the registry key can't be read
    '''
    try:
        import _winreg
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Autodesk\Maya\{0}\Setup\InstallPath".format(version), 0, _winreg.KEY_READ)
        return _winreg.QueryValueEx(key, "MAYA_INSTALL_LOCATION")[0]
    except Exception:
        # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
        raise StandardError('Given Maya key not found in Registry')
def mayaVersionRelease():
    '''
    get the api version back so we can track service packs etc
    '''
    cached = MAYA_INTERNAL_DATA.get('api')
    if cached:
        return cached
    MAYA_INTERNAL_DATA['api'] = cmds.about(api=True)
    return MAYA_INTERNAL_DATA['api']
def mayaRelease():
    '''
    wrap over the version and api to return EXT builds that modify the
    codebase significantly, prefs being set to 20XX.5 is a general clue
    but we use the api build id to be specific
    '''
    base = mayaVersion()
    api = mayaVersionRelease()
    # 2016 EXT2 (api >= 201615) is binary incompatible, report it as 2016.5
    if base == 2016 and api >= 201615:
        return 2016.5
    return base
def mayaVersionQT():
    '''
    Return the QT build version Maya was compiled against (cached), or None
    when unavailable (eg: Maya builds that predate the qt flag).
    '''
    try:
        if 'qt' in MAYA_INTERNAL_DATA and MAYA_INTERNAL_DATA['qt']:
            return MAYA_INTERNAL_DATA['qt']
        else:
            MAYA_INTERNAL_DATA['qt'] = cmds.about(qt=True)
            return MAYA_INTERNAL_DATA['qt']
    except Exception:
        # narrowed from a bare except; deliberately best-effort, returns None
        pass
def mayaPrefs():
    '''
    Root of Maya prefs folder
    '''
    cached = MAYA_INTERNAL_DATA.get('prefs')
    if cached:
        return cached
    MAYA_INTERNAL_DATA['prefs'] = os.path.dirname(cmds.about(env=True))
    return MAYA_INTERNAL_DATA['prefs']
def mayaUpAxis(setAxis=None):
    '''
    Get or set Maya's world up axis. With no argument, returns 'y' or 'z';
    with setAxis ('y' or 'z'), switches the scene up axis instead.
    '''
    import maya.OpenMaya as OpenMaya
    if setAxis:
        axis = setAxis.lower()
        if axis == 'y':
            OpenMaya.MGlobal.setYAxisUp()
        elif axis == 'z':
            OpenMaya.MGlobal.setZAxisUp()
        return
    vect = OpenMaya.MGlobal.upAxis()
    if vect.z:
        return 'z'
    if vect.y:
        return 'y'
def mayaIsBatch():
    # True when Maya is running headless (mayabatch / batch mode).
    return cmds.about(batch=True)
def osBuild():
    '''
    Return 64 or 32 for the current Windows Maya build; None on other platforms.
    '''
    return {'win64': 64, 'win32': 32}.get(cmds.about(os=True))
def getCurrentFPS():
    '''
    returns the current frames per second as a number, rather than a useless string
    '''
    rates = {"game": 15.0, "film": 24.0, "pal": 25.0, "ntsc": 30.0, "show": 48.0, "palf": 50.0, "ntscf": 60.0}
    unit = cmds.currentUnit(q=True, fullName=True, time=True)
    return rates[unit]
# -----------------------------------------------------------------------------------------
# MENU SETUPS ---
# -----------------------------------------------------------------------------------------
def menuSetup(parent='MayaWindow'):
    '''
    Build (or rebuild) the main Red9 menu under the given parent, which may be a
    window with a menuBar, a menuBarLayout, or an existing menu.

    :param parent: UI element to attach the menu to (default: Maya's main window)
    :raises StandardError: if parent exists but can't host a menu, or doesn't exist
    '''
    #if exists remove all items, means we can update on the fly by restarting the Red9 pack
    if cmds.menu('redNineMenuItemRoot', exists=True):
        cmds.deleteUI('redNineMenuItemRoot')
        log.info("Rebuilding Existing RedNine Menu")
    # parent is an existing window with an existing menuBar?
    if cmds.window(parent, exists=True):
        if not cmds.window(parent, q=True, menuBar=True):
            raise StandardError('given parent for Red9 Menu has no menuBarlayout %s' % parent)
        else:
            cmds.menu('redNineMenuItemRoot', l="RedNine", p=parent, tearOff=True, allowOptionBoxes=True)
            log.info('new Red9 Menu added to current window : %s' % parent)
    # parent is a menuBar?
    elif cmds.menuBarLayout(parent, exists=True):
        cmds.menu('redNineMenuItemRoot', l='RedNine', p=parent, tearOff=True, allowOptionBoxes=True)
        log.info('New Red9 Sound Menu added to current windows menuBar : %s' % parent)
    # parent is an existing menu?
    elif cmds.menu(parent, exists=True):
        cmds.menuItem('redNineMenuItemRoot', l='RedNine', sm=True, p=parent)
        log.info('new Red9 subMenu added to current Menu : %s' % parent)
    else:
        raise StandardError('given parent for Red9 Menu is invalid %s' % parent)
    try:
        # --- ProPack / client holder menus ---
        cmds.menuItem('redNineProRootItem',
                      l='PRO : PACK', sm=True, p='redNineMenuItemRoot', tearOff=True,i='red9.jpg')
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.red9_details+' : ProPack', i='info_30.png',
                      c='Red9.setup.get_pro_pack()',p='redNineProRootItem')
        # Holder Menus for Client code
        if get_client_modules():
            cmds.menuItem(divider=True,p='redNineMenuItemRoot')
            for client in get_client_modules():
                cmds.menuItem('redNineClient%sItem' % client,
                              l='CLIENT : %s' % client, sm=True, p='redNineMenuItemRoot', tearOff=True, i='red9.jpg')
        cmds.menuItem(divider=True,p='redNineMenuItemRoot')
        #Add the main Menu items
        cmds.menuItem('redNineAnimItem',
                      l=LANGUAGE_MAP._MainMenus_.animation_toolkit,
                      ann=LANGUAGE_MAP._MainMenus_.animation_toolkit_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='pose_30.png',
                      c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.AnimationUI.show()")
        cmds.menuItem('redNineSnapItem',
                      l=LANGUAGE_MAP._MainMenus_.simple_snap,
                      ann=LANGUAGE_MAP._MainMenus_.simple_snap_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='locationon_30.png',
                      c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.AnimFunctions.snap()")
        cmds.menuItem('redNineSearchItem',
                      l=LANGUAGE_MAP._MainMenus_.searchui,
                      ann=LANGUAGE_MAP._MainMenus_.searchui_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='search_30.png',
                      c="import Red9.core.Red9_CoreUtils as r9Core;r9Core.FilterNode_UI.show()")
        cmds.menuItem('redNineLockChnsItem',
                      l=LANGUAGE_MAP._MainMenus_.lockchannels,
                      ann=LANGUAGE_MAP._MainMenus_.lockchannels_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='lock_30.png',
                      c="import Red9.core.Red9_CoreUtils as r9Core;r9Core.LockChannels.UI.show()")
        cmds.menuItem('redNineMetaUIItem',
                      l=LANGUAGE_MAP._MainMenus_.metanodeui,
                      ann=LANGUAGE_MAP._MainMenus_.metanodeui_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='meta_node_30.png',
                      c="import Red9.core.Red9_Meta as r9Meta;r9Meta.MClassNodeUI.show()")
        cmds.menuItem('redNineReporterUIItem',
                      l=LANGUAGE_MAP._MainMenus_.scene_reviewer,
                      ann=LANGUAGE_MAP._MainMenus_.scene_reviewer_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='hand_with_pen_30.png',
                      c="import Red9.core.Red9_Tools as r9Tools;r9Tools.SceneReviewerUI.show()")
        cmds.menuItem('redNineMoCapItem',
                      l=LANGUAGE_MAP._MainMenus_.mouse_mocap,
                      ann=LANGUAGE_MAP._MainMenus_.mouse_mocap_ann,
                      p='redNineMenuItemRoot', echoCommand=True,
                      c="import Red9.core.Red9_Tools as r9Tools;r9Tools.RecordAttrs.show()")
        cmds.menuItem('redNineRandomizerItem',
                      l=LANGUAGE_MAP._MainMenus_.randomize_keyframes,
                      ann=LANGUAGE_MAP._MainMenus_.randomize_keyframes_ann,
                      p='redNineMenuItemRoot', echoCommand=True,
                      c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.RandomizeKeys.showOptions()")
        cmds.menuItem('redNineFilterCurvesItem',
                      l=LANGUAGE_MAP._MainMenus_.interactive_curve_filter,
                      ann=LANGUAGE_MAP._MainMenus_.interactive_curve_filter_ann,
                      p='redNineMenuItemRoot', echoCommand=True,
                      c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.FilterCurves.show()")
        cmds.menuItem('redNineMirrorUIItem',
                      l=LANGUAGE_MAP._MainMenus_.mirror_setup,
                      ann=LANGUAGE_MAP._MainMenus_.mirror_setup_ann,
                      p='redNineMenuItemRoot', echoCommand=True,
                      c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.MirrorSetup().show()")
        # --- CameraTracker submenu (option boxes only exist post-2009) ---
        cmds.menuItem('redNineCameraTrackItem', i='camera_30.png',
                      l='CameraTracker',sm=True,p='redNineMenuItemRoot')
        cmds.menuItem('redNineCamerTrackFixedItem',
                      l=LANGUAGE_MAP._MainMenus_.camera_tracker_pan,
                      ann=LANGUAGE_MAP._MainMenus_.camera_tracker_pan_ann,
                      p='redNineCameraTrackItem', echoCommand=True,
                      c="from Red9.core.Red9_AnimationUtils import CameraTracker as camTrack;camTrack.cameraTrackView(fixed=True)")
        if not mayaVersion()<=2009:
            cmds.menuItem(optionBox=True,
                          ann=LANGUAGE_MAP._MainMenus_.tracker_tighness_ann,
                          p='redNineCameraTrackItem', echoCommand=True,
                          c="from Red9.core.Red9_AnimationUtils import CameraTracker as camTrack;camTrack(fixed=True)._showUI()")
        cmds.menuItem('redNineCamerTrackFreeItem',
                      l=LANGUAGE_MAP._MainMenus_.camera_tracker_track,
                      ann=LANGUAGE_MAP._MainMenus_.camera_tracker_track_ann,
                      p='redNineCameraTrackItem', echoCommand=True,
                      c="from Red9.core.Red9_AnimationUtils import CameraTracker as camTrack;camTrack.cameraTrackView(fixed=False)")
        if not mayaVersion()<=2009:
            cmds.menuItem(optionBox=True,
                          ann=LANGUAGE_MAP._MainMenus_.tracker_tighness_ann,
                          p='redNineCameraTrackItem', echoCommand=True,
                          c="from Red9.core.Red9_AnimationUtils import CameraTracker as camTrack;camTrack(fixed=False)._showUI()")
        cmds.menuItem(divider=True,p='redNineMenuItemRoot')
        cmds.menuItem('redNineAnimBndItem',
                      l=LANGUAGE_MAP._MainMenus_.animation_binder,
                      ann=LANGUAGE_MAP._MainMenus_.animation_binder_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='workflow_30.png',
                      c="import Red9.core.AnimationBinder as animBnd;animBnd.AnimBinderUI()._UI()")
        cmds.menuItem(divider=True,p='redNineMenuItemRoot')
        # --- web / info links ---
        cmds.menuItem('redNineHomepageItem',
                      l=LANGUAGE_MAP._MainMenus_.red9_homepage,
                      ann=LANGUAGE_MAP._MainMenus_.red9_homepage_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='domain_30.png',
                      c="Red9.setup.red9_website_home()")
        cmds.menuItem('redNineBlogItem',
                      l=LANGUAGE_MAP._MainMenus_.red9_blog,
                      ann=LANGUAGE_MAP._MainMenus_.red9_blog_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='blogspot_30.png',
                      c="Red9.setup.red9_blog()")
        cmds.menuItem('redNineVimeoItem',
                      l=LANGUAGE_MAP._MainMenus_.red9_vimeo,
                      ann=LANGUAGE_MAP._MainMenus_.red9_vimeo_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='vimeo_30.png',
                      c="Red9.setup.red9_vimeo()")
        cmds.menuItem('redNineFacebookItem',
                      l=LANGUAGE_MAP._MainMenus_.red9_facebook,
                      ann=LANGUAGE_MAP._MainMenus_.red9_facebook_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='facebook_30.png',
                      c="Red9.setup.red9_facebook()")
        cmds.menuItem('redNineAPIDocItem',
                      l=LANGUAGE_MAP._MainMenus_.red9_api_docs,
                      ann=LANGUAGE_MAP._MainMenus_.red9_api_docs_ann,
                      p='redNineMenuItemRoot', echoCommand=True, i='api_30.png',
                      c="Red9.setup.red9_apidocs()")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.red9_details, i='info_30.png',
                      c='Red9.setup.red9ContactInfo()',p='redNineMenuItemRoot')
        cmds.menuItem(divider=True,p='redNineMenuItemRoot')
        # --- debugger submenu: logging levels per module + reload + language packs ---
        cmds.menuItem('redNineDebuggerItem', l=LANGUAGE_MAP._MainMenus_.red9_debugger,sm=True, i='bug_30.png', p='redNineMenuItemRoot')
        cmds.menuItem('redNineLostAnimItem', p='redNineDebuggerItem',
                      l=LANGUAGE_MAP._MainMenus_.reconnect_anim,
                      ann=LANGUAGE_MAP._MainMenus_.reconnect_anim_ann,
                      echoCommand=True, c="import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.ReconnectAnimData().show()")
        cmds.menuItem('redNineOpenCrashItem', p='redNineDebuggerItem',
                      l=LANGUAGE_MAP._MainMenus_.open_last_crash,
                      ann=LANGUAGE_MAP._MainMenus_.open_last_crash_ann,
                      echoCommand=True, c="import Red9.core.Red9_General as r9General;r9General.os_openCrashFile()")
        cmds.menuItem(divider=True,p='redNineDebuggerItem')
        cmds.menuItem('redNineDebugItem',
                      l=LANGUAGE_MAP._MainMenus_.systems_debug,
                      ann=LANGUAGE_MAP._MainMenus_.systems_debug_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_debug()")
        cmds.menuItem('redNineInfoItem',
                      l=LANGUAGE_MAP._MainMenus_.systems_info,
                      ann=LANGUAGE_MAP._MainMenus_.systems_info_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_info()")
        cmds.menuItem(divider=True,p='redNineDebuggerItem')
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.individual_debug, sm=True, p='redNineDebuggerItem')
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Core",
                      ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Core')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Meta",
                      ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Meta')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Anim",
                      ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Anim')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Tools",
                      ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Tools')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Pose",
                      ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Pose')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9General",
                      ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_debug('r9General')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.debug+" : r9Audio",
                      ann=LANGUAGE_MAP._MainMenus_.individual_debug_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_debug('r9Audio')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.individual_info,sm=True,p='redNineDebuggerItem')
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Core",
                      ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_info('r9Core')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Meta",
                      ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_info('r9Meta')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Anim",
                      ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_info('r9Anim')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Tools",
                      ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_info('r9Tools')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Pose",
                      ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_info('r9Pose')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9General",
                      ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_info('r9General')")
        cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.info+" : r9Audio",
                      ann=LANGUAGE_MAP._MainMenus_.individual_info_ann,
                      echoCommand=True, c="Red9.core._setlogginglevel_info('r9Audio')")
        cmds.menuItem(divider=True,p='redNineDebuggerItem')
        cmds.menuItem('redNineReloadItem',l=LANGUAGE_MAP._MainMenus_.systems_reload, p='redNineDebuggerItem',
                      ann=LANGUAGE_MAP._MainMenus_.systems_reload_ann,
                      echoCommand=True, c=reload_Red9)
        cmds.menuItem(divider=True,p='redNineDebuggerItem')
        for language in get_language_maps():
            cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.language+" : %s" % language, c=partial(set_language,language),p='redNineDebuggerItem')
    except:
        # NOTE(review): bare except masks the real UI-build failure; consider
        # narrowing and chaining the original error.
        raise StandardError('Unable to parent Red9 Menu to given parent %s' % parent)
def addToMayaMenus():
    '''
    Red9 Additions to the Maya Menu's themselves, additions to the timeSlider, fileMenu ETC..

    Adds clipboard/explorer items to Maya's main File menu, then range /
    padding / inverse-anim tools (plus ProPack timecode items when available)
    to the right-click TimeSlider menu. Safe to call repeatedly: existing
    menuItems are detected and the build is skipped.
    '''
    try:
        # fileMenu additions
        if not cmds.menuItem('redNineOpenFolderItem',q=True,ex=True):
            mainFileMenu=mel.eval("string $f=$gMainFileMenu")
            if not cmds.menu(mainFileMenu, q=True, ni=True):
                # the main File menu is built lazily by Maya; force-build it
                # so we have something to parent our items to
                mel.eval('buildFileMenu()')
            cmds.menuItem(divider=True,p=mainFileMenu)
            cmds.menuItem('redNineCopyPathItem',
                          l=LANGUAGE_MAP._MainMenus_.copy_to_clipboard,
                          ann=LANGUAGE_MAP._MainMenus_.copy_to_clipboard_ann,
                          p=mainFileMenu,
                          echoCommand=True,
                          c="import maya.cmds as cmds;import Red9.core.Red9_General as r9General;r9General.Clipboard.setText(cmds.file(q=True,sn=True))")
            cmds.menuItem('redNineOpenFolderItem',
                          l=LANGUAGE_MAP._MainMenus_.open_in_explorer,
                          ann=LANGUAGE_MAP._MainMenus_.open_in_explorer_ann,
                          p=mainFileMenu,
                          echoCommand=True,
                          c="import maya.cmds as cmds;import Red9.core.Red9_General as r9General;r9General.os_OpenFileDirectory(cmds.file(q=True,sn=True))")
        # timeSlider additions
        if not cmds.menuItem('redNineTimeSliderCollapseItem',q=True,ex=True):
            # NOTE(review): mayaVersion is referenced without calling it, so in
            # py2 this comparison is always True - confirm it should be mayaVersion()
            if mayaVersion >= 2011:
                mel.eval('updateTimeSliderMenu TimeSliderMenu')
            TimeSliderMenu='TimeSliderMenu'
            cmds.menuItem(divider=True, p=TimeSliderMenu)
            cmds.menuItem(subMenu=True, label=LANGUAGE_MAP._MainMenus_.range_submenu, p=TimeSliderMenu)
            cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.selectkeys_timerange,
                          ann=LANGUAGE_MAP._MainMenus_.selectkeys_timerange_ann,
                          c='import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.selectKeysByRange()')
            cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.setrangetoo,
                          ann=LANGUAGE_MAP._MainMenus_.setrangetoo_ann,
                          c='import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.setTimeRangeToo()')
            cmds.menuItem(divider=True, p=TimeSliderMenu)
            cmds.menuItem('redNineTimeSliderCollapseItem', label=LANGUAGE_MAP._MainMenus_.collapse_time,
                          ann=LANGUAGE_MAP._MainMenus_.collapse_time_ann,
                          c='import Red9.core.Red9_CoreUtils as r9Core;r9Core.timeOffset_collapseUI()',
                          p=TimeSliderMenu)
            cmds.menuItem(subMenu=True, label=LANGUAGE_MAP._MainMenus_.insert_padding, p=TimeSliderMenu)
            cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.pad_selected,
                          ann=LANGUAGE_MAP._MainMenus_.pad_selected_ann,
                          c='import Red9.core.Red9_CoreUtils as r9Core;r9Core.timeOffset_addPadding(scene=False)')
            cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.pad_full_scene,
                          ann=LANGUAGE_MAP._MainMenus_.pad_full_scene_ann,
                          c='import Red9.core.Red9_CoreUtils as r9Core;r9Core.timeOffset_addPadding(scene=True)')
            cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.pad_mrigs,
                          ann=LANGUAGE_MAP._MainMenus_.pad_mrigs_ann,
                          c='import Red9.core.Red9_CoreUtils as r9Core;r9Core.timeOffset_addPadding(scene=False,mRigs=True)')
            cmds.menuItem(subMenu=True, label=LANGUAGE_MAP._MainMenus_.inverse_anim, p=TimeSliderMenu)
            cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.inverse_selected,
                          ann=LANGUAGE_MAP._MainMenus_.inverse_selected_ann,
                          c='import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.AnimFunctions.inverseAnimCurves(mode="object", mRigs=False)')
            cmds.menuItem(label=LANGUAGE_MAP._MainMenus_.inverse_mrigs,
                          ann=LANGUAGE_MAP._MainMenus_.inverse_mrigs_ann,
                          c='import Red9.core.Red9_AnimationUtils as r9Anim;r9Anim.AnimFunctions.inverseAnimCurves(mode="object", mRigs=True)')
            # ProPack only: timecode tools on the TimeSlider menu
            if has_pro_pack():
                cmds.menuItem(divider=True, p=TimeSliderMenu)
                cmds.menuItem(subMenu=True, label='Red9 PRO: Timecode', p=TimeSliderMenu, i='red9.jpg')
                cmds.menuItem(label='PRO: Toggle Timecode', i='red9.jpg',
                              ann='Toggle the timeline to view time in timecode or frame',
                              c="from Red9.pro_pack import r9pro;r9pro.r9import('r9paudio');import r9paudio;r9paudio.timecode_maya_toggle_timeline()")
                cmds.menuItem(divider=True)
                cmds.menuItem(label='PRO: Set Maya Production Timecode', i='red9.jpg',
                              ann='set the Internal Maya Production timecode mapping',
                              c="from Red9.pro_pack import r9pro;r9pro.r9import('r9paudio');import r9paudio;r9paudio.timecode_maya_set_production()")
                cmds.menuItem(label='PRO: Reset Maya Production Timecode', i='red9.jpg',
                              ann='reset the Internal Maya Production timecode mapping',
                              c="from Red9.pro_pack import r9pro;r9pro.r9import('r9paudio');import r9paudio;r9paudio.timecode_maya_set_production(reset=True)")
        else:
            log.debug('Red9 Timeslider menus already built')
    except:
        # deliberately broad: in batch / unitTest runs gMainFileMenu doesn't exist
        log.debug('gMainFileMenu not found >> catch for unitTesting')
def addAudioMenu(parent=None, rootMenu='redNineTraxRoot'):
    '''
    Red9 Sound Menu setup

    :param parent: optional UI to parent the menu to - may be a window that
        owns a menuBar, a menuBarLayout, or an existing menu. If None the menu
        is added to whatever menu Maya is currently building.
    :param rootMenu: UI name given to the new menu / subMenu root
    :raises StandardError: if parent is a window with no menuBar, or not a
        recognised UI type at all
    '''
    print 'AudioMenu: given parent : ',parent
    if not parent:
        cmds.menu(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, tearOff=True, allowOptionBoxes=True)
        print 'New r9Sound Menu added - no specific parent given so adding to whatever menu is currently being built!'
    else:
        # parent is a window containing a menuBar?
        if cmds.window(parent, exists=True):
            if not cmds.window(parent, q=True, menuBar=True):
                raise StandardError('given parent for Red9 Sound Menu has no menuBarlayout %s' % parent)
            else:
                cmds.menu(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, p=parent, tearOff=True, allowOptionBoxes=True)
                log.info('New Red9 Sound Menu added to current windows menuBar : %s' % parent)
        # parent is a menuBar?
        elif cmds.menuBarLayout(parent, exists=True):
            cmds.menu(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, p=parent, tearOff=True, allowOptionBoxes=True)
            log.info('New Red9 Sound Menu added to current windows menuBar : %s' % parent)
        # parent is a menu already?
        elif cmds.menu(parent, exists=True):
            cmds.menuItem(rootMenu, l=LANGUAGE_MAP._MainMenus_.sound_red9_sound, sm=True, p=parent, allowOptionBoxes=True)
            log.info('New Red9 Sound subMenu added to current Menu : %s' % parent)
        else:
            raise StandardError('given parent for Red9 Sound Menu is invalid %s' % parent)
    # fill the new menu root: all items below call into Red9_Audio lazily
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_offset_manager, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_offset_manager_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioToolsWrap().show()")
    if has_pro_pack():
        cmds.menuItem(l='PRO: Timecode Manager', p=rootMenu,
                      ann='Pro Time and Audio offset management tools',
                      c="from Red9.pro_pack import Pro_MenuStubs;Pro_MenuStubs('r9timecode_manager')")
    cmds.menuItem(d=True)
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_activate_selected_audio, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_activate_selected_audio_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().setActive()")
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_set_timeline_to_selected, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_set_timeline_to_selected_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().setTimelineToAudio()")
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_focus_on_selected, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_focus_on_selected_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().setTimelineToAudio();r9Audio.AudioHandler().setActive()")
    cmds.menuItem(d=True)
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_mute_selected, p=rootMenu,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().muteSelected(True)")
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_unmute_selected, p=rootMenu,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().muteSelected(False)")
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_lock_selected, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_lock_selected_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().lockTimeInputs(True)")
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_unlock_selected, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_unlock_selected_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().lockTimeInputs(False)")
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_delete_selected, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_delete_selected_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().deleteSelected()")
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_format_soundnode_name, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_format_soundnode_name_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioHandler().formatNodes_to_Path()")
    cmds.menuItem(d=True)
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_combine_audio, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_combine_audio_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.combineAudio()")
    cmds.menuItem(d=True)
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_open_audio_path, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_open_audio_path_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.AudioNode().openAudioPath()")
    cmds.menuItem(l=LANGUAGE_MAP._MainMenus_.sound_inspect_wav, p=rootMenu,
                  ann=LANGUAGE_MAP._MainMenus_.sound_inspect_wav_ann,
                  c="import Red9.core.Red9_Audio as r9Audio;r9Audio.inspect_wav()")
# -----------------------------------------------------------------------------------------
# GENERAL RED9 DATA ---
# -----------------------------------------------------------------------------------------
def red9ButtonBGC(colour, qt=False):
    '''
    Generic setting for the main button colours in the UI's

    :param colour: colour id - an int 1-5 or one of the names
        'green', 'grey', 'red', 'white', 'dark'
    :param qt: if True return the colour scaled to Qt's 0-255 range,
        else return the Maya 0-1 float range
    :return: [r, g, b] list
    '''
    if colour == 1 or colour == 'green':
        rgb = [0.6, 1, 0.6]
    elif colour == 2 or colour == 'grey':
        rgb = [0.5, 0.5, 0.5]
    elif colour == 3 or colour == 'red':
        rgb = [1, 0.3, 0.3]
    elif colour == 4 or colour == 'white':
        rgb = [0.75, 0.75, 0.8]
    elif colour == 5 or colour == 'dark':
        rgb = [0.15, 0.25, 0.25]
    else:
        # bug fix: an unrecognised id previously left rgb unbound and raised
        # UnboundLocalError; fall back to the neutral grey instead
        rgb = [0.5, 0.5, 0.5]
    if qt:
        return [rgb[0] * 255, rgb[1] * 255, rgb[2] * 255]
    else:
        return rgb
# def red9ContactInfo(*args):
# '''
# add icon link for : https://icons8.com
# '''
# import Red9.core.Red9_General as r9General # lazy load
# result=cmds.confirmDialog(title='Red9_StudioPack : build %f' % red9_getVersion(),
# message=("Author: Mark Jackson\r\r"+
# "Technical Animation Director\r\r"+
# "Contact me at info@red9Consultancy.com for more information\r\r"+
# "thanks for trying the toolset. If you have any\r"+
# "suggestions or bugs please let me know!"),
# button=['Red9Consultancy.com','ChangeLog','Close'],messageAlign='center')
# if result == 'ChangeLog':
# r9General.os_OpenFile(os.path.join(red9ModulePath(),'changeLog.txt'))
# if result =='Red9Consultancy.com':
# r9General.os_OpenFile('http://red9consultancy.com/')
def red9ContactInfo(*args):
    '''
    launch the Red9 StudioPack 'about / contact' window with links to the
    website and GitHub project (*args soaks up the Maya menu callback arg)
    '''
    redSPWin='Red9StudioPack'
    # kill any previous instance of the window before rebuilding it
    if cmds.window(redSPWin,exists=True):
        cmds.deleteUI(redSPWin, window=True)
    # NOTE(review): return value bound to redProWin but never used below
    redProWin = cmds.window(redSPWin, title='Red9_StudioPack : build %s' % red9_getVersion(),s=False)
    cmds.columnLayout()
    cmds.rowColumnLayout(nc=2,cw=(1,50))
    cmds.separator(style='none')
    cmds.image( image='Red9_StudioPack_logo.png' )
    cmds.setParent('..')
    cmds.rowColumnLayout(nc=1,cw=(1,350))
    cmds.text( fn='boldLabelFont',l="Red9 StudioPack\nAuthor: Mark Jackson\n\n"+\
               "Red9 Consultancy\n"+\
               "Contact: info@red9Consultancy.com\n\n"+\
               "thanks for trying the toolset. If you have any\n"+\
               "suggestions or bugs please let us know!\n\n")
    cmds.button(label='Visit us for more Information',h=40,c="import Red9.core.Red9_General as r9General;r9General.os_OpenFile('http://red9consultancy.com/')")
    cmds.separator(style='none',h=5)
    cmds.button(label='GitHub project',h=40,c="import Red9.core.Red9_General as r9General;r9General.os_OpenFile('https://github.com/markj3d/Red9_StudioPack')")
    cmds.separator(style='none',h=15)
    cmds.text(l="with thanks to:")
    cmds.text(l="icon libs: https://icons8.com")
    cmds.text(l="pydub libs: https://github.com/jiaaro/pydub")
    cmds.showWindow(redSPWin)
    cmds.window(redSPWin, e=True,widthHeight=(350, 535))
def red9Presets():
    '''
    default presets dir holding all the filterSettings .cfg files
    '''
    module_root = red9ModulePath()
    return os.path.join(module_root, 'presets')
def red9Presets_get():
    '''
    generic extraction of all cfg presets from the default location above

    :return: sorted list of '.cfg' filenames found under red9Presets(),
        or an empty list if the folder is missing / unreadable
    '''
    try:
        # sorted() replaces the build-then-sort-in-place pattern
        return sorted(p for p in os.listdir(red9Presets()) if p.endswith('.cfg'))
    except OSError:
        # narrowed from a bare except: only trap filesystem errors here
        log.debug('failed to retrieve the presets')
        return []
def red9ModulePath():
    '''
    Main path to the Red9 root module folder; the trailing empty join
    component guarantees the result ends with the os path separator.
    '''
    startup_dir = os.path.dirname(__file__)
    module_root = os.path.dirname(startup_dir)
    return os.path.join(module_root, '')
def red9MayaNativePath():
    '''
    Return the Maya-versioned hacked script folder if one exists for the
    current build, else None. Checks the full release string first so that
    extension builds which map to a .5 release (eg 2016.5) win over the
    plain integer folder.
    '''
    release = mayaRelease()  # switched to manage ext builds that divert to a .5 release (2016.5)
    native_root = os.path.join(red9ModulePath(), 'startup', 'maya_native')
    versioned = os.path.join(native_root, 'maya_%s' % str(release))
    plain = os.path.join(native_root, 'maya_%s' % str(int(release)))
    if os.path.exists(versioned):
        # version including .5 handler
        return versioned
    if os.path.exists(plain):
        # simple int version
        return plain
    log.info('Red9MayaHacked Folder not found for this build of Maya : %s' % versioned)
def red9_help(*args):
    '''
    launch the bundled Red9-StudioTools Help pdf
    '''
    import Red9.core.Red9_General as r9General  # lazy load
    pdf_path = os.path.join(red9ModulePath(), 'docs', r'Red9-StudioTools Help.pdf')
    r9General.os_OpenFile(pdf_path)
def red9_blog(*args):
    '''
    open the Red9 Blog in the system browser
    '''
    import Red9.core.Red9_General as r9General  # lazy load
    blog_url = 'http://red9-consultancy.blogspot.com/'
    r9General.os_OpenFile(blog_url)
def red9_website_home(*args):
    '''
    open the Red9 Consultancy homepage in the system browser
    '''
    import Red9.core.Red9_General as r9General  # lazy load
    home_url = 'http://red9consultancy.com/'
    r9General.os_OpenFile(home_url)
def red9_facebook(*args):
    '''
    open the Red9 Facebook page in the system browser
    '''
    import Red9.core.Red9_General as r9General  # lazy load
    facebook_url = 'http://www.facebook.com/Red9StudioPack/'
    r9General.os_OpenFile(facebook_url)
def red9_vimeo(*args):
    '''
    open the Red9 Vimeo channel in the system browser
    '''
    import Red9.core.Red9_General as r9General  # lazy load
    vimeo_url = 'https://vimeo.com/user9491246'
    r9General.os_OpenFile(vimeo_url)
def red9_apidocs(*args):
    '''
    open up the local Red9 API html docs
    (docstring previously said 'Vimeo Channel' - copy/paste error)
    '''
    import Red9.core.Red9_General as r9General  # lazy load
    apidocs=os.path.join(red9ModulePath(),'docs', 'html', 'index.html')
    r9General.os_OpenFile(apidocs)
def red9_getVersion():
    '''current Red9 StudioPack build id (module level __buildVersionID__)'''
    return __buildVersionID__
def red9_getAuthor():
    '''Red9 StudioPack author string (module level __author__)'''
    return __author__
# def get_pro_pack(*args):
# try:
# #new pro_pack build calls
# import Red9.pro_pack.r9pro as r9pro
# r9pro.r9import('r9wtools')
# import r9wtools
# r9wtools.MailRegistration().show()
# except:
# #legacy
# import Red9.core.Red9_General as r9General # lazy load
# result=cmds.confirmDialog(title='Red9_StudioPack : build %f' % red9_getVersion(),
# message=("Red9_ProPack Not Installed!\r\r"+
# "Contact info@red9consultancy.com for more information"),
# button=['Red9Consultancy.com','Get_Pro','Close'],messageAlign='center')
# if result == 'Get_Pro':
# log.warning('Red9 ProPack systems not yet available - watch this space!')
# if result =='Red9Consultancy.com':
# r9General.os_OpenFile('http://red9consultancy.com/')
def get_pro_pack(*args):
    '''
    launch the stand-in UI for ProPack - a simple advert window shown when
    ProPack isn't installed (*args soaks up the Maya callback arg)
    '''
    redProWin='Red9ProPack'
    # kill any previous instance of the window before rebuilding it
    if cmds.window(redProWin,exists=True):
        cmds.deleteUI(redProWin, window=True)
    redProWin = cmds.window(redProWin, title="Red9 ProPack",s=False)
    cmds.columnLayout()
    cmds.paneLayout()
    cmds.image( image='Red9_ProPack_logo.png' )
    cmds.setParent('..')
    cmds.rowColumnLayout(nc=1,cw=(1,250))
    cmds.text(l="Red9 ProPack : Not yet Installed!\n", fn='boldLabelFont')
    cmds.button(label='Get Me ProPack!',h=40,c="import Red9.core.Red9_General as r9General;r9General.os_OpenFile('http://red9consultancy.com/')")
    cmds.showWindow( redProWin )
    cmds.window(redProWin,e=True,widthHeight=(250, 350))
# -----------------------------------------------------------------------------------------
# BOOT FUNCTIONS ---
# -----------------------------------------------------------------------------------------
def addScriptsPath(path):
    '''
    Add additional folders to the MAYA_SCRIPT_PATH

    :param path: folder to append to the env var; ignored (with a debug log)
        if it doesn't exist on disk
    '''
    # bug fix: os.environ.get previously returned None when the var was unset,
    # crashing both the 'in' test and the += below
    scriptsPath = os.environ.get('MAYA_SCRIPT_PATH', '')
    if os.path.exists(path):
        if path not in scriptsPath:
            log.info('Adding To Script Paths : %s' % path)
            if scriptsPath:
                os.environ['MAYA_SCRIPT_PATH'] += '%s%s' % (os.pathsep, path)
            else:
                os.environ['MAYA_SCRIPT_PATH'] = path
        else:
            log.info('Red9 Script Path already setup : %s' % path)
    else:
        log.debug('Given Script Path is invalid : %s' % path)
def addPluginPath(path=None):
    '''
    Make sure the plugin path has been added. If run as a module
    this will have already been added

    :param path: optional explicit plug-in folder; defaults to
        <Red9>/plug-ins
    '''
    if not path:
        path = os.path.join(red9ModulePath(), 'plug-ins')
    # bug fix: os.environ.get previously returned None when the var was unset,
    # crashing both the 'in' test and the += below
    plugPaths = os.environ.get('MAYA_PLUG_IN_PATH', '')
    if os.path.exists(path) and path not in plugPaths:
        log.info('Adding Red9 Plug-ins to Plugin Paths : %s' % path)
        if plugPaths:
            os.environ['MAYA_PLUG_IN_PATH'] += '%s%s' % (os.pathsep, path)
        else:
            os.environ['MAYA_PLUG_IN_PATH'] = path
    else:
        log.info('Red9 Plug-in Path already setup')
def addIconsPath(path=None):
    '''
    Make sure the icons path has been added. If run as a module
    this will have already been added

    :param path: optional explicit icons folder; defaults to <Red9>/icons
    '''
    if not path:
        path = os.path.join(red9ModulePath(), 'icons')
    # bug fix: os.environ.get previously returned None when the var was unset,
    # crashing both the 'in' test and the += below
    iconsPath = os.environ.get('XBMLANGPATH', '')
    if os.path.exists(path) and path not in iconsPath:
        log.info('Adding Red9 Icons To XBM Paths : %s' % path)
        if iconsPath:
            os.environ['XBMLANGPATH'] += '%s%s' % (os.pathsep, path)
        else:
            os.environ['XBMLANGPATH'] = path
    else:
        log.info('Red9 Icons Path already setup')
def addPythonPackages():
    '''
    Add the packages folder which is where any external modules
    will be stored, plus version-specific PySide / compiled-python
    sub-folders for older Maya builds.
    '''
    red9Packages = os.path.join(red9ModulePath(), 'packages')
    if red9Packages not in sys.path:
        log.info('Adding Red9Packages To Python Paths : %s' % red9Packages)
        sys.path.append(red9Packages)
    else:
        log.info('Red9Packages Path already setup : %s' % red9Packages)
    version = mayaVersion()  # hoisted: previously re-queried up to 5 times
    # PySide Management for pre 2014 x64 builds
    if version < 2014.0 and os.path.exists(os.path.join(red9Packages, 'PySide')):
        pysidePath = os.path.join(red9Packages, 'PySide')
        if version == 2012.0:
            pysidePath = os.path.join(pysidePath, 'PySide_2012_x64')
        elif version == 2013.0:
            pysidePath = os.path.join(pysidePath, 'PySide_2013_x64')
        if os.path.exists(pysidePath) and pysidePath not in sys.path:
            sys.path.append(pysidePath)
            log.info('Adding Red9Packages:PySide To Python Paths : %s' % pysidePath)
    # Python compiled folders, if they exist
    if version <= 2014 and os.path.exists(os.path.join(red9Packages, 'python2.6')):
        sys.path.append(os.path.join(red9Packages, 'python2.6'))
    if version >= 2015 and os.path.exists(os.path.join(red9Packages, 'python2.7')):
        sys.path.append(os.path.join(red9Packages, 'python2.7'))
def sourceMelFolderContents(path):
    '''
    source every .mel file found directly inside the given folder
    '''
    mel_scripts = [name for name in os.listdir(path) if name.lower().endswith('.mel')]
    for mel_script in mel_scripts:
        log.info('Sourcing mel script : %s' % mel_script)
        mel.eval('source %s' % mel_script)
def delete_shelf(shelf_name):
    '''
    Delete maya shelf and update maya shelf optionVars
    :param shelf_name: string: name of the shelf to be deleted
    :return: None (no-ops in batch mode or when the shelf doesn't exist)
    '''
    if mayaIsBatch():
        return
    if not cmds.shelfLayout(shelf_name, q=True, ex=True):
        return
    shelfs = cmds.optionVar(q='numShelves')
    current_shelf = None
    # Shelf preferences: find the index of this shelf in the optionVars
    for i in range(shelfs + 1):
        if shelf_name == cmds.optionVar(q="shelfName%i" % i):
            current_shelf = i
            break
    # manage shelve ids: bug fix - only renumber when the shelf was actually
    # found in the optionVars, otherwise range(None, ...) raised TypeError
    if current_shelf is not None:
        for i in range(current_shelf, shelfs + 1):
            cmds.optionVar(iv=("shelfLoad%s" % str(i), cmds.optionVar(q="shelfLoad%s" % str(i + 1))))
            cmds.optionVar(sv=("shelfName%s" % str(i), cmds.optionVar(q="shelfName%s" % str(i + 1))))
            cmds.optionVar(sv=("shelfFile%s" % str(i), cmds.optionVar(q="shelfFile%s" % str(i + 1))))
        cmds.optionVar(remove="shelfLoad%s" % shelfs)
        cmds.optionVar(remove="shelfName%s" % shelfs)
        cmds.optionVar(remove="shelfFile%s" % shelfs)
        cmds.optionVar(iv=("numShelves", shelfs - 1))
    cmds.deleteUI(shelf_name, layout=True)
    # clear any stale '.deleted' shelf pref left by a previous Maya session
    pref_file = os.path.join(mayaPrefs(), 'prefs', 'shelves', 'shelf_%s.mel.deleted' % shelf_name)
    if os.path.exists(pref_file):
        os.remove(pref_file)
    mel.eval("shelfTabChange")
    log.info('Shelf deleted: % s' % shelf_name)
def load_shelf(shelf_path):
    '''
    load Maya shelf
    :param shelf_path: string: file path to maya shelf
    :return: True on success, None otherwise (no-op in batch mode)
    '''
    if mayaIsBatch():
        return
    # get current top shelf
    gShelfTopLevel = mel.eval("string $shelf_ly=$gShelfTopLevel")
    top=cmds.shelfTabLayout(gShelfTopLevel, q=True, st=True)
    if os.path.exists(shelf_path):
        #print shelf_path
        # NOTE(review): delete_shelf expects a shelf *name* but gets the full
        # file path here, so its existence check likely no-ops - confirm intent
        delete_shelf(shelf_path)
        mel.eval('source "%s"' % shelf_path)
        mel.eval('loadNewShelf("%s")' % shelf_path)
        log.info('Shelf loaded: % s' % shelf_path)
        return True
    else:
        log.error('Cant load shelf, file doesnt exist: %s' % shelf_path)
    # restore users top shelf
    # NOTE(review): only reached on the failure path - the success branch
    # returns True above; confirm whether the restore should also run on success
    cmds.shelfTabLayout(gShelfTopLevel, e=True, st=top)
# -----------------------------------------------------------------------------------------
# PRO PACK ---
# -----------------------------------------------------------------------------------------
# placeholder; rebound to pro_pack_missing_stub at the bottom of this module
PRO_PACK_STUBS=None
def pro_pack_path():
    '''
    expected location of the optional Red9 pro_pack folder
    '''
    module_root = red9ModulePath()
    return os.path.join(module_root, 'pro_pack')
def has_pro_pack():
    '''
    Red9 Pro_Pack is available and activated as user

    :return: True when the pro_pack folder exists and the user check passes,
        or when the folder exists but the new r9pro API is missing (legacy
        builds); False otherwise
    '''
    if not os.path.exists(pro_pack_path()):
        return False
    try:
        # new pro_pack call
        import Red9.pro_pack.r9pro as r9pro
        status = r9pro.checkr9user()
        # a str status is an error message; anything else truthy means activated
        # (isinstance replaces the old issubclass(type(status), str) idiom)
        return bool(status) and not isinstance(status, str)
    except Exception:
        # narrowed from a bare except: we have the pro-pack folder so assume
        # we're running a legacy build (Dambusters support)
        return True
class ProPack_UIError(Exception):
    '''
    custom exception so we can catch it; constructing/raising it also
    launches the get-ProPack UI as a side effect
    '''
    def __init__(self, *args):
        # deliberate side effect: pop the ProPack advert window
        get_pro_pack()
class ProPack_Error(Exception):
    '''
    custom exception so we can catch it. This is an in-function
    error with a fixed 'ProPack missing from setup!' message.
    '''
    def __init__(self, *args):
        super(ProPack_Error, self).__init__('ProPack missing from setup!')
class pro_pack_missing_stub(object):
    '''
    Stub bound to PRO_PACK_STUBS when Pro_Pack is missing: instantiating it
    raises ProPack_UIError, which in turn launches the get-ProPack UI.
    '''
    def __init__(self):
        raise ProPack_UIError()
# -----------------------------------------------------------------------------------------
# RED9 PRODUCTION MODULES ---
# -----------------------------------------------------------------------------------------
def has_internal_systems():
    '''
    Red9 Consultancy internal modules only

    :return: True if the Red9_Internals folder exists next to the
        StudioPack, else False (previously this implicitly returned None)
    '''
    return os.path.exists(internal_module_path())
def internal_module_path():
    '''
    expected location of the Red9_Internals repo, beside the StudioPack root
    '''
    parent_dir = os.path.dirname(os.path.dirname(red9ModulePath()))
    return os.path.join(parent_dir, 'Red9_Internals')
# -----------------------------------------------------------------------------------------
# CLIENT MODULES ---
# -----------------------------------------------------------------------------------------
# populated by boot_client_projects() with the name of each client module booted
CLIENTS_BOOTED=[]
def client_core_path():
    '''
    expected location of the Red9_ClientCore repo, beside the StudioPack root
    '''
    parent_dir = os.path.dirname(os.path.dirname(red9ModulePath()))
    return os.path.join(parent_dir, 'Red9_ClientCore')
def has_client_modules():
    '''
    Red9 Client Modules is the distribution of bespoke code to clients
    that tightly integrates into our ProPack core

    :return: True if the Red9_ClientCore folder exists, else False
        (previously this implicitly returned None)
    '''
    return os.path.exists(client_core_path())
def get_client_modules():
    '''
    get all client modules ready for the boot sequence
    #TODO: link this up with a management setup so we can determine
    which client to boot if we have multiple client repositories in the system.

    :return: list of client folder names (hidden/underscored folders skipped)
    '''
    if not has_client_modules():
        return []
    root = client_core_path()
    return [folder for folder in os.listdir(root)
            if os.path.isdir(os.path.join(root, folder))
            and not folder.startswith(('.', '_'))]
def clients_booted():
    '''
    return the client modules booted by the system
    '''
    # reading a module-level name needs no 'global' declaration
    return CLIENTS_BOOTED
def boot_client_projects():
    '''
    Boot Client modules found in the Red9_ClientCore dir. This now prompts
    if multiple client projects were found, letting the user boot one or all.
    '''
    global CLIENTS_BOOTED
    CLIENTS_BOOTED=[]
    clients=get_client_modules()
    clientsToBoot=[]
    # only prompt in interactive sessions with more than one client found
    if clients and len(clients)>1 and not mayaIsBatch():
        options=['All']
        options.extend(clients)
        result=cmds.confirmDialog(title='ProjectPicker',
                                  message=("Multiple Projects Found!\r\r"+
                                           "Which Project would you like to boot?"),
                                  button=options, messageAlign='center')
        if result == 'All':
            clientsToBoot=clients
        else:
            clientsToBoot.append(result)
    else:
        clientsToBoot=clients
    # boot the project / projects (deferred so Maya UI is fully up first)
    for client in clientsToBoot:
        #log.info('Booting Client Module : %s' % client)
        cmds.evalDeferred("import Red9_ClientCore.%s" % client, lp=True)  # Unresolved Import
        CLIENTS_BOOTED.append(client)
    # remove unused menuItems - added previously so that the menu grouping is clean
    for client in clients:
        if not client in clientsToBoot:
            cmds.deleteUI('redNineClient%sItem' % client)
            log.debug('Unused Client Menu Removed: %s' % client)
def __reload_clients__():
    '''
    used in the main reload_Red9 call below to ensure that
    the reload sequence is correct for the MetaData registry
    '''
    for client in get_client_modules():
        try:
            path='Red9_ClientCore.%s' % client
            cmds.evalDeferred("import %s;%s._reload()" % (path,path), lp=True)  # Unresolved Import
            log.info('Reloaded Client : "%s"' % path)
        except:
            # NOTE(review): evalDeferred queues the call, so a failure inside a
            # client's _reload is unlikely to surface here - confirm this catch
            log.info('Client : "%s" : does not have a _reload func internally' % path)
# -----------------------------------------------------------------------------------------
# BOOT CALL ---
# -----------------------------------------------------------------------------------------
def start(Menu=True, MayaUIHooks=True, MayaOverloads=True, parentMenu='MayaWindow'):
    '''
    Main entry point for the StudioPack
    :param Menu: Add the Red9 Menu to the Maya Main Menus
    :param MayaUIHooks: Add the Red9 hooks to Maya Native UI's
    :param MayaOverloads: run the Maya native script hacks for Red9 - integrates into native Maya ui's
    :param parentMenu: Maya UI name to parent the main Red9 menu under
    '''
    log.info('Red9 StudioPack v%s : author: %s' % (red9_getVersion(), red9_getAuthor()))
    log.info('Red9 StudioPack Setup Calls :: Booting from >> %s' % red9ModulePath())
    #check for current builds
    # currentBuild=False
    # try:
    #     currentBuild = mel.eval('$temp=$buildInstalled')
    # except:
    #     print 'Red9 : version not found'
    #
    # if currentBuild:
    #     print 'Red9 : StudioPack already found : v', currentBuild
    #     if currentBuild<=red9_getVersion():
    #         print 'Red9 StudioPack Start Aborted : v%f is already installed' % currentBuild
    #         return
    # else:
    #     print 'Red9 : no version currently loaded'
    #Ensure the Plug-in and Icon paths are up
    addPluginPath()
    addIconsPath()
    #Need to add a Mel Folder to the scripts path
    addScriptsPath(os.path.join(red9ModulePath(),'core'))
    #Add the Packages folder
    addPythonPackages()
    # UI work only makes sense in an interactive (non-batch) session
    if not cmds.about(batch=True):
        if Menu:
            try:
                menuSetup(parent=parentMenu)
            except:
                log.debug('Red9 main menu Build Failed!')
        if MayaUIHooks:
            #Source Maya Hacked Mel files
            hacked=red9MayaNativePath()
            if hacked and MayaOverloads:
                addScriptsPath(os.path.join(red9ModulePath(),'startup','maya_native'))
                addScriptsPath(hacked)
                try:
                    mel.eval('source Red9_MelCore')
                    sourceMelFolderContents(hacked)
                except StandardError, error:
                    log.info(error)
            #Add custom items to standard built Maya menus
            addToMayaMenus()
    log.info('Red9 StudioPack Complete!')
    # Rearrangement of the Boot core systems to better structure the boot sequence
    # Boot main Red9.core
    cmds.evalDeferred("import Red9.core", lp=True)
    # Boot the Pro_Pack
    if has_pro_pack():
        cmds.evalDeferred("import Red9.pro_pack", lp=True)  # Unresolved Import
    # Boot the Red9_Internal systems
    if has_internal_systems():
        cmds.evalDeferred("import Red9_Internals", lp=True)  # Unresolved Import
    # Boot Client Codebases
    if has_client_modules():
        boot_client_projects()
        #cmds.evalDeferred('import Red9.startup.setup as r9Setup;r9Setup.boot_client_projects()', lp=True)
def reload_Red9(*args):
    '''
    careful reload of the systems to maintain the integrity of the
    MetaData registry setups for pro_pack, client_core and internals.
    Red9.core must reload first; pro_pack/internals/clients follow in order.
    '''
    #global LANGUAGE_MAP
    #reload(LANGUAGE_MAP)
    import Red9.core
    Red9.core._reload()
    if has_pro_pack():
        print '\nReloading ProPack Systems (INTERNAL USE)'
        print '='*40
        import Red9.pro_pack.core
        Red9.pro_pack.core._reload()
    if has_internal_systems():
        print '\nReloading Internal Codebase'
        print '='*40
        import Red9_Internals
        Red9_Internals._reload()
    if has_client_modules():
        print '\nReloading Client Codebase'
        print '='*40
        __reload_clients__()
# rebind the stub now that the class exists: instantiating PRO_PACK_STUBS
# raises ProPack_UIError, which launches the get-ProPack UI
PRO_PACK_STUBS=pro_pack_missing_stub
| 45.297976 | 160 | 0.60567 |
ace2e83bdcd63f88a25319df4eaff07424db5e10 | 7,013 | py | Python | test/models/nlp/text_classify/test_bilstm.py | q759729997/qytPytorch | b9b4b6aeff67596c493871c0842dc72c5b66c548 | [
"Apache-2.0"
] | null | null | null | test/models/nlp/text_classify/test_bilstm.py | q759729997/qytPytorch | b9b4b6aeff67596c493871c0842dc72c5b66c548 | [
"Apache-2.0"
] | null | null | null | test/models/nlp/text_classify/test_bilstm.py | q759729997/qytPytorch | b9b4b6aeff67596c493871c0842dc72c5b66c548 | [
"Apache-2.0"
] | null | null | null | """
main_module - 文本分类BiLSTM,测试时将对应方法的@unittest.skip注释掉.
Main members:
# __main__ - 程序入口.
"""
import sys
import unittest
import torch
import torchtext
from torch import nn
sys.path.insert(0, './') # 定义搜索路径的优先顺序,序号从0开始,表示最大优先级
import qytPytorch # noqa
print('qytPytorch module path :{}'.format(qytPytorch.__file__)) # 输出测试模块文件位置
from qytPytorch.models.nlp.text_classify import BiLSTM # noqa
from qytPytorch.utils.statistics_utils import get_parameter_number # noqa
from qytPytorch.utils.serialize import save_serialize_obj # noqa
from qytPytorch.utils.serialize import load_serialize_obj # noqa
from qytPytorch.core.vocabulary import get_tokenized_vocab # noqa
from qytPytorch.core.vocabulary import save_vocab_words # noqa
from qytPytorch.dataset.nlp.imdb import get_tokenized_imdb # noqa
from qytPytorch.dataset.nlp.imdb import get_imdb_data_iter # noqa
from qytPytorch.dataset.nlp.imdb import predict_sentiment # noqa
from qytPytorch.core.train import train_net # noqa
from qytPytorch.core.embedding import load_pretrained_embedding # noqa
class TestBiLSTM(unittest.TestCase):
"""BiLSTM.
Main methods:
test_get_out_shape - 计算神经网络输出形状.
test_get_parameter_number - 统计神经网络参数个数.
test_train - 模型训练.
test_train_use_pretrained_embedding - 模型训练,使用预训练embed.
test_net_predict - 模型预测.
"""
# @unittest.skip('debug')
def test_get_out_shape(self):
"""计算神经网络输出形状.
"""
print('{} test_get_out_shape {}'.format('-'*15, '-'*15))
input_shape = (8, 50) # 批量大小,句子长度
net = BiLSTM(vocab_size=1000, labels_size=2)
# print(net)
x = torch.randint(low=1, high=500, size=input_shape, dtype=torch.long) # 数据类型需要为long,以便于embed进行转换
y = net(x)
print(y.shape) # torch.Size([8, 2])
print(y)
"""
tensor([[ 0.0216, 0.0019],
[ 0.0485, 0.0487],
[ 0.0136, -0.0197],
[ 0.0255, -0.0421],
[ 0.0079, -0.0233],
[ 0.0694, 0.0226],
[ 0.0136, -0.0249],
[-0.0350, -0.0268]], grad_fn=<AddmmBackward>)
"""
@unittest.skip('debug')
def test_get_parameter_number(self):
""" 统计神经网络参数个数.
"""
print('{} test_get_parameter_number {}'.format('-'*15, '-'*15))
net = BiLSTM(vocab_size=100000, labels_size=2)
print(net)
print(get_parameter_number(net)) # {'total': 454802, 'trainable': 454802}
# vocab_size=1000 时 {'total': 454802, 'trainable': 454802}
# vocab_size=100000 时 {'total': 5404802, 'trainable': 5404802}
@unittest.skip('debug')
def test_train(self):
""" 模型训练.
"""
print('{} test_train {}'.format('-'*15, '-'*15))
# 数据集加载
test_data_pickle = './data/aclImdb/test_data.pkl'
test_data = load_serialize_obj(test_data_pickle)
test_data = test_data[:100] # 数据量比较大,cpu电脑跑不动,取一部分进行训练
test_data_tokenized = get_tokenized_imdb(imdb_data=test_data)
test_data_vocab = get_tokenized_vocab(test_data_tokenized)
vocab_size = len(test_data_vocab)
print('vocab len:{}'.format(vocab_size)) # vocab len:45098
test_iter = get_imdb_data_iter(test_data, test_data_vocab, batch_size=8, shuffle=True)
print('test_iter len:{}'.format(len(test_iter))) # test_iter len:3218
# 构造模型
net = BiLSTM(vocab_size=vocab_size, labels_size=2)
print('参数量:{}'.format(get_parameter_number(net))) # total:436.002 Thousand, trainable:436.002 Thousand
print(net)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()))
loss_func = nn.CrossEntropyLoss()
# 训练
train_net(net, train_iter=test_iter, dev_iter=test_iter, max_epoch=5, optimizer=optimizer, loss_func=loss_func)
@unittest.skip('debug')
def test_train_use_pretrained_embedding(self):
""" 模型训练,使用预训练embed.
"""
print('{} test_train_use_pretrained_embedding {}'.format('-'*15, '-'*15))
# 数据集加载
test_data_pickle = './data/aclImdb/test_data.pkl'
test_data = load_serialize_obj(test_data_pickle)
test_data = test_data[:1000] # 数据量比较大,cpu电脑跑不动,取一部分进行训练
test_data_tokenized = get_tokenized_imdb(imdb_data=test_data)
test_data_vocab = get_tokenized_vocab(test_data_tokenized)
vocab_size = len(test_data_vocab)
print('vocab len:{}'.format(vocab_size)) # vocab len:45098
test_iter = get_imdb_data_iter(test_data, test_data_vocab, batch_size=8, shuffle=True)
print('test_iter len:{}'.format(len(test_iter))) # test_iter len:3218
# 构造模型
net = BiLSTM(vocab_size=vocab_size, labels_size=2)
print('参数量:{}'.format(get_parameter_number(net))) # total:436.002 Thousand, trainable:436.002 Thousand
# 使用预训练embed初始化
glove_embedding = torchtext.vocab.GloVe(name='6B', dim=50, cache='./data/torchtext')
print("glove_embedding 一共包含%d个词。" % len(glove_embedding.stoi)) # 一共包含400000个词。
words = test_data_vocab.itos
embed = load_pretrained_embedding(words=words, pretrained_vocab=glove_embedding) # There are 73 oov words.
net.embedding.weight.data.copy_(embed)
net.embedding.weight.requires_grad = False # 直接加载预训练好的, 所以不需要更新它
print('参数量:{}'.format(get_parameter_number(net))) # total:436.002 Thousand, trainable:404.802 Thousand
print(net)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()))
loss_func = nn.CrossEntropyLoss()
# 训练
train_net(net, train_iter=test_iter, dev_iter=test_iter, max_epoch=2, optimizer=optimizer, loss_func=loss_func)
# 保存模型
torch.save(net, f='./data/save/text_classify/bilstm/model.pkl')
# 保存vocabulary
save_vocab_words(test_data_vocab, file_name='./data/save/text_classify/bilstm/vocab_words.txt')
save_serialize_obj(test_data_vocab, filename='./data/save/text_classify/bilstm/vocab.pkl')
@unittest.skip('debug')
def test_net_predict(self):
    """Run sentiment prediction with the previously saved model and vocabulary."""
    print('{} test_net_predict {}'.format('-'*15, '-'*15))
    # Load the saved model.
    model_file = './data/save/text_classify/bilstm/model.pkl'
    net = torch.load(model_file)
    print(net)
    # Load the vocabulary.
    vocab_file = './data/save/text_classify/bilstm/vocab.pkl'
    vocab_obj = load_serialize_obj(vocab_file)
    print('vocab len:{}'.format(len(vocab_obj)))  # vocab len:624
    # Predict on one positive-leaning and one negative-leaning token list.
    label = predict_sentiment(net=net, vocab_obj=vocab_obj, words=['this', 'movie', 'is', 'so', 'good'])
    print(label)
    label = predict_sentiment(net=net, vocab_obj=vocab_obj, words=['terrible', 'movie', 'is', 'so', 'bad'])
    print(label)
if __name__ == "__main__":
    unittest.main()  # Run every test case in this source file.
| 44.106918 | 120 | 0.643804 |
ace2e853523602d30659ce639e511ba2d604705f | 13,215 | py | Python | app.py | asyrofist/tracereq | 4aa5071c6499e26771185f711af4393a865cb69a | [
"MIT"
] | 1 | 2020-11-24T05:32:04.000Z | 2020-11-24T05:32:04.000Z | app.py | asyrofist/tracereq | 4aa5071c6499e26771185f711af4393a865cb69a | [
"MIT"
] | null | null | null | app.py | asyrofist/tracereq | 4aa5071c6499e26771185f711af4393a865cb69a | [
"MIT"
] | null | null | null | import string, numpy as np, pandas as pd, streamlit as st
from multiprocessing import Pool
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
from nltk import word_tokenize
from sklearn.cluster import KMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import classification_report, pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
from cleaning import (apply_cleaning, build_idf_matrix, build_lexicon, freq,
fulldataset, idf, l2_normalizer, numDocsContaining)
def _cosine_and_kmeans(feature_matrix, id_requirement, cosine_header):
    """Show pairwise cosine scores for *feature_matrix* rows and cluster them with K-Means.

    This consolidates the similarity + clustering display that was duplicated
    verbatim in the IR, LSA and LDA branches.
    """
    st.subheader(cosine_header)
    # A linear kernel over the same matrix yields the pairwise similarity scores
    # between every pair of requirement vectors.
    vectors = np.array(feature_matrix)
    cosine_similaritas = pairwise_kernels(vectors, vectors, metric='linear')
    st.write(pd.DataFrame(cosine_similaritas, index=id_requirement, columns=id_requirement))
    # K-Means over the similarity rows. BUG FIX: the original slider used
    # (min=0, max=5, default=len(id_requirement)); the default could exceed the
    # maximum (st.slider rejects that) and 0 is not a valid cluster count.
    n_req = len(id_requirement)
    klaster_value = st.sidebar.slider("Berapa Cluster?", 1, n_req, min(5, n_req))
    kmeans = KMeans(n_clusters=klaster_value)
    kmeans.fit(cosine_similaritas)
    st.subheader("K-Means Cluster")
    # Same ad-hoc "accuracy" diagnostic as the original: compare each row's
    # predicted cluster against the truthiness of the row (kept for parity).
    correct = 0
    for i in range(len(cosine_similaritas)):
        predict_me = np.array(cosine_similaritas[i].astype(float))
        predict_me = predict_me.reshape(-1, len(predict_me))
        prediction = kmeans.predict(predict_me)
        if prediction[0] == cosine_similaritas[i].all():
            correct += 1
    st.sidebar.write(correct / len(cosine_similaritas))
    st.write(pd.DataFrame(kmeans.cluster_centers_, columns=id_requirement))


def _tfidf_l2_matrix(doc_array, corpus):
    """Return the manually weighted, l2-normalised TF-IDF matrix for *doc_array*.

    Shared by the Information-Retrieval and Ontology branches, which previously
    repeated this computation line-for-line.
    """
    vocabulary = build_lexicon(corpus)
    my_idf_vector = [idf(word, corpus) for word in vocabulary]
    my_idf_matrix = build_idf_matrix(my_idf_vector)
    # TF x IDF, then normalise each document vector to unit l2 length.
    doc_term_matrix_tfidf = [np.dot(tf_vector, my_idf_matrix) for tf_vector in doc_array]
    doc_term_matrix_tfidf_l2 = [l2_normalizer(vec) for vec in doc_term_matrix_tfidf]
    return np.matrix(doc_term_matrix_tfidf_l2)


st.write("""
# Simple Traceability SRS Document
Berikut ini algoritma yang digunakan untuk pengukuran keterlacakan pada dokumen
""")

# File upload: the traceability pipeline only runs once a spreadsheet is provided.
index0 = st.file_uploader("Choose a file")
if index0 is not None:
    st.sidebar.header('Dataset Parameter')
    x1 = pd.ExcelFile(index0)
    index1 = st.sidebar.selectbox('What Dataset you choose?', x1.sheet_names)
    # Load the selected sheet (functional or non-functional requirements).
    st.header('Dataset parameters')
    statement = fulldataset(index0, index1)
    # Clean the requirement statements and show original vs cleaned side by side.
    text_to_clean = list(statement['Requirement Statement'])
    print("Loading Original & Cleaned Text...")
    cleaned_text = apply_cleaning(text_to_clean)
    text_df = pd.DataFrame([text_to_clean, cleaned_text],
                           index=['ORIGINAL', 'CLEANED'], columns=statement['ID']).T
    st.write(text_df)

    st.header('Traceability parameters')
    id_requirement = statement['ID']
    genre = st.sidebar.radio("What do you choose?",
                             ('Information_Retrieval', 'Ontology', 'IR+LSA', 'IR+LDA'))

    if genre == 'Information_Retrieval':
        st.subheader("bag of words")
        # BUG FIX: the corpus was passed to the CountVectorizer constructor,
        # where the first positional parameter is `input` (a mode flag), not
        # the documents; the corpus belongs in fit()/transform().
        count_vector = CountVectorizer()
        count_vector.fit(cleaned_text)
        kolom_df = count_vector.get_feature_names()
        doc_array = count_vector.transform(cleaned_text).toarray()
        st.write(pd.DataFrame(doc_array, index=id_requirement, columns=kolom_df))
        # Manual TF-IDF with l2 normalisation.
        hasil_tfidf = _tfidf_l2_matrix(doc_array, cleaned_text)
        st.subheader("l2 tfidf normalizer")
        st.write(pd.DataFrame(hasil_tfidf, index=id_requirement, columns=kolom_df))
        _cosine_and_kmeans(hasil_tfidf, id_requirement, "IR using cosine")

    elif genre == 'Ontology':
        # Bag of words (corpus goes to fit, see BUG FIX note in the IR branch).
        count_vector = CountVectorizer()
        count_vector.fit(cleaned_text)
        doc_array = count_vector.transform(cleaned_text).toarray()
        doc_feature = count_vector.get_feature_names()
        st.subheader('BOW parameters')
        st.dataframe(pd.DataFrame(doc_array, index=id_requirement, columns=doc_feature))
        # Manual TF-IDF (same weighting scheme as the IR branch).
        hasil_tfidf = _tfidf_l2_matrix(doc_array, cleaned_text)
        st.subheader('TFIDF parameters')
        st.dataframe(pd.DataFrame(hasil_tfidf, index=id_requirement, columns=doc_feature))
        # Doc2Vec: one TaggedDocument per cleaned requirement sentence.
        st.subheader('doc2vec parameters')
        sentences = [TaggedDocument(words=word_tokenize(text), tags=['sent{}'.format(i)])
                     for i, text in enumerate(cleaned_text)]
        st.sidebar.subheader("Model Parameter")
        # Defaults clamped into the widget range (st.slider rejects out-of-range defaults).
        size_value = st.sidebar.slider('Berapa Size Model?', 0, 200, min(len(doc_feature), 200))
        iterasi_value = st.sidebar.slider('Berapa Iterasi Model?', 0, 100, 10)
        window_value = st.sidebar.slider('Berapa Window Model?', 0, 10, 3)
        dimension_value = st.sidebar.slider('Berapa Dimension Model', 0, 10, 1)
        # BUG FIX: the original built a throwaway multiprocessing.Pool just to
        # read `_processes`, leaking worker processes on every rerun.
        from multiprocessing import cpu_count
        model = Doc2Vec(documents=sentences, dm=dimension_value, size=size_value,
                        window=window_value, min_count=1, iter=iterasi_value,
                        workers=cpu_count())
        model.init_sims(replace=True)
        # Infer one vector per document (explicitly use .words instead of
        # relying on accidental namedtuple unpacking).
        nilai_vektor = [model.infer_vector(doc.words) for doc in sentences]
        vector_columns = ['vektor {}'.format(num) for num in range(size_value)]
        st.dataframe(pd.DataFrame(nilai_vektor, index=id_requirement, columns=vector_columns))
        # K-Means on the inferred document vectors.
        st.subheader('Kmeans parameters')
        true_k = len(nilai_vektor)
        model = KMeans(n_clusters=true_k, init='k-means++', max_iter=iterasi_value, n_init=1)
        model.fit(nilai_vektor)
        order_centroids = model.cluster_centers_.argsort()[:, ::-1]
        st.dataframe(pd.DataFrame(order_centroids, index=id_requirement, columns=vector_columns))

    elif genre == 'IR+LSA':
        st.sidebar.subheader("Parameter LSA")
        # BUG FIX: the original drew two sliders into the same `feature_value`
        # variable (so the first widget was dead) and used a default of 1000
        # with a maximum of 100/10, which st.slider rejects. One widget per
        # setting, defaults inside the range.
        feature_value = st.sidebar.slider("Berapa Feature?", 10, 1000, 1000)  # keep top-N terms
        df_value = st.sidebar.slider("Berapa df?", 0.0, 0.9, 0.5)
        iterasi_value = st.sidebar.slider('Berapa Dimension Model?', 0, 200, 100)
        random_value = st.sidebar.slider('Berapa Random Model?', 0, 300, 122)
        vectorizer = TfidfVectorizer(stop_words='english',
                                     max_features=feature_value,
                                     max_df=df_value,
                                     smooth_idf=True)
        X = vectorizer.fit_transform(cleaned_text)
        fitur_id = vectorizer.get_feature_names()
        svd_model = TruncatedSVD(n_components=X.shape[0], algorithm='randomized',
                                 n_iter=iterasi_value, random_state=random_value)
        svd_model.fit(X)
        jumlah_kata = svd_model.components_
        st.dataframe(pd.DataFrame(jumlah_kata, index=id_requirement, columns=fitur_id))
        _cosine_and_kmeans(jumlah_kata, id_requirement, "LSA using cosine")

    elif genre == 'IR+LDA':
        st.sidebar.subheader("Parameter LDA")
        from sklearn.decomposition import LatentDirichletAllocation
        # BUG FIX: as in the LSA branch, the original overwrote `feature_value`
        # with a second slider and labelled both df sliders identically; the
        # widgets now have distinct variables and labels.
        feature_value = st.sidebar.slider("Berapa Feature?", 10, 1000, 1000)
        maxdf_value = st.sidebar.slider("Berapa max df?", 0.0, 1.05, 0.95)
        mindf_value = st.sidebar.slider("Berapa min df?", 0, 5, 2)
        iterasi_value = st.sidebar.slider('Berapa Iterasi Model?', 0, 200, 5)
        random_value = st.sidebar.slider('Berapa Random Model?', 0, 10, 1)
        tf_vectorizer = CountVectorizer(max_df=maxdf_value, min_df=mindf_value,
                                        max_features=feature_value,
                                        stop_words='english')
        tf = tf_vectorizer.fit_transform(cleaned_text)
        lda = LatentDirichletAllocation(n_components=tf.shape[0], max_iter=iterasi_value,
                                        learning_method='online',
                                        learning_offset=50.,
                                        random_state=random_value)
        lda.fit(tf)
        tf_feature_names = tf_vectorizer.get_feature_names()
        jumlah_kata = lda.components_
        st.dataframe(pd.DataFrame(jumlah_kata, index=id_requirement, columns=tf_feature_names))
        _cosine_and_kmeans(jumlah_kata, id_requirement, "LDA using cosine")
| 48.944444 | 180 | 0.63269 |
ace2ea5a851ff3068fa9281a6b1c628a65cef70f | 420 | py | Python | motion_detector.py | antontsv/r2d2 | 1b92778ab7995d2132adce32da2b2903d9a7a1c3 | [
"MIT"
] | 2 | 2015-06-26T08:36:22.000Z | 2020-03-30T12:01:12.000Z | motion_detector.py | antontsv/r2d2 | 1b92778ab7995d2132adce32da2b2903d9a7a1c3 | [
"MIT"
] | null | null | null | motion_detector.py | antontsv/r2d2 | 1b92778ab7995d2132adce32da2b2903d9a7a1c3 | [
"MIT"
] | 2 | 2016-08-09T17:38:25.000Z | 2020-03-30T12:01:13.000Z | #!/usr/bin/env python
from gpio_module import *
#
# Lets observe some infrared light
#
class MotionDetector(GPIO_Module):
    """Polls a PIR (passive infrared) sensor wired to a GPIO pin for movement."""

    # Pin the PIR sensor's data line is wired to (numbering scheme set up by
    # GPIO_Module.setup() — confirm against the base class).
    PIR_PIN = 7

    def __init__(self):
        self.setup()  # base-class GPIO initialisation
        GPIO.setup(self.PIR_PIN, GPIO.IN)

    def any_movement(self):
        """Return True when the PIR sensor currently reports motion."""
        return bool(GPIO.input(self.PIR_PIN))

    def wait_for_movement(self):
        """Block until motion is detected, then return True.

        BUG FIX: the original polled in a tight ``while ... continue`` loop,
        pegging a full CPU core; a short sleep between polls is far kinder to
        a Raspberry Pi and still reacts well within human-perceptible time.
        """
        import time
        while not self.any_movement():
            time.sleep(0.05)
        return True
ace2eac80079f808c11486d2771db42d4f031653 | 173 | py | Python | recipes/Python/252143_Invert_a_dictionary_oneliner/recipe-252143.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/252143_Invert_a_dictionary_oneliner/recipe-252143.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/252143_Invert_a_dictionary_oneliner/recipe-252143.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # Invert a dictionary
mydict = {"Apple": "red", "Banana": "yellow", "Carrot": "orange"}
inverted_dict = dict([[v,k] for k,v in mydict.items()])
print inverted_dict["red"]
| 24.714286 | 65 | 0.653179 |
ace2eb3557c0792d2e60495c7538534b95d7cecc | 6,564 | py | Python | test/functional/wallet_txn_clone.py | Neomnf/NEOM | daf60c9ffc3f85d758c114f1e511d246a2fd178d | [
"MIT"
] | null | null | null | test/functional/wallet_txn_clone.py | Neomnf/NEOM | daf60c9ffc3f85d758c114f1e511d246a2fd178d | [
"MIT"
] | null | null | null | test/functional/wallet_txn_clone.py | Neomnf/NEOM | daf60c9ffc3f85d758c114f1e511d246a2fd178d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
from test_framework.test_framework import NEOMTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
sync_blocks,
)
class TxnMallTest(NEOMTestFramework):
    """Check wallet accounting when a transaction is cloned with a malleated scriptsig.

    Runs a 4-node network split into two halves, sends two transactions on one
    side, crafts a sighash-malleated clone of the first, mines the clone on the
    other side, then reconnects and verifies confirmations and balances.
    """

    def set_test_params(self):
        self.num_nodes = 4

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def add_options(self, parser):
        parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
                            help="Test double-spend of 1-confirmed transaction")
        parser.add_argument("--segwit", dest="segwit", default=False, action="store_true",
                            help="Test behaviour with SegWit txn (which should fail")

    def setup_network(self):
        # Start with split network: nodes {0,1} on one side, {2,3} on the other.
        super(TxnMallTest, self).setup_network()
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)

    def run_test(self):
        # SegWit txids commit to witness data, so the "clone" is identical and
        # the test short-circuits below after asserting txid equality.
        if self.options.segwit:
            output_type = "p2sh-segwit"
        else:
            output_type = "legacy"
        # All nodes should start with 1,250 NEOM:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress()  # bug workaround, coins generated assigned to first getnewaddress!
        self.nodes[0].settxfee(.001)
        # Self-sends that create the UTXOs tx1/tx2 will later spend.
        node0_address1 = self.nodes[0].getnewaddress(address_type=output_type)
        node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, 1219)
        node0_tx1 = self.nodes[0].gettransaction(node0_txid1)
        node0_address2 = self.nodes[0].getnewaddress(address_type=output_type)
        node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, 29)
        node0_tx2 = self.nodes[0].gettransaction(node0_txid2)
        assert_equal(self.nodes[0].getbalance(),
                     starting_balance + node0_tx1["fee"] + node0_tx2["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress()
        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendtoaddress(node1_address, 40)
        txid2 = self.nodes[0].sendtoaddress(node1_address, 20)
        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1, 1)
        clone_inputs = [{"txid": rawtx1["vin"][0]["txid"], "vout": rawtx1["vin"][0]["vout"]}]
        clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][0]["value"],
                         rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][1]["value"]}
        clone_locktime = rawtx1["locktime"]
        clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
        # createrawtransaction randomizes the order of its outputs, so swap them if necessary.
        # output 0 is at version+#inputs+input+sigstub+sequence+#outputs
        # 40 NEOM serialized is 00286bee00000000
        pos0 = 2 * (4 + 1 + 36 + 1 + 4 + 1)
        hex40 = "00286bee00000000"
        output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16:pos0 + 16 + 2], 0)
        if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0:pos0 + 16] != hex40 or rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0:pos0 + 16] == hex40):
            output0 = clone_raw[pos0:pos0 + output_len]
            output1 = clone_raw[pos0 + output_len:pos0 + 2 * output_len]
            clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
        # Use a different signature hash type to sign. This creates an equivalent but malleated clone.
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_raw, None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)
        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50NEOM for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"]
        if self.options.mine_block:
            expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Send clone and its parent to miner (node 2, on the other network half).
        self.nodes[2].sendrawtransaction(node0_tx1["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        if self.options.segwit:
            assert_equal(txid1, txid1_clone)
            return
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].sendrawtransaction(node0_tx2["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Verify expected confirmations: tx1 was displaced by its mined clone
        # (conflicted => negative confirmations).
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)
        # Check node0's total balance; should be same as before the clone, + 100 NEOM for 2 matured,
        # less possible orphaned matured subsidy
        expected += 100
        if (self.options.mine_block):
            expected -= 50
        assert_equal(self.nodes[0].getbalance(), expected)
if __name__ == '__main__':
    TxnMallTest().main()  # Entry point when run directly by the functional test runner.
| 43.76 | 160 | 0.641377 |
ace2eca5f3f73bd6e320a0a37f734cf5f62ff776 | 359 | py | Python | appengine/findit/dto/int_range.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/findit/dto/int_range.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/findit/dto/int_range.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from libs.structured_object import StructuredObject
class IntRange(StructuredObject):
  """Represents a generic integer range to include an upper and lower bound."""
  # Field types declared for StructuredObject (de)serialization.
  lower = int  # lower bound of the range
  upper = int  # upper bound of the range (inclusiveness not enforced here — confirm with callers)
| 29.916667 | 79 | 0.768802 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.