| hexsha (string, len 40) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4 to 209) | max_stars_repo_name (string, len 5 to 121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4 to 209) | max_issues_repo_name (string, len 5 to 121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4 to 209) | max_forks_repo_name (string, len 5 to 121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
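The columns above describe one row per source file: Git metadata (hexsha, repo paths and names, licenses), popularity counters (stars, issues, forks and their event timestamps), the raw file in `content`, and simple text statistics. As a minimal, hedged sketch of consuming a dataset with this schema through the Hugging Face `datasets` library (the dataset identifier `org/python-source-files` is a placeholder, not the real name of this dump):

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the actual identifier of this dump.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

for row in ds:
    # Column names follow the schema above.
    if row["ext"] == "py" and (row["max_stars_count"] or 0) > 10:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        print(row["content"][:200])  # first 200 characters of the file
        break
```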
06dad1410e2a02e151efb9ec9b2aee561ebc438e
| 1,784
|
py
|
Python
|
b3j0f/aop/__init__.py
|
b3j0f/aop
|
22b9ba335d103edd929c25eb6dbb94037d3615bc
|
[
"MIT"
] | 6
|
2015-03-17T12:42:32.000Z
|
2020-04-19T04:03:54.000Z
|
b3j0f/aop/__init__.py
|
b3j0f/aop
|
22b9ba335d103edd929c25eb6dbb94037d3615bc
|
[
"MIT"
] | 3
|
2016-01-25T13:13:29.000Z
|
2020-04-19T21:23:17.000Z
|
b3j0f/aop/__init__.py
|
b3j0f/aop
|
22b9ba335d103edd929c25eb6dbb94037d3615bc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Aspect Oriented Programming Library for Python.
Provides tools to (un)weave and get advices, and check joinpoint status."""
__all__ = [
'__version__',
'weave', 'weave_on', 'get_advices', 'unweave',
'Joinpoint', 'JoinpointError', 'is_intercepted', 'get_intercepted'
]
from .version import __version__
from .advice import weave, unweave, get_advices, weave_on
from .joinpoint import (
Joinpoint, JoinpointError, is_intercepted, get_intercepted
)
| 42.47619
| 79
| 0.700673
|
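For orientation, a rough usage sketch of the names exported by the `b3j0f/aop/__init__.py` above. Only the function names come from the `__all__` list; the keyword arguments and the advice protocol (a callable that receives a joinpoint and calls `proceed()`) are assumptions and may differ from the actual API:

```python
from b3j0f.aop import weave, unweave, get_advices, is_intercepted

def trace(joinpoint):
    # Assumed advice protocol: receive the joinpoint, optionally proceed with the call.
    print("before")
    result = joinpoint.proceed()
    print("after")
    return result

def double(x):
    return 2 * x

weave(target=double, advices=trace)   # assumed signature
print(is_intercepted(double))         # expected: True
print(get_advices(double))
print(double(3))
unweave(double)                       # remove the advice again
```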
176795ef52d33e3324215277414451113e11980a
| 800
|
py
|
Python
|
04-Date.py
|
tgjeon/Zero-to-Hero-Python
|
cdb8dd5282152110d25262254741302d2d24ec6d
|
[
"MIT"
] | 3
|
2016-09-20T15:50:30.000Z
|
2019-05-14T03:46:53.000Z
|
04-Date.py
|
tgjeon/Zero-to-Hero-Python
|
cdb8dd5282152110d25262254741302d2d24ec6d
|
[
"MIT"
] | null | null | null |
04-Date.py
|
tgjeon/Zero-to-Hero-Python
|
cdb8dd5282152110d25262254741302d2d24ec6d
|
[
"MIT"
] | 2
|
2018-06-08T15:20:00.000Z
|
2019-05-14T03:46:53.000Z
|
# -*- coding: utf8 -*-
"""
04. Let's use dates. (Estimated time: about 16 minutes)
- Original version (MVA): https://mva.microsoft.com/ko/training-courses/python%EC%9D%84-%EC%82%AC%EC%9A%A9%ED%95%9C-%ED%94%84%EB%A1%9C%EA%B7%B8%EB%9E%98%EB%B0%8D-%EC%86%8C%EA%B0%9C-8360
- Compact version (Youtube): https://youtu.be/emY34tSKXc4?t=1h17m24s
- Note:
- The explanation below follows the Compact version.
"""
"""
Let's handle dates and times.
"""
# Use the datetime class.
import datetime
today = datetime.date.today()
print(today)
"""
Commonly used date format codes
%b: month (abbreviated)
%B: month
%y: two-digit year
%a: weekday (abbreviated)
%A: weekday
More details: strftime.org
"""
print(today)
print(today.month)
print(today.year)
print(today.strftime('%d %b, %Y'))
"""
Let's take input from the user and change its format.
"""
birthday = input("When is your birthday? ")
birthdate = datetime.datetime.strptime(birthday, "%m/%d/%Y")
print(birthdate)
| 15.09434
| 185
| 0.65125
|
f1e7bd5562a319d29630285960901b0d24e6544a
| 304
|
py
|
Python
|
flask_app/app/api/__init__.py
|
ruteckimikolaj/demo-gatsby-flask-scraper
|
09490bac49147760a1301012ffa2619e1c690c78
|
[
"MIT"
] | null | null | null |
flask_app/app/api/__init__.py
|
ruteckimikolaj/demo-gatsby-flask-scraper
|
09490bac49147760a1301012ffa2619e1c690c78
|
[
"MIT"
] | 1
|
2021-03-31T19:32:20.000Z
|
2021-03-31T19:32:20.000Z
|
flask_app/app/api/__init__.py
|
ruteckimikolaj/demo-gatsby-flask-scraper
|
09490bac49147760a1301012ffa2619e1c690c78
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, Config, Flask
bp = Blueprint('api', __name__)
# Import the route modules after the blueprint exists to avoid circular imports.
from app.api import receiver, sender, tokens  # noqa: E402,F401
def create_app(config_class=Config):
    app = Flask(__name__)
    app.config.from_object(config_class)
    # ...
    from app.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix='/api')
    return app
| 20.266667
| 54
| 0.677632
|
32d480e9c81c44a68a02b8206991f417add1fd2d
| 10,430
|
py
|
Python
|
accelbyte_py_sdk/api/platform/operations/reward/query_rewards.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/platform/operations/reward/query_rewards.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/platform/operations/reward/query_rewards.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-platform-service (4.10.0)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from .....core import StrEnum
from ...models import RewardPagingSlicedResult
from ...models import ValidationErrorEntity
class SortByEnum(StrEnum):
NAMESPACE = "namespace"
NAMESPACE_ASC = "namespace:asc"
NAMESPACE_DESC = "namespace:desc"
REWARDCODE = "rewardCode"
REWARDCODE_ASC = "rewardCode:asc"
REWARDCODE_DESC = "rewardCode:desc"
class QueryRewards(Operation):
"""Query rewards by criteria (queryRewards)
This API is used to query rewards by criteria.
Other detail info:
* Required permission : resource="ADMIN:NAMESPACE:{namespace}:REWARD", action=2 (READ)
* Returns : the list of rewards
Required Permission(s):
- ADMIN:NAMESPACE:{namespace}:REWARD [READ]
Properties:
url: /platform/admin/namespaces/{namespace}/rewards/byCriteria
method: GET
tags: ["Reward"]
consumes: []
produces: ["application/json"]
securities: [BEARER_AUTH] or [BEARER_AUTH]
namespace: (namespace) REQUIRED str in path
event_topic: (eventTopic) OPTIONAL str in query
limit: (limit) OPTIONAL int in query
offset: (offset) OPTIONAL int in query
sort_by: (sortBy) OPTIONAL List[Union[str, SortByEnum]] in query
Responses:
200: OK - RewardPagingSlicedResult (successful operation)
422: Unprocessable Entity - ValidationErrorEntity (20002: validation error)
"""
# region fields
_url: str = "/platform/admin/namespaces/{namespace}/rewards/byCriteria"
_method: str = "GET"
_consumes: List[str] = []
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"], ["BEARER_AUTH"]]
_location_query: str = None
namespace: str # REQUIRED in [path]
event_topic: str # OPTIONAL in [query]
limit: int # OPTIONAL in [query]
offset: int # OPTIONAL in [query]
sort_by: List[Union[str, SortByEnum]] # OPTIONAL in [query]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
"query": self.get_query_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
def get_query_params(self) -> dict:
result = {}
if hasattr(self, "event_topic"):
result["eventTopic"] = self.event_topic
if hasattr(self, "limit"):
result["limit"] = self.limit
if hasattr(self, "offset"):
result["offset"] = self.offset
if hasattr(self, "sort_by"):
result["sortBy"] = self.sort_by
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_namespace(self, value: str) -> QueryRewards:
self.namespace = value
return self
def with_event_topic(self, value: str) -> QueryRewards:
self.event_topic = value
return self
def with_limit(self, value: int) -> QueryRewards:
self.limit = value
return self
def with_offset(self, value: int) -> QueryRewards:
self.offset = value
return self
def with_sort_by(self, value: List[Union[str, SortByEnum]]) -> QueryRewards:
self.sort_by = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "event_topic") and self.event_topic:
result["eventTopic"] = str(self.event_topic)
elif include_empty:
result["eventTopic"] = ""
if hasattr(self, "limit") and self.limit:
result["limit"] = int(self.limit)
elif include_empty:
result["limit"] = 0
if hasattr(self, "offset") and self.offset:
result["offset"] = int(self.offset)
elif include_empty:
result["offset"] = 0
if hasattr(self, "sort_by") and self.sort_by:
result["sortBy"] = [str(i0) for i0 in self.sort_by]
elif include_empty:
result["sortBy"] = []
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, RewardPagingSlicedResult], Union[None, HttpResponse, ValidationErrorEntity]]:
"""Parse the given response.
200: OK - RewardPagingSlicedResult (successful operation)
422: Unprocessable Entity - ValidationErrorEntity (20002: validation error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return RewardPagingSlicedResult.create_from_dict(content), None
if code == 422:
return None, ValidationErrorEntity.create_from_dict(content)
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
namespace: str,
event_topic: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
sort_by: Optional[List[Union[str, SortByEnum]]] = None,
) -> QueryRewards:
instance = cls()
instance.namespace = namespace
if event_topic is not None:
instance.event_topic = event_topic
if limit is not None:
instance.limit = limit
if offset is not None:
instance.offset = offset
if sort_by is not None:
instance.sort_by = sort_by
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> QueryRewards:
instance = cls()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "eventTopic" in dict_ and dict_["eventTopic"] is not None:
instance.event_topic = str(dict_["eventTopic"])
elif include_empty:
instance.event_topic = ""
if "limit" in dict_ and dict_["limit"] is not None:
instance.limit = int(dict_["limit"])
elif include_empty:
instance.limit = 0
if "offset" in dict_ and dict_["offset"] is not None:
instance.offset = int(dict_["offset"])
elif include_empty:
instance.offset = 0
if "sortBy" in dict_ and dict_["sortBy"] is not None:
instance.sort_by = [str(i0) for i0 in dict_["sortBy"]]
elif include_empty:
instance.sort_by = []
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"namespace": "namespace",
"eventTopic": "event_topic",
"limit": "limit",
"offset": "offset",
"sortBy": "sort_by",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"namespace": True,
"eventTopic": False,
"limit": False,
"offset": False,
"sortBy": False,
}
@staticmethod
def get_collection_format_map() -> Dict[str, Union[None, str]]:
return {
"sortBy": "csv", # in query
}
@staticmethod
def get_enum_map() -> Dict[str, List[Any]]:
return {
"sortBy": ["namespace", "namespace:asc", "namespace:desc", "rewardCode", "rewardCode:asc", "rewardCode:desc"],# in query
}
# endregion static methods
| 31.415663
| 171
| 0.598274
|
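Because the operation above is self-contained generated code, building a request object only needs the `create` classmethod and the serialization helpers defined in the class. A small sketch using just the methods shown; the import path is inferred from the repository path above, the namespace and query values are placeholders, and executing the operation through the SDK's request runner is not shown here:

```python
from accelbyte_py_sdk.api.platform.operations.reward.query_rewards import (
    QueryRewards,
    SortByEnum,
)

# Placeholder values for illustration only.
op = QueryRewards.create(
    namespace="my-namespace",
    event_topic="campaign",
    limit=20,
    offset=0,
    sort_by=[SortByEnum.REWARDCODE_ASC],
)

print(op.url)               # /platform/admin/namespaces/{namespace}/rewards/byCriteria
print(op.get_all_params())  # {'path': {'namespace': ...}, 'query': {'eventTopic': ..., ...}}
print(op.to_dict())
```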
0a65278c2b6ccec0553194d6fea1d096e7717cbd
| 964
|
py
|
Python
|
idb/cli/commands/location.py
|
doc22940/idb
|
8eb2d82c6b560a5c243986da28124245284e65e9
|
[
"MIT"
] | null | null | null |
idb/cli/commands/location.py
|
doc22940/idb
|
8eb2d82c6b560a5c243986da28124245284e65e9
|
[
"MIT"
] | 5
|
2021-09-02T15:20:04.000Z
|
2022-02-27T09:50:05.000Z
|
idb/cli/commands/location.py
|
Rezduan83/idb
|
a43b499302a37ada164cd183b9edb0e3a4699a1d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import ArgumentParser, Namespace
from idb.cli.commands.base import CompanionCommand
from idb.common.types import IdbClient
class LocationSetCommand(CompanionCommand):
@property
def description(self) -> str:
return "Set a simulator's location"
@property
def name(self) -> str:
return "set-location"
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("latitude", help="Latitude to set", type=float)
parser.add_argument("longitude", help="Longitude to set", type=float)
super().add_parser_arguments(parser)
async def run_with_client(self, args: Namespace, client: IdbClient) -> None:
await client.set_location(args.latitude, args.longitude)
| 33.241379
| 80
| 0.724066
|
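Given the command name and the positional `latitude`/`longitude` arguments registered by `LocationSetCommand` above, the CLI invocation presumably looks like `idb set-location 37.7749 -122.4194` (example coordinates), which ends up calling `client.set_location(args.latitude, args.longitude)` on the companion.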
ede8136b2f9e57638aba43ef11f5a892e0f316a3
| 1,822
|
py
|
Python
|
spack/packages/dla-future/package.py
|
msimberg/DLA-Future
|
4b15374e234a0c30369ce6ae35b0816e2a19d687
|
[
"BSD-3-Clause"
] | null | null | null |
spack/packages/dla-future/package.py
|
msimberg/DLA-Future
|
4b15374e234a0c30369ce6ae35b0816e2a19d687
|
[
"BSD-3-Clause"
] | null | null | null |
spack/packages/dla-future/package.py
|
msimberg/DLA-Future
|
4b15374e234a0c30369ce6ae35b0816e2a19d687
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class DlaFuture(CMakePackage, CudaPackage):
"""DLA-Future library: Distributed Linear Algebra with Future"""
homepage = "https://github.com/eth-cscs/DLA-Future/wiki"
git = "https://github.com/eth-cscs/DLA-Future"
maintainers = ['teonnik', 'albestro', 'Sely85']
version('develop', branch='master')
variant('doc', default=False,
description='Build documentation.')
depends_on('cmake@3.14:', type='build')
depends_on('doxygen', type='build', when='+doc')
depends_on('mpi')
depends_on('blaspp')
depends_on('lapackpp')
depends_on('hpx@1.4.0:1.4.1 cxxstd=14 networking=none')
depends_on('hpx build_type=Debug', when='build_type=Debug')
depends_on('hpx build_type=Release', when='build_type=Release')
depends_on('hpx build_type=RelWithDebInfo', when='build_type=RelWithDebInfo')
def cmake_args(self):
spec = self.spec
# BLAS/LAPACK
if '^mkl' in spec:
args = [ self.define('DLAF_WITH_MKL', True) ]
else:
args = [
self.define('DLAF_WITH_MKL', False),
self.define('LAPACK_TYPE', 'Custom'),
self.define('LAPACK_LIBRARY',
' '.join([spec[dep].libs.ld_flags for dep in ['blas', 'lapack']]))
]
# CUDA
args.append(self.define_from_variant('DLAF_WITH_CUDA', 'cuda'))
# DOC
args.append(self.define_from_variant('BUILD_DOC', 'doc'))
# TESTs
args.append(self.define('DLAF_WITH_TEST', self.run_tests))
return args
| 31.413793
| 90
| 0.617453
|
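Given the variants and dependencies declared in the recipe above, installing the package from this custom Spack repo presumably looks like `spack install dla-future +doc +cuda` (the spec is illustrative): `+doc` pulls in Doxygen and enables `BUILD_DOC`, while `+cuda` comes from the inherited `CudaPackage` and toggles `DLAF_WITH_CUDA` through `define_from_variant`.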
9066833649dcf82854eb6f0ac4157768cc1e26df
| 8,173
|
py
|
Python
|
scripts/generate_synthetic_data.py
|
JonasFrey96/RPOSE
|
7da77499ab777ce7ee37b731541982870da8d40b
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/generate_synthetic_data.py
|
JonasFrey96/RPOSE
|
7da77499ab777ce7ee37b731541982870da8d40b
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/generate_synthetic_data.py
|
JonasFrey96/RPOSE
|
7da77499ab777ce7ee37b731541982870da8d40b
|
[
"BSD-3-Clause"
] | null | null | null |
import pickle
import numpy as np
import scipy.io as scio
import os
from scipy.spatial.transform import Rotation as R
import random
from math import radians
import time
from pathlib import Path
import scipy
import imageio
# RENDERING
# PYOPENGL_PLATFORM must be set before pyrender is imported so EGL offscreen rendering is used.
os.environ["PYOPENGL_PLATFORM"] = "egl"
from trimesh import collision
from pyrender.constants import RenderFlags
import trimesh
import pyrender
with open( "/home/jonfrey/RPOSE/cfg/datasets/ycb/test.pkl", "rb") as f:
res = pickle.load(f)
np.unique( res['base_path_list'] ).shape, len( res['base_path_list'])
paths = np.unique( res['base_path_list'] ).tolist()
base = "/home/jonfrey/Datasets/ycb"
data = []
for j, pa in enumerate( paths ):
pa = os.path.join( base, pa+'-meta.mat' )
meta = scio.loadmat( pa )
for k,i in enumerate( meta['cls_indexes'][:,0].tolist()):
data.append( { 'pose': meta['poses'][:,:,k],
'index': i,
'scene_indexes': meta['cls_indexes'][:,0],
'path': pa,
'intrinsic_matrix': meta['intrinsic_matrix']} )
poses = np.array( [d['pose'] for d in data])
indexes = np.array( [d['index'] for d in data])
scene_indexes = np.array( [d['scene_indexes'] for d in data] )
# COMPUTE STATISTICS
stats = []
objects = 21
for i in range(1, objects+1):
try:
mask = indexes == i
me = poses[mask][:,:,3].mean(axis=0)
std = poses[mask][:,:,3].std(axis=0)
mi_val = poses[mask][:,:,3].min(axis=0)
ma_val = poses[mask][:,:,3].max(axis=0)
count_correlated = np.zeros(( objects ))
for j in scene_indexes[mask]:
for n in j:
count_correlated[n-1] += 1
# prior equally distributed
count_correlated += int( count_correlated.sum() /objects )
count_correlated /= count_correlated.sum()
stat = { 'indexes': i-1,
'count_correlated': count_correlated,
'mean': me,
'std': std,
'min_val': mi_val,
'max_val': ma_val}
stats.append(stat)
except:
pass
# LOAD DATA FOR RENDERING
objs_scaled = [ trimesh.load(f'{base}/models/{s}/scaled.obj')
for s in os.listdir( "/home/jonfrey/Datasets/ycb/models" ) ]
K = scio.loadmat("/home/jonfrey/Datasets/ycb/data_syn/000001-meta.mat")["intrinsic_matrix"]
objs = [ trimesh.load(f'{base}/models/{s}/textured.obj')
for s in os.listdir( "/home/jonfrey/Datasets/ycb/models" ) ]
camera = pyrender.camera.IntrinsicsCamera( K[0,0], K[1,1], K[0,2], K[1,2] )
camera_pose = np.eye(4)
camera_pose[:3,:3 ] = R.from_euler('xyz', [0,180,180], degrees=True).as_matrix()
# RENDERING HELPER AND METHODS
def get_random_light():
inten = np.random.uniform( 10, 25 , (1,) ) #5 + random.random() * 10
color = np.random.uniform( 0.7, 1 , (3,) )
if random.random() > 0.3:
light = pyrender.DirectionalLight(color= color, intensity= inten)
else:
light = pyrender.SpotLight(color=color, intensity=inten,
innerConeAngle=radians(30+(random.random()*29)),
outerConeAngle=radians(60+(random.random()*29)))
return light
def get_obj_pose(index):
pose = np.eye(4)
pose[:3,3] = np.random.uniform( stats[index]['min_val'],stats[index]['max_val'], (3,) )
pose[:3,:3] = R.random().as_matrix()
return pose
def get_neighbour_pose(pose):
pose = np.copy(pose)
pose[:3,:3] = R.random().as_matrix()
pose[:3,3] = pose[:3,3] + np.random.uniform( [-0.1,-0.1,-0.3] , [0.1,0.1,0.1], (3,) )
return pose
def render():
try:
index = np.random.randint( 0, len(objs) )
obj = objs[index]
H,W = 480,640
# Sample other object according to correlation in dataset
fac = stats[index]['count_correlated']
fac[index] = 0
index2 = np.argmax( np.random.rand(21) * fac )
# Set metallic randomly
side_obj = objs[index2]
mesh2 = pyrender.Mesh.from_trimesh(side_obj )
mesh2.primitives[0].material.metallicFactor = random.random() * 0.3
mesh = pyrender.Mesh.from_trimesh(obj )
mesh.primitives[0].material.metallicFactor = random.random() * 0.3
scene = pyrender.Scene()
# Get random pose uniformly in dataset boundary cube
obj_pose = get_obj_pose( index )
# Uniformly sample collision free pose for other object
cm = collision.CollisionManager()
cm.add_object(name="obj", mesh=objs_scaled[index], transform=obj_pose)
while True:
obj_pose_two = get_neighbour_pose( obj_pose )
is_collision = cm.in_collision_single(mesh=objs_scaled[index2], transform=obj_pose_two)
if not is_collision:
break
# Add objects to the scene
st = time.time()
n_mesh = pyrender.Node(mesh=mesh, matrix= obj_pose )
scene.add_node(n_mesh)
n_mesh2 = pyrender.Node(mesh=mesh2, matrix= obj_pose_two )
scene.add_node(n_mesh2)
# Add camera
scene.add(camera, pose=camera_pose)
# Add random light
light_pose = np.copy( camera_pose )
light_pose[2,3] -= 1
light_pose[:3,3] += np.random.uniform( 0.3, 0.3 , (3,) )
scene.add(get_random_light(), pose=light_pose)
# get mask
renderer = pyrender.OffscreenRenderer(640, 480)
flags = RenderFlags.DEPTH_ONLY
full_depth = renderer.render(scene, flags=flags)
for mn in scene.mesh_nodes:
mn.mesh.is_visible = False
segimg = np.zeros((H, W), dtype=np.uint8)
for ind, node in zip( [index, index2], [n_mesh, n_mesh2 ] ):
node.mesh.is_visible = True
depth = renderer.render(scene, flags=flags)
mask = np.logical_and(
(np.abs(depth - full_depth) < 1e-6), np.abs(full_depth) > 0
)
segimg[mask] = ind + 1
node.mesh.is_visible = False
if (segimg == index+1).sum() < 100:
return (False, )
# Show all meshes again
for mn in scene.mesh_nodes:
mn.mesh.is_visible = True
color, depth = renderer.render(scene)
ind = np.zeros( (1,1), dtype=np.float32 )
ind[0,0] = index + 1
meta = {
"cls_indexes": ind, # NR,1
"factor_depth": np.array([[10000]], dtype=np.float64),
'poses': obj_pose[:3,:,None].astype(np.float32) ,
'intrinsic_matrix': np.array([[1.066778e+03, 0.000000e+00, 3.129869e+02],
[0.000000e+00, 1.067487e+03, 2.413109e+02],
[0.000000e+00, 0.000000e+00, 1.000000e+00]]) #3,4,NR
}
depth = np.uint16( depth * meta["factor_depth"][0,0] )
co = np.full( (H,W,4),255, dtype=np.uint8)
co[:,:,:3] = color
del renderer
time.sleep(0.1)
return co, depth, meta, segimg
except Exception as e:
print("Failed")
return (False,)
dir_out = "/home/jonfrey/Datasets/ycb/data_syn_new"
def do(i):
np.random.seed(i)
random.seed(i)
while 1:
tup = render()
if len(tup) != 1: break
color, depth, meta, label = tup
p_color = os.path.join( dir_out, f"{i:06d}-color.png" )
p_depth = os.path.join( dir_out, f"{i:06d}-depth.png" )
p_label = os.path.join( dir_out, f"{i:06d}-label.png" )
p_meta = os.path.join( dir_out, f"{i:06d}-meta.mat" )
Path(p_color).parent.mkdir(exist_ok=True, parents= True)
imageio.imwrite( p_color, color)
imageio.imwrite( p_depth, depth)
imageio.imwrite( p_label, label)
scipy.io.savemat( p_meta, meta)
start_time = time.time()
la = 100000
if os.path.exists("/home/jonfrey/tmp/nr.npy"):
start = np.load( "/home/jonfrey/tmp/nr.npy" )
lam = start[0]
else:
lam = 3500
start= np.array( [ lam ] )
for j,i in enumerate( range(start[0], la)):
st = time.time()
do(i)
start[0] = i
np.save( "/home/jonfrey/tmp/nr.npy", start)
if j > 0:
print( f"{i} Time gone: ", (time.time()-st), "Time left: ", (time.time()-start_time)/(i-lam)*(la-i) )
| 32.05098
| 105
| 0.589135
|
ee4eb29e742b47b4208b1a3d3744b448dd472c48
| 233
|
py
|
Python
|
laboratorios/quiz1.py
|
ncenteno31/uip-prog3
|
90fc05cd7ef1c89fbadb54dc68c917f40a8cae14
|
[
"MIT"
] | null | null | null |
laboratorios/quiz1.py
|
ncenteno31/uip-prog3
|
90fc05cd7ef1c89fbadb54dc68c917f40a8cae14
|
[
"MIT"
] | null | null | null |
laboratorios/quiz1.py
|
ncenteno31/uip-prog3
|
90fc05cd7ef1c89fbadb54dc68c917f40a8cae14
|
[
"MIT"
] | null | null | null |
# Class 3 QUIZ
# Noel Centeno 8-840-2233
# Calculate the area and perimeter of a rectangle.
base = 5
altura = 7
metro = 100
pulgadas = 2.54
area = base * altura
perimetro = base * 2 + altura * 2
print(str(area))
print(str(perimetro))
| 17.923077
| 51
| 0.703863
|
9bf2041d4955c00801be5a56ccc3cc8413796fe8
| 7,757
|
py
|
Python
|
Assignment 2/super_baseline_tagger.py
|
anishvaidya/CSCI-544---Applied-Natural-Language-Processing
|
c2a96f25d99e9dc664f6b4e81a12acb5e43a7d36
|
[
"MIT"
] | 1
|
2020-05-10T04:08:20.000Z
|
2020-05-10T04:08:20.000Z
|
Assignment 2/super_baseline_tagger.py
|
anishvaidya/CSCI-544---Applied-Natural-Language-Processing
|
c2a96f25d99e9dc664f6b4e81a12acb5e43a7d36
|
[
"MIT"
] | null | null | null |
Assignment 2/super_baseline_tagger.py
|
anishvaidya/CSCI-544---Applied-Natural-Language-Processing
|
c2a96f25d99e9dc664f6b4e81a12acb5e43a7d36
|
[
"MIT"
] | null | null | null |
import random
import sys
import shutil
import glob
import os
import subprocess
max_accuracy = 0
train_files = []
test_files = []
max_acc_test = []
def data_splitter():
global train_files, test_files
k = 4
os.chdir("all")
shutil.rmtree("test")
shutil.rmtree("train")
os.mkdir("train")
os.mkdir("test")
all_files = glob.glob("*.csv")
random.shuffle(all_files)
test_files = random.sample(all_files, len(all_files)//k)
train_files = list(set(all_files) - set(test_files))
os.chdir("..")
for each_files in test_files:
shutil.copy2(str("all/"+each_files), str("all/test/"+each_files))
for each_files in train_files:
shutil.copy2(str("all/"+each_files), str("all/train/"+each_files))
print("TEST: ",len(test_files))
print("TRAIN: ",len(train_files))
'''Baseline Tagger
This code tags each utterance in a conversation with a label called a dialog act.
The data source path is taken from the command line, along with the output file name and destination.
Author - Anish Amul Vaidya
'''
# Import required libraries
# import sys
import pycrfsuite
import hw2_corpus_tool
import time
# Retrieve file-paths and file-names from command line
INPUTDIR = "all/train"
TESTDIR = "all/test"
# OUTPUTFILE = sys.argv[3]
class BaselineTagger():
    ''' Take the input data path and generate 2 lists:
    1. a list of feature lists, one for each utterance in each conversation.
    2. a list of labels for the utterances in each conversation.
    '''
def generate_features_and_labels(data):
features = []
labels = []
for conversation in data:
speaker1 = None
speaker2 = None
conversation_start = True
conversation_features = []
conversation_labels = []
for dialog in conversation:
dialog_features = []
speaker2 = getattr(dialog, "speaker")
if conversation_start:
dialog_features.append("Speaker change: False")
dialog_features.append("First Utterance: True")
conversation_start = False
else:
if speaker1 != speaker2:
dialog_features.append("Speaker change: True")
dialog_features.append("First Utterance: False")
else:
dialog_features.append("Speaker change: False")
dialog_features.append("First Utterance: False")
postag_object_list = getattr(dialog, "pos")
if postag_object_list is not None:
for postag_object in postag_object_list:
token = getattr(postag_object, "token")
pos = getattr(postag_object, "pos")
dialog_features.append("TOKEN_" + token)
dialog_features.append("POS_" + pos)
else:
# dialog_features.append("TOKEN_BLANK")
# dialog_features.append("POS_BLANK")
dialog_features.append("NO_WORDS")
conversation_features.append(dialog_features)
act_tag = getattr(dialog, "act_tag")
if act_tag:
conversation_labels.append(act_tag)
speaker1 = speaker2
features.append(conversation_features)
labels.append(conversation_labels)
return features, labels
''' Train the pycrfsuite model:
Set the required model parameters.
Input data to model and train.
'''
def train_model(train_features, train_labels):
trainer=pycrfsuite.Trainer(verbose = False)
for x, y in zip(train_features, train_labels):
trainer.append(x, y)
trainer.set_params({
'c1': 1.0, # coefficient for L1 penalty
'c2': 1e-3, # coefficient for L2 penalty
'max_iterations': 50, # stop earlier
# include transitions that are possible, but not observed
'feature.possible_transitions': True
})
trainer.train("baseline_dialog_act_tagger.crfsuite")
''' Predict on test data:
Use the trained model on the given test data.
'''
def predict(test_features, test_labels):
global max_acc_test, test_files, max_accuracy
predictor = pycrfsuite.Tagger(verbose = False)
predictor.open("baseline_dialog_act_tagger.crfsuite")
# output_file = open(OUTPUTFILE, "w+")
correct_predictions = 0
total_predictions = 0
for conversation in range(len(test_features)):
for label_index, predicted_label in enumerate(predictor.tag(test_features[conversation])):
if predicted_label == test_labels[conversation][label_index]:
correct_predictions += 1
total_predictions += 1
predicted_label += "\n"
# output_file.writelines(predicted_label)
# output_file.writelines("\n")
# output_file.close()
accuracy = (correct_predictions / total_predictions)
print ("Accuracy is " , accuracy)
if accuracy > max_accuracy:
max_accuracy = accuracy
max_acc_test = test_files
# def main():
# global max_accuracy
# for i in range(50):
# print ("-----------------------------------------------------------")
# print ("Iteration ", i)
# data_splitter()
# training_set = list(hw2_corpus_tool.get_data(INPUTDIR))
# dev_set = list(hw2_corpus_tool.get_data(TESTDIR))
# train_features, train_labels = BaselineTagger.generate_features_and_labels(training_set)
# test_features, test_labels = BaselineTagger.generate_features_and_labels(dev_set)
# BaselineTagger.train_model(train_features, train_labels)
# BaselineTagger.predict(test_features, test_labels)
# print ("Max accuracy is ", max_accuracy)
if __name__ == "__main__":
input_dir_list = ["data/train", "data/train_best", "data/train_sid", "data/train1"]
test_dir_list = ["data/dev", "data/dev_best", "data/dev_sid", "data/dev1"]
for INPUTDIR, TESTDIR in zip(input_dir_list, test_dir_list):
start = time.time()
training_set = list(hw2_corpus_tool.get_data(INPUTDIR))
dev_set = list(hw2_corpus_tool.get_data(TESTDIR))
train_features, train_labels = BaselineTagger.generate_features_and_labels(training_set)
test_features, test_labels = BaselineTagger.generate_features_and_labels(dev_set)
print ("Training model " + INPUTDIR)
BaselineTagger.train_model(train_features, train_labels)
BaselineTagger.predict(test_features, test_labels)
print ("Time taken (in seconds) :", (time.time() - start))
print ("-----------------------------------------------------------------")
'''
Training model data/train
Accuracy is 0.72200983069361
Time taken (in seconds) : 68.87009882926941
-----------------------------------------------------------------
Training model data/train_best
Accuracy is 0.7335149756939
Time taken (in seconds) : 71.86131548881531
-----------------------------------------------------------------
Training model data/train_sid
Accuracy is 0.7334733041618429
Time taken (in seconds) : 81.44987893104553
-----------------------------------------------------------------
Training model data/train1
Accuracy is 0.7215160225699989
Time taken (in seconds) : 67.47267532348633
-----------------------------------------------------------------
'''
| 39.375635
| 102
| 0.592239
|
f06c83e6768c2bfa7588f77dc40f4f111cbff866
| 1,170
|
py
|
Python
|
src/foolscap/test/test_eventual.py
|
jaraco/foolscap
|
845bea550447991b194ef884713a7b3be4b4a6c2
|
[
"MIT"
] | 29
|
2015-01-05T19:37:27.000Z
|
2021-03-03T21:59:13.000Z
|
src/foolscap/test/test_eventual.py
|
jaraco/foolscap
|
845bea550447991b194ef884713a7b3be4b4a6c2
|
[
"MIT"
] | 65
|
2015-03-01T03:18:03.000Z
|
2022-03-24T16:00:48.000Z
|
src/foolscap/test/test_eventual.py
|
jaraco/foolscap
|
845bea550447991b194ef884713a7b3be4b4a6c2
|
[
"MIT"
] | 22
|
2015-01-28T10:51:46.000Z
|
2022-01-26T07:56:25.000Z
|
from twisted.trial import unittest
from foolscap.eventual import eventually, fireEventually, flushEventualQueue
class TestEventual(unittest.TestCase):
def tearDown(self):
return flushEventualQueue()
def testSend(self):
results = []
eventually(results.append, 1)
self.assertFalse(results)
def _check():
self.assertEqual(results, [1])
eventually(_check)
def _check2():
self.assertEqual(results, [1,2])
eventually(results.append, 2)
eventually(_check2)
def testFlush(self):
results = []
eventually(results.append, 1)
eventually(results.append, 2)
d = flushEventualQueue()
def _check(res):
self.assertEqual(results, [1,2])
d.addCallback(_check)
return d
def testFire(self):
results = []
fireEventually(1).addCallback(results.append)
fireEventually(2).addCallback(results.append)
self.assertFalse(results)
def _check(res):
self.assertEqual(results, [1,2])
d = flushEventualQueue()
d.addCallback(_check)
return d
| 27.209302
| 76
| 0.610256
|
1ccd41224b74967e7188facf4c21964dfa810e03
| 965
|
py
|
Python
|
time_audit/scratch.py
|
mweiden/productivity-tools
|
b7026908ff5ea8db001884dd9abbf8316c06c5a7
|
[
"MIT"
] | null | null | null |
time_audit/scratch.py
|
mweiden/productivity-tools
|
b7026908ff5ea8db001884dd9abbf8316c06c5a7
|
[
"MIT"
] | null | null | null |
time_audit/scratch.py
|
mweiden/productivity-tools
|
b7026908ff5ea8db001884dd9abbf8316c06c5a7
|
[
"MIT"
] | null | null | null |
while ind_transitions < (len(transitions) - 1):
print(
f"day_bucket[{ind_day_buckets}]={day_buckets[ind_day_buckets]}, transitions[{ind_transitions}]={transitions[ind_transitions][0].isoformat()}"
)
if transitions[ind_transitions + 1][0] < day_buckets[ind_day_buckets + 1]:
print("advance transitions")
ind_transitions += 1
else:
print("advance day bucket")
ind_day_buckets += 1
active_set = set()
for dt, changes in sorted(transitions.items(), key=lambda x: x[0]):
added = set()
removed = set()
for summary, delta in changes.items():
if delta > 0:
added.add(summary)
elif delta < 0:
removed.add(summary)
did_not_change = active_set - removed
active_set |= added
active_set -= removed
date_ind = date_to_ind[dt.replace(hour=0, minute=0, second=0, microsecond=0)]
for summary in added:
summary_ind = summary_to_ind[summary]
| 30.15625
| 149
| 0.642487
|
a28e20a636ce23665e010908ffbc6c5f914856a4
| 21,272
|
py
|
Python
|
asv_bench/benchmarks/frame_methods.py
|
rendner/pandas
|
47494a48edf25d5a49b0fb5b896b454c15c83595
|
[
"BSD-3-Clause"
] | 1
|
2019-11-01T08:44:40.000Z
|
2019-11-01T08:44:40.000Z
|
asv_bench/benchmarks/frame_methods.py
|
sdrees/pandas
|
bef454f0893efe2fa5e49317635f89c03467d16e
|
[
"BSD-3-Clause"
] | null | null | null |
asv_bench/benchmarks/frame_methods.py
|
sdrees/pandas
|
bef454f0893efe2fa5e49317635f89c03467d16e
|
[
"BSD-3-Clause"
] | null | null | null |
import string
import warnings
import numpy as np
from pandas import (
DataFrame,
MultiIndex,
NaT,
Series,
date_range,
isnull,
period_range,
timedelta_range,
)
from .pandas_vb_common import tm
class GetNumericData:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 25))
self.df["foo"] = "bar"
self.df["bar"] = "baz"
self.df = self.df._consolidate()
def time_frame_get_numeric_data(self):
self.df._get_numeric_data()
class Lookup:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 8), columns=list("abcdefgh"))
self.df["foo"] = "bar"
self.row_labels = list(self.df.index[::10])[:900]
self.col_labels = list(self.df.columns) * 100
self.row_labels_all = np.array(
list(self.df.index) * len(self.df.columns), dtype="object"
)
self.col_labels_all = np.array(
list(self.df.columns) * len(self.df.index), dtype="object"
)
def time_frame_fancy_lookup(self):
self.df.lookup(self.row_labels, self.col_labels)
def time_frame_fancy_lookup_all(self):
self.df.lookup(self.row_labels_all, self.col_labels_all)
class Reindex:
def setup(self):
N = 10**3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.idx_cols = np.random.randint(0, N, N)
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_reindex_axis0(self):
self.df.reindex(self.idx)
def time_reindex_axis1(self):
self.df.reindex(columns=self.idx_cols)
def time_reindex_axis1_missing(self):
self.df.reindex(columns=self.idx)
def time_reindex_both_axes(self):
self.df.reindex(index=self.idx, columns=self.idx_cols)
def time_reindex_upcast(self):
self.df2.reindex(np.random.permutation(range(1200)))
class Rename:
def setup(self):
N = 10**3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.dict_idx = {k: k for k in self.idx}
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_rename_single(self):
self.df.rename({0: 0})
def time_rename_axis0(self):
self.df.rename(self.dict_idx)
def time_rename_axis1(self):
self.df.rename(columns=self.dict_idx)
def time_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
def time_dict_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
class Iteration:
# mem_itertuples_* benchmarks are slow
timeout = 120
def setup(self):
N = 1000
self.df = DataFrame(np.random.randn(N * 10, N))
self.df2 = DataFrame(np.random.randn(N * 50, 10))
self.df3 = DataFrame(
np.random.randn(N, 5 * N), columns=["C" + str(c) for c in range(N * 5)]
)
self.df4 = DataFrame(np.random.randn(N * 1000, 10))
def time_items(self):
# (monitor no-copying behaviour)
if hasattr(self.df, "_item_cache"):
self.df._item_cache.clear()
for name, col in self.df.items():
pass
def time_items_cached(self):
for name, col in self.df.items():
pass
def time_iteritems_indexing(self):
for col in self.df3:
self.df3[col]
def time_itertuples_start(self):
self.df4.itertuples()
def time_itertuples_read_first(self):
next(self.df4.itertuples())
def time_itertuples(self):
for row in self.df4.itertuples():
pass
def time_itertuples_to_list(self):
list(self.df4.itertuples())
def mem_itertuples_start(self):
return self.df4.itertuples()
def peakmem_itertuples_start(self):
self.df4.itertuples()
def mem_itertuples_read_first(self):
return next(self.df4.itertuples())
def peakmem_itertuples(self):
for row in self.df4.itertuples():
pass
def mem_itertuples_to_list(self):
return list(self.df4.itertuples())
def peakmem_itertuples_to_list(self):
list(self.df4.itertuples())
def time_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def time_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def time_itertuples_raw_tuples(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def time_itertuples_raw_tuples_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def mem_itertuples_raw_start(self):
return self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def mem_itertuples_raw_to_list(self):
return list(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def time_iterrows(self):
for row in self.df.iterrows():
pass
class ToString:
def setup(self):
self.df = DataFrame(np.random.randn(100, 10))
def time_to_string_floats(self):
self.df.to_string()
class ToHTML:
def setup(self):
nrows = 500
self.df2 = DataFrame(np.random.randn(nrows, 10))
self.df2[0] = period_range("2000", periods=nrows)
self.df2[1] = range(nrows)
def time_to_html_mixed(self):
self.df2.to_html()
class ToDict:
params = [["dict", "list", "series", "split", "records", "index"]]
param_names = ["orient"]
def setup(self, orient):
data = np.random.randint(0, 1000, size=(10000, 4))
self.int_df = DataFrame(data)
self.datetimelike_df = self.int_df.astype("timedelta64[ns]")
def time_to_dict_ints(self, orient):
self.int_df.to_dict(orient=orient)
def time_to_dict_datetimelike(self, orient):
self.datetimelike_df.to_dict(orient=orient)
class ToNumpy:
def setup(self):
N = 10000
M = 10
self.df_tall = DataFrame(np.random.randn(N, M))
self.df_wide = DataFrame(np.random.randn(M, N))
self.df_mixed_tall = self.df_tall.copy()
self.df_mixed_tall["foo"] = "bar"
self.df_mixed_tall[0] = period_range("2000", periods=N)
self.df_mixed_tall[1] = range(N)
self.df_mixed_wide = self.df_wide.copy()
self.df_mixed_wide["foo"] = "bar"
self.df_mixed_wide[0] = period_range("2000", periods=M)
self.df_mixed_wide[1] = range(M)
def time_to_numpy_tall(self):
self.df_tall.to_numpy()
def time_to_numpy_wide(self):
self.df_wide.to_numpy()
def time_to_numpy_mixed_tall(self):
self.df_mixed_tall.to_numpy()
def time_to_numpy_mixed_wide(self):
self.df_mixed_wide.to_numpy()
def time_values_tall(self):
self.df_tall.values
def time_values_wide(self):
self.df_wide.values
def time_values_mixed_tall(self):
self.df_mixed_tall.values
def time_values_mixed_wide(self):
self.df_mixed_wide.values
class ToRecords:
def setup(self):
N = 100_000
data = np.random.randn(N, 2)
mi = MultiIndex.from_arrays(
[
np.arange(N),
date_range("1970-01-01", periods=N, freq="ms"),
]
)
self.df = DataFrame(data)
self.df_mi = DataFrame(data, index=mi)
def time_to_records(self):
self.df.to_records(index=True)
def time_to_records_multiindex(self):
self.df_mi.to_records(index=True)
class Repr:
def setup(self):
nrows = 10000
data = np.random.randn(nrows, 10)
arrays = np.tile(np.random.randn(3, nrows // 100), 100)
idx = MultiIndex.from_arrays(arrays)
self.df3 = DataFrame(data, index=idx)
self.df4 = DataFrame(data, index=np.random.randn(nrows))
self.df_tall = DataFrame(np.random.randn(nrows, 10))
self.df_wide = DataFrame(np.random.randn(10, nrows))
def time_html_repr_trunc_mi(self):
self.df3._repr_html_()
def time_html_repr_trunc_si(self):
self.df4._repr_html_()
def time_repr_tall(self):
repr(self.df_tall)
def time_frame_repr_wide(self):
repr(self.df_wide)
class MaskBool:
def setup(self):
data = np.random.randn(1000, 500)
df = DataFrame(data)
df = df.where(df > 0)
self.bools = df > 0
self.mask = isnull(df)
def time_frame_mask_bools(self):
self.bools.mask(self.mask)
def time_frame_mask_floats(self):
self.bools.astype(float).mask(self.mask)
class Isnull:
def setup(self):
N = 10**3
self.df_no_null = DataFrame(np.random.randn(N, N))
sample = np.array([np.nan, 1.0])
data = np.random.choice(sample, (N, N))
self.df = DataFrame(data)
sample = np.array(list(string.ascii_letters + string.whitespace))
data = np.random.choice(sample, (N, N))
self.df_strings = DataFrame(data)
sample = np.array(
[
NaT,
np.nan,
None,
np.datetime64("NaT"),
np.timedelta64("NaT"),
0,
1,
2.0,
"",
"abcd",
]
)
data = np.random.choice(sample, (N, N))
self.df_obj = DataFrame(data)
def time_isnull_floats_no_null(self):
isnull(self.df_no_null)
def time_isnull(self):
isnull(self.df)
def time_isnull_strngs(self):
isnull(self.df_strings)
def time_isnull_obj(self):
isnull(self.df_obj)
class Fillna:
params = (
[True, False],
["pad", "bfill"],
[
"float64",
"float32",
"object",
"Int64",
"Float64",
"datetime64[ns]",
"datetime64[ns, tz]",
"timedelta64[ns]",
],
)
param_names = ["inplace", "method", "dtype"]
def setup(self, inplace, method, dtype):
N, M = 10000, 100
if dtype in ("datetime64[ns]", "datetime64[ns, tz]", "timedelta64[ns]"):
data = {
"datetime64[ns]": date_range("2011-01-01", freq="H", periods=N),
"datetime64[ns, tz]": date_range(
"2011-01-01", freq="H", periods=N, tz="Asia/Tokyo"
),
"timedelta64[ns]": timedelta_range(start="1 day", periods=N, freq="1D"),
}
self.df = DataFrame({f"col_{i}": data[dtype] for i in range(M)})
self.df[::2] = None
else:
values = np.random.randn(N, M)
values[::2] = np.nan
if dtype == "Int64":
values = values.round()
self.df = DataFrame(values, dtype=dtype)
def time_frame_fillna(self, inplace, method, dtype):
self.df.fillna(inplace=inplace, method=method)
class Dropna:
params = (["all", "any"], [0, 1])
param_names = ["how", "axis"]
def setup(self, how, axis):
self.df = DataFrame(np.random.randn(10000, 1000))
self.df.iloc[50:1000, 20:50] = np.nan
self.df.iloc[2000:3000] = np.nan
self.df.iloc[:, 60:70] = np.nan
self.df_mixed = self.df.copy()
self.df_mixed["foo"] = "bar"
def time_dropna(self, how, axis):
self.df.dropna(how=how, axis=axis)
def time_dropna_axis_mixed_dtypes(self, how, axis):
self.df_mixed.dropna(how=how, axis=axis)
class Count:
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
self.df = DataFrame(np.random.randn(10000, 1000))
self.df.iloc[50:1000, 20:50] = np.nan
self.df.iloc[2000:3000] = np.nan
self.df.iloc[:, 60:70] = np.nan
self.df_mixed = self.df.copy()
self.df_mixed["foo"] = "bar"
self.df.index = MultiIndex.from_arrays([self.df.index, self.df.index])
self.df.columns = MultiIndex.from_arrays([self.df.columns, self.df.columns])
self.df_mixed.index = MultiIndex.from_arrays(
[self.df_mixed.index, self.df_mixed.index]
)
self.df_mixed.columns = MultiIndex.from_arrays(
[self.df_mixed.columns, self.df_mixed.columns]
)
def time_count_level_multi(self, axis):
self.df.count(axis=axis, level=1)
def time_count_level_mixed_dtypes_multi(self, axis):
self.df_mixed.count(axis=axis, level=1)
class Apply:
def setup(self):
self.df = DataFrame(np.random.randn(1000, 100))
self.s = Series(np.arange(1028.0))
self.df2 = DataFrame({i: self.s for i in range(1028)})
self.df3 = DataFrame(np.random.randn(1000, 3), columns=list("ABC"))
def time_apply_user_func(self):
self.df2.apply(lambda x: np.corrcoef(x, self.s)[(0, 1)])
def time_apply_axis_1(self):
self.df.apply(lambda x: x + 1, axis=1)
def time_apply_lambda_mean(self):
self.df.apply(lambda x: x.mean())
def time_apply_np_mean(self):
self.df.apply(np.mean)
def time_apply_pass_thru(self):
self.df.apply(lambda x: x)
def time_apply_ref_by_name(self):
self.df3.apply(lambda x: x["A"] + x["B"], axis=1)
class Dtypes:
def setup(self):
self.df = DataFrame(np.random.randn(1000, 1000))
def time_frame_dtypes(self):
self.df.dtypes
class Equals:
def setup(self):
N = 10**3
self.float_df = DataFrame(np.random.randn(N, N))
self.float_df_nan = self.float_df.copy()
self.float_df_nan.iloc[-1, -1] = np.nan
self.object_df = DataFrame("foo", index=range(N), columns=range(N))
self.object_df_nan = self.object_df.copy()
self.object_df_nan.iloc[-1, -1] = np.nan
self.nonunique_cols = self.object_df.copy()
self.nonunique_cols.columns = ["A"] * len(self.nonunique_cols.columns)
self.nonunique_cols_nan = self.nonunique_cols.copy()
self.nonunique_cols_nan.iloc[-1, -1] = np.nan
def time_frame_float_equal(self):
self.float_df.equals(self.float_df)
def time_frame_float_unequal(self):
self.float_df.equals(self.float_df_nan)
def time_frame_nonunique_equal(self):
self.nonunique_cols.equals(self.nonunique_cols)
def time_frame_nonunique_unequal(self):
self.nonunique_cols.equals(self.nonunique_cols_nan)
def time_frame_object_equal(self):
self.object_df.equals(self.object_df)
def time_frame_object_unequal(self):
self.object_df.equals(self.object_df_nan)
class Interpolate:
params = [None, "infer"]
param_names = ["downcast"]
def setup(self, downcast):
N = 10000
# this is the worst case, where every column has NaNs.
arr = np.random.randn(N, 100)
# NB: we need to set values in array, not in df.values, otherwise
# the benchmark will be misleading for ArrayManager
arr[::2] = np.nan
self.df = DataFrame(arr)
self.df2 = DataFrame(
{
"A": np.arange(0, N),
"B": np.random.randint(0, 100, N),
"C": np.random.randn(N),
"D": np.random.randn(N),
}
)
self.df2.loc[1::5, "A"] = np.nan
self.df2.loc[1::5, "C"] = np.nan
def time_interpolate(self, downcast):
self.df.interpolate(downcast=downcast)
def time_interpolate_some_good(self, downcast):
self.df2.interpolate(downcast=downcast)
class Shift:
# frame shift speedup issue-5609
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
self.df = DataFrame(np.random.rand(10000, 500))
def time_shift(self, axis):
self.df.shift(1, axis=axis)
class Nunique:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 1000))
def time_frame_nunique(self):
self.df.nunique()
class SeriesNuniqueWithNan:
def setup(self):
self.ser = Series(100000 * (100 * [np.nan] + list(range(100)))).astype(float)
def time_series_nunique_nan(self):
self.ser.nunique()
class Duplicated:
def setup(self):
n = 1 << 20
t = date_range("2015-01-01", freq="S", periods=(n // 64))
xs = np.random.randn(n // 64).round(2)
self.df = DataFrame(
{
"a": np.random.randint(-1 << 8, 1 << 8, n),
"b": np.random.choice(t, n),
"c": np.random.choice(xs, n),
}
)
self.df2 = DataFrame(np.random.randn(1000, 100).astype(str)).T
def time_frame_duplicated(self):
self.df.duplicated()
def time_frame_duplicated_wide(self):
self.df2.duplicated()
def time_frame_duplicated_subset(self):
self.df.duplicated(subset=["a"])
class XS:
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
self.N = 10**4
self.df = DataFrame(np.random.randn(self.N, self.N))
def time_frame_xs(self, axis):
self.df.xs(self.N / 2, axis=axis)
class SortValues:
params = [True, False]
param_names = ["ascending"]
def setup(self, ascending):
self.df = DataFrame(np.random.randn(1000000, 2), columns=list("AB"))
def time_frame_sort_values(self, ascending):
self.df.sort_values(by="A", ascending=ascending)
class SortIndexByColumns:
def setup(self):
N = 10000
K = 10
self.df = DataFrame(
{
"key1": tm.makeStringIndex(N).values.repeat(K),
"key2": tm.makeStringIndex(N).values.repeat(K),
"value": np.random.randn(N * K),
}
)
def time_frame_sort_values_by_columns(self):
self.df.sort_values(by=["key1", "key2"])
class Quantile:
params = [0, 1]
param_names = ["axis"]
def setup(self, axis):
self.df = DataFrame(np.random.randn(1000, 3), columns=list("ABC"))
def time_frame_quantile(self, axis):
self.df.quantile([0.1, 0.5], axis=axis)
class Rank:
param_names = ["dtype"]
params = [
["int", "uint", "float", "object"],
]
def setup(self, dtype):
self.df = DataFrame(
np.random.randn(10000, 10).astype(dtype), columns=range(10), dtype=dtype
)
def time_rank(self, dtype):
self.df.rank()
class GetDtypeCounts:
# 2807
def setup(self):
self.df = DataFrame(np.random.randn(10, 10000))
def time_frame_get_dtype_counts(self):
with warnings.catch_warnings(record=True):
self.df.dtypes.value_counts()
def time_info(self):
self.df.info()
class NSort:
params = ["first", "last", "all"]
param_names = ["keep"]
def setup(self, keep):
self.df = DataFrame(np.random.randn(100000, 3), columns=list("ABC"))
def time_nlargest_one_column(self, keep):
self.df.nlargest(100, "A", keep=keep)
def time_nlargest_two_columns(self, keep):
self.df.nlargest(100, ["A", "B"], keep=keep)
def time_nsmallest_one_column(self, keep):
self.df.nsmallest(100, "A", keep=keep)
def time_nsmallest_two_columns(self, keep):
self.df.nsmallest(100, ["A", "B"], keep=keep)
class Describe:
def setup(self):
self.df = DataFrame(
{
"a": np.random.randint(0, 100, 10**6),
"b": np.random.randint(0, 100, 10**6),
"c": np.random.randint(0, 100, 10**6),
}
)
def time_series_describe(self):
self.df["a"].describe()
def time_dataframe_describe(self):
self.df.describe()
class MemoryUsage:
def setup(self):
self.df = DataFrame(np.random.randn(100000, 2), columns=list("AB"))
self.df2 = self.df.copy()
self.df2["A"] = self.df2["A"].astype("object")
def time_memory_usage(self):
self.df.memory_usage(deep=True)
def time_memory_usage_object_dtype(self):
self.df2.memory_usage(deep=True)
from .pandas_vb_common import setup # noqa: F401 isort:skip
| 27.590143
| 88
| 0.588943
|
bf4687e6da8e97cd946723376014a59c0d6c5875
| 2,173
|
py
|
Python
|
examples/example5.py
|
jvansteirteghem/twunnel3
|
717ad40e114cfe63fa366465643ecb9f16ea6df9
|
[
"MIT"
] | 13
|
2015-01-01T09:41:57.000Z
|
2019-09-25T18:02:22.000Z
|
examples/example5.py
|
jvansteirteghem/twunnel3
|
717ad40e114cfe63fa366465643ecb9f16ea6df9
|
[
"MIT"
] | 3
|
2015-08-30T06:59:05.000Z
|
2019-05-16T11:23:12.000Z
|
examples/example5.py
|
jvansteirteghem/twunnel3
|
717ad40e114cfe63fa366465643ecb9f16ea6df9
|
[
"MIT"
] | 4
|
2015-06-11T13:10:21.000Z
|
2019-11-20T12:00:29.000Z
|
import sys
import os
sys.path.insert(0, os.path.abspath(".."))
import asyncio
from twunnel3 import local_proxy_server, logger
from examples import example
configuration = \
{
"LOGGER":
{
"LEVEL": 3
}
}
logger.configure(configuration)
loop = asyncio.get_event_loop()
configuration = \
{
"PROXY_SERVERS": [],
"LOCAL_PROXY_SERVER":
{
"TYPE": "HTTPS",
"ADDRESS": "127.0.0.1",
"PORT": 8080
}
}
https_server = loop.run_until_complete(local_proxy_server.create_server(configuration))
configuration = \
{
"PROXY_SERVERS": [],
"LOCAL_PROXY_SERVER":
{
"TYPE": "SOCKS4",
"ADDRESS": "127.0.0.1",
"PORT": 8081
}
}
socks4_server = loop.run_until_complete(local_proxy_server.create_server(configuration))
configuration = \
{
"PROXY_SERVERS": [],
"LOCAL_PROXY_SERVER":
{
"TYPE": "SOCKS5",
"ADDRESS": "127.0.0.1",
"PORT": 8082,
"ACCOUNTS":
[
{
"NAME": "",
"PASSWORD": ""
}
]
}
}
socks5_server = loop.run_until_complete(local_proxy_server.create_server(configuration))
configuration = \
{
"PROXY_SERVERS":
[
{
"TYPE": "HTTPS",
"ADDRESS": "127.0.0.1",
"PORT": 8080,
"ACCOUNT":
{
"NAME": "",
"PASSWORD": ""
}
},
{
"TYPE": "SOCKS4",
"ADDRESS": "127.0.0.1",
"PORT": 8081,
"ACCOUNT":
{
"NAME": ""
}
},
{
"TYPE": "SOCKS5",
"ADDRESS": "127.0.0.1",
"PORT": 8082,
"ACCOUNT":
{
"NAME": "",
"PASSWORD": ""
}
}
]
}
loop.call_later(5, example.create_connection, configuration)
loop.call_later(10, example.create_connection, configuration, True)
loop.call_later(15, socks5_server.close)
loop.call_later(20, socks4_server.close)
loop.call_later(25, https_server.close)
loop.call_later(30, loop.stop)
loop.run_forever()
| 19.93578
| 88
| 0.505292
|
090b035d66a637fe1f62c8760e21443532b659e2
| 3,097
|
py
|
Python
|
alicraper/settings.py
|
VictorVSa/alicraper
|
54a9d005a38140ab4f4a39564269cc056840b7a0
|
[
"MIT"
] | 3
|
2021-04-22T08:50:43.000Z
|
2021-12-10T16:07:09.000Z
|
alicraper/settings.py
|
VictorVSa/alicraper
|
54a9d005a38140ab4f4a39564269cc056840b7a0
|
[
"MIT"
] | null | null | null |
alicraper/settings.py
|
VictorVSa/alicraper
|
54a9d005a38140ab4f4a39564269cc056840b7a0
|
[
"MIT"
] | 2
|
2021-05-18T13:16:41.000Z
|
2021-12-10T16:07:16.000Z
|
# -*- coding: utf-8 -*-
# Scrapy settings for alicraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'alicraper'
SPIDER_MODULES = ['alicraper.spiders']
NEWSPIDER_MODULE = 'alicraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'alicraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'es',
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'alicraper.middlewares.AlicraperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'alicraper.middlewares.AlicraperDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'alicraper.pipelines.AlicraperPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
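
These settings apply project-wide to every spider placed under ``alicraper.spiders``. As a hedged illustration only (the spider name, start URL and selector below are hypothetical and not taken from the alicraper project), a minimal spider that would inherit the 3-second download delay, robots.txt compliance and the Spanish ``Accept-Language`` header could look like this:

# Hypothetical minimal spider; name, URL and selector are illustrative only.
import scrapy


class ExampleSpider(scrapy.Spider):
    name = "example"
    start_urls = ["https://example.com/"]  # placeholder start URL

    def parse(self, response):
        # Requests issued here inherit DOWNLOAD_DELAY, ROBOTSTXT_OBEY and
        # DEFAULT_REQUEST_HEADERS from the settings module above.
        for title in response.css("h1::text").getall():
            yield {"title": title}
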
| 34.411111
| 104
| 0.776881
|
882aab73404e6fdc435416d53e5bb7ac37d48ac2
| 26,765
|
py
|
Python
|
sharpy/solvers/steplinearuvlm.py
|
ostodieck/sharpy
|
b85aa1c001a0ec851af4eb259cce7c01dfa68b9e
|
[
"BSD-3-Clause"
] | 1
|
2020-07-27T05:15:35.000Z
|
2020-07-27T05:15:35.000Z
|
sharpy/solvers/steplinearuvlm.py
|
briandesilva/sharpy
|
aed86428ff88fd14d36cabd91cf7e04b5fc9a39a
|
[
"BSD-3-Clause"
] | null | null | null |
sharpy/solvers/steplinearuvlm.py
|
briandesilva/sharpy
|
aed86428ff88fd14d36cabd91cf7e04b5fc9a39a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Time domain solver to integrate the linear UVLM aerodynamic system developed by S. Maraniello
N Goizueta
Nov 18
"""
from sharpy.utils.solver_interface import BaseSolver, solver
import numpy as np
import sharpy.utils.settings as settings
import sharpy.utils.generator_interface as gen_interface
import sharpy.utils.algebra as algebra
import sharpy.linear.src.linuvlm as linuvlm
@solver
class StepLinearUVLM(BaseSolver):
r"""
Time domain aerodynamic solver that uses a linear UVLM formulation to be used with the
:class:`solvers.DynamicCoupled` solver.
To use this solver, the ``solver_id = StepLinearUVLM`` must be given as the name for the ``aero_solver``
in the case of an aeroelastic solver, where the settings below would be parsed through ``aero_solver_settings``.
Notes:
The ``integr_order`` variable refers to the finite differencing scheme used to calculate the bound circulation
derivative with respect to time :math:`\dot{\mathbf{\Gamma}}`. A first order scheme is used when
``integr_order == 1``
.. math:: \dot{\mathbf{\Gamma}}^{n+1} = \frac{\mathbf{\Gamma}^{n+1}-\mathbf{\Gamma}^n}{\Delta t}
If ``integr_order == 2`` a higher order scheme is used (but it isn't exactly second order accurate [1]).
.. math:: \dot{\mathbf{\Gamma}}^{n+1} = \frac{3\mathbf{\Gamma}^{n+1}-4\mathbf{\Gamma}^n + \mathbf{\Gamma}^{n-1}}
{2\Delta t}
If ``track_body`` is ``True``, the UVLM is projected onto a frame ``U`` that is:
* Coincident with ``G`` at the linearisation timestep.
* Thence, rotates by the same quantity as the FoR ``A``.
It is similar to a stability-axes frame and is recommended any time rigid body dynamics are included.
See Also:
:class:`sharpy.linear.assembler.linearuvlm.LinearUVLM`
References:
[1] Maraniello, S., & Palacios, R. State-Space Realizations and Internal Balancing in Potential-Flow
Aerodynamics with Arbitrary Kinematics. AIAA Journal, 57(6), 1–14. 2019. https://doi.org/10.2514/1.J058153
"""
solver_id = 'StepLinearUVLM'
solver_classification = 'aero'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['dt'] = 'float'
settings_default['dt'] = 0.1
settings_description['dt'] = 'Time step'
settings_types['integr_order'] = 'int'
settings_default['integr_order'] = 2
settings_description['integr_order'] = 'Integration order of the circulation derivative. Either ``1`` or ``2``.'
settings_types['ScalingDict'] = 'dict'
settings_default['ScalingDict'] = dict()
settings_description['ScalingDict'] = 'Dictionary of scaling factors to achieve normalised UVLM realisation.'
settings_types['remove_predictor'] = 'bool'
settings_default['remove_predictor'] = True
settings_description['remove_predictor'] = 'Remove the predictor term from the UVLM equations'
settings_types['use_sparse'] = 'bool'
settings_default['use_sparse'] = True
settings_description['use_sparse'] = 'Assemble UVLM plant matrix in sparse format'
settings_types['density'] = 'float'
settings_default['density'] = 1.225
settings_description['density'] = 'Air density'
settings_types['track_body'] = 'bool'
settings_default['track_body'] = True
settings_description['track_body'] = 'UVLM inputs and outputs projected to coincide with lattice at linearisation'
settings_types['track_body_number'] = 'int'
settings_default['track_body_number'] = -1
settings_description['track_body_number'] = 'Frame of reference number to follow. If ``-1`` track ``A`` frame.'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
scaling_settings_types = dict()
scaling_settings_default = dict()
scaling_settings_description = dict()
scaling_settings_types['length'] = 'float'
scaling_settings_default['length'] = 1.0
scaling_settings_description['length'] = 'Reference length to be used for UVLM scaling'
scaling_settings_types['speed'] = 'float'
scaling_settings_default['speed'] = 1.0
scaling_settings_description['speed'] = 'Reference speed to be used for UVLM scaling'
scaling_settings_types['density'] = 'float'
scaling_settings_default['density'] = 1.0
scaling_settings_description['density'] = 'Reference density to be used for UVLM scaling'
__doc__ += settings_table.generate(scaling_settings_types,
scaling_settings_default,
scaling_settings_description, header_line='The settings that ``ScalingDict`` '
'accepts are the following:')
def __init__(self):
self.data = None
self.settings = None
self.lin_uvlm_system = None
self.velocity_generator = None
def initialise(self, data, custom_settings=None):
r"""
Initialises the Linear UVLM aerodynamic solver and the chosen velocity generator.
Settings are parsed into the standard SHARPy settings format for solvers. It then checks whether there is
any previous information about the linearised system (in order for a solution to be restarted without
overwriting the linearisation).
If a linearised system does not exist, a linear UVLM system is created linearising about the current time step.
The reference values for the input and output are transformed into column vectors :math:`\mathbf{u}`
and :math:`\mathbf{y}`, respectively.
The information pertaining to the linear system is stored in a dictionary ``self.data.aero.linear`` within
the main ``data`` variable.
Args:
data (PreSharpy): class containing the problem information
custom_settings (dict): custom settings dictionary
"""
self.data = data
if custom_settings is None:
self.settings = data.settings[self.solver_id]
else:
self.settings = custom_settings
settings.to_custom_types(self.settings, self.settings_types, self.settings_default, no_ctype=True)
settings.to_custom_types(self.settings['ScalingDict'], self.scaling_settings_types,
self.scaling_settings_default, no_ctype=True)
# Check whether linear UVLM has been initialised
try:
self.data.aero.linear
except AttributeError:
self.data.aero.linear = dict()
aero_tstep = self.data.aero.timestep_info[-1]
### Record body orientation/velocities at time 0
# This option allows the linearised UVLM to rotate with the A frame
# or with a specific body (multi-body solution)
if self.settings['track_body']:
self.num_body_track = self.settings['track_body_number']
# track A frame
if self.num_body_track == -1:
self.quat0 = self.data.structure.timestep_info[0].quat.copy()
self.for_vel0 = self.data.structure.timestep_info[0].for_vel.copy()
else: # track a specific body
self.quat0 = \
self.data.structure.timestep_info[0].mb_quat[self.num_body_track,:].copy()
self.for_vel0 = \
self.data.structure.timestep_info[0].mb_FoR_vel[self.num_body_track ,:].copy()
# convert to G frame
self.Cga0 = algebra.quat2rotation(self.quat0)
self.Cga = self.Cga0.copy()
self.for_vel0[:3] = self.Cga0.dot(self.for_vel0[:3])
self.for_vel0[3:] = self.Cga0.dot(self.for_vel0[3:])
else: # check/record initial rotation speed
self.num_body_track = None
self.quat0 = None
self.Cga0 = None
self.Cga = None
self.for_vel0 = np.zeros((6,))
# TODO: verify whether there is a better way to implement rho
aero_tstep.rho = self.settings['density']
# Generate instance of linuvlm.Dynamic()
lin_uvlm_system = linuvlm.DynamicBlock(aero_tstep,
dynamic_settings=self.settings,
# dt=self.settings['dt'].value,
# integr_order=self.settings['integr_order'].value,
# ScalingDict=self.settings['ScalingDict'],
# RemovePredictor=self.settings['remove_predictor'].value,
# UseSparse=self.settings['use_sparse'].value,
for_vel=self.for_vel0)
# add rotational speed
for ii in range(lin_uvlm_system.MS.n_surf):
lin_uvlm_system.MS.Surfs[ii].omega = self.for_vel0[3:]
# Save reference values
# System Inputs
u_0 = self.pack_input_vector()
# Linearised state
dt = self.settings['dt']
x_0 = self.pack_state_vector(aero_tstep, None, dt, self.settings['integr_order'])
# Reference forces
f_0 = np.concatenate([aero_tstep.forces[ss][0:3].reshape(-1, order='C')
for ss in range(aero_tstep.n_surf)])
# Assemble the state space system
lin_uvlm_system.assemble_ss()
self.data.aero.linear['System'] = lin_uvlm_system
self.data.aero.linear['SS'] = lin_uvlm_system.SS
self.data.aero.linear['x_0'] = x_0
self.data.aero.linear['u_0'] = u_0
self.data.aero.linear['y_0'] = f_0
# self.data.aero.linear['gamma_0'] = gamma
# self.data.aero.linear['gamma_star_0'] = gamma_star
# self.data.aero.linear['gamma_dot_0'] = gamma_dot
# TODO: Implement in AeroTimeStepInfo a way to store the state vectors
# aero_tstep.linear.x = x_0
# aero_tstep.linear.u = u_0
# aero_tstep.linear.y = f_0
# Initialise velocity generator
velocity_generator_type = gen_interface.generator_from_string(self.settings['velocity_field_generator'])
self.velocity_generator = velocity_generator_type()
self.velocity_generator.initialise(self.settings['velocity_field_input'])
def run(self,
aero_tstep,
structure_tstep,
convect_wake=False,
dt=None,
t=None,
unsteady_contribution=False):
r"""
Solve the linear aerodynamic UVLM model at the current time step ``n``. The step increment is solved as:
.. math::
\mathbf{x}^n &= \mathbf{A\,x}^{n-1} + \mathbf{B\,u}^n \\
\mathbf{y}^n &= \mathbf{C\,x}^n + \mathbf{D\,u}^n
A change of state is possible in order to solve the system without the predictor term, in which case the system
is solved by:
.. math::
\mathbf{h}^n &= \mathbf{A\,h}^{n-1} + \mathbf{B\,u}^{n-1} \\
\mathbf{y}^n &= \mathbf{C\,h}^n + \mathbf{D\,u}^n
Variations are taken with respect to initial reference state. The state and input vectors for the linear
UVLM system are of the form:
If ``integr_order==1``:
.. math:: \mathbf{x}_n = [\delta\mathbf{\Gamma}^T_n,\,
\delta\mathbf{\Gamma_w}_n^T,\,
\Delta t \,\delta\mathbf{\dot{\Gamma}}_n^T]^T
Else, if ``integr_order==2``:
.. math:: \mathbf{x}_n = [\delta\mathbf{\Gamma}_n^T,\,
\delta\mathbf{\Gamma_w}_n^T,\,
\Delta t \,\delta\mathbf{\dot{\Gamma}}_n^T,\,
\delta\mathbf{\Gamma}_{n-1}^T]^T
And the input vector:
.. math:: \mathbf{u}_n = [\delta\mathbf{\zeta}_n^T,\,
\delta\dot{\mathbf{\zeta}}_n^T,\,\delta\mathbf{u_{ext}}^T_n]^T
where the subscript ``n`` refers to the time step.
The linear UVLM system is then solved as detailed in :func:`sharpy.linear.src.linuvlm.Dynamic.solve_step`.
The output is a column vector containing the aerodynamic forces at the panel vertices.
To Do: option for impulsive start?
Args:
aero_tstep (AeroTimeStepInfo): object containing the aerodynamic data at the current time step
structure_tstep (StructTimeStepInfo): object containing the structural data at the current time step
convect_wake (bool): for backward compatibility only. The linear UVLM assumes a frozen wake geometry
dt (float): time increment
t (float): current time
unsteady_contribution (bool): (backward compatibility). Unsteady aerodynamic effects are always included
Returns:
PreSharpy: updated ``self.data`` class with the new forces and circulation terms of the system
"""
if aero_tstep is None:
aero_tstep = self.data.aero.timestep_info[-1]
if structure_tstep is None:
structure_tstep = self.data.structure.timestep_info[-1]
if dt is None:
dt = self.settings['dt']
if t is None:
t = self.data.ts*dt
integr_order = self.settings['integr_order']
### Define Input
# Generate external velocity field u_ext
self.velocity_generator.generate({'zeta': aero_tstep.zeta,
'override': True,
't': t,
'ts': self.data.ts,
'dt': dt,
'for_pos': structure_tstep.for_pos},
aero_tstep.u_ext)
### Proj from FoR G to linearisation frame
# - proj happens in self.pack_input_vector and unpack_ss_vectors
if self.settings['track_body']:
# track A frame
if self.num_body_track == -1:
self.Cga = algebra.quat2rotation( structure_tstep.quat )
else: # track a specific body
self.Cga = algebra.quat2rotation(
structure_tstep.mb_quat[self.num_body_track,:] )
# Column vector that will be the input to the linearised UVLM system
# Input is at time step n, since it is updated in the aeroelastic solver prior to aerodynamic solver
u_n = self.pack_input_vector()
du_n = u_n - self.data.aero.linear['u_0']
if self.settings['remove_predictor']:
u_m1 = self.pack_input_vector()
du_m1 = u_m1 - self.data.aero.linear['u_0']
else:
du_m1 = None
# Retrieve State vector at time n-1
if len(self.data.aero.timestep_info) < 2:
x_m1 = self.pack_state_vector(aero_tstep, None, dt, integr_order)
else:
x_m1 = self.pack_state_vector(aero_tstep, self.data.aero.timestep_info[-2], dt, integr_order)
# dx is at timestep n-1
dx_m1 = x_m1 - self.data.aero.linear['x_0']
### Solve system - output is the variation in force
dx_n, dy_n = self.data.aero.linear['System'].solve_step(dx_m1, du_m1, du_n, transform_state=True)
x_n = self.data.aero.linear['x_0'] + dx_n
y_n = self.data.aero.linear['y_0'] + dy_n
# if self.settings['physical_model']:
forces, gamma, gamma_dot, gamma_star = self.unpack_ss_vectors(y_n, x_n, u_n, aero_tstep)
aero_tstep.forces = forces
aero_tstep.gamma = gamma
aero_tstep.gamma_dot = gamma_dot
aero_tstep.gamma_star = gamma_star
return self.data
def add_step(self):
self.data.aero.add_timestep()
def update_grid(self, beam):
self.data.aero.generate_zeta(beam, self.data.aero.aero_settings, -1, beam_ts=-1)
def update_custom_grid(self, structure_tstep, aero_tstep):
self.data.aero.generate_zeta_timestep_info(structure_tstep, aero_tstep, self.data.structure, self.data.aero.aero_settings)
def unpack_ss_vectors(self, y_n, x_n, u_n, aero_tstep):
r"""
Transform column vectors used in the state space formulation into SHARPy format
The column vectors are transformed into lists with one entry per aerodynamic surface. Each entry contains a
matrix with the quantities at each grid vertex.
.. math::
\mathbf{y}_n \longrightarrow \mathbf{f}_{aero}
.. math:: \mathbf{x}_n \longrightarrow \mathbf{\Gamma}_n,\,
\mathbf{\Gamma_w}_n,\,
\mathbf{\dot{\Gamma}}_n
If the ``track_body`` option is on, the output forces are projected from
the linearization frame, to the G frame. Note that the linearisation
frame is:
a. equal to the FoR G at time 0 (linearisation point)
b. rotates as the body frame specified in the ``track_body_number``
Args:
y_n (np.ndarray): Column output vector of linear UVLM system
x_n (np.ndarray): Column state vector of linear UVLM system
u_n (np.ndarray): Column input vector of linear UVLM system
aero_tstep (AeroTimeStepInfo): aerodynamic timestep information class instance
Returns:
tuple: Tuple containing:
forces (list):
Aerodynamic forces in a list with ``n_surf`` entries.
Each entry is a ``(6, M+1, N+1)`` matrix, where the first 3
indices correspond to the components in ``x``, ``y`` and ``z``. The latter 3 are zero.
gamma (list):
Bound circulation list with ``n_surf`` entries. Circulation is stored in an ``(M+1, N+1)``
matrix, corresponding to the panel vertices.
gamma_dot (list):
Bound circulation derivative list with ``n_surf`` entries.
Circulation derivative is stored in an ``(M+1, N+1)`` matrix, corresponding to the panel
vertices.
gamma_star (list):
Wake (free) circulation list with ``n_surf`` entries. Wake circulation is stored in an
``(M_star+1, N+1)`` matrix, corresponding to the panel vertices of the wake.
"""
### project forces from uvlm FoR to FoR G
if self.settings['track_body']:
Cg_uvlm = np.dot( self.Cga, self.Cga0.T )
f_aero = y_n
gamma_vec, gamma_star_vec, gamma_dot_vec = self.data.aero.linear['System'].unpack_state(x_n)
# Reshape output into forces[i_surface] where forces[i_surface] is a (6,M+1,N+1) matrix and circulation terms
# where gamma is a [i_surf](M+1, N+1) matrix
forces = []
gamma = []
gamma_star = []
gamma_dot = []
worked_points = 0
worked_panels = 0
worked_wake_panels = 0
for i_surf in range(aero_tstep.n_surf):
# Tuple with dimensions of the aerogrid zeta, which is the same shape for forces
dimensions = aero_tstep.zeta[i_surf].shape
dimensions_gamma = self.data.aero.aero_dimensions[i_surf]
dimensions_wake = self.data.aero.aero_dimensions_star[i_surf]
# Number of entries in zeta
points_in_surface = aero_tstep.zeta[i_surf].size
panels_in_surface = aero_tstep.gamma[i_surf].size
panels_in_wake = aero_tstep.gamma_star[i_surf].size
# Append reshaped forces to each entry in list (one for each surface)
forces.append(f_aero[worked_points:worked_points+points_in_surface].reshape(dimensions, order='C'))
### project forces.
# - forces are in the UVLM linearisation frame. Hence, these are projected
# into FoR A (using the rotation matrix at time 0) and back to FoR G
if self.settings['track_body']:
for mm in range(dimensions[1]):
for nn in range(dimensions[2]):
forces[i_surf][:,mm,nn] = np.dot(Cg_uvlm, forces[i_surf][:,mm,nn])
# Add the null bottom 3 rows to the forces entry
forces[i_surf] = np.concatenate((forces[i_surf], np.zeros(dimensions)))
# Reshape bound circulation terms
gamma.append(gamma_vec[worked_panels:worked_panels+panels_in_surface].reshape(
dimensions_gamma, order='C'))
gamma_dot.append(gamma_dot_vec[worked_panels:worked_panels+panels_in_surface].reshape(
dimensions_gamma, order='C'))
# Reshape wake circulation terms
gamma_star.append(gamma_star_vec[worked_wake_panels:worked_wake_panels+panels_in_wake].reshape(
dimensions_wake, order='C'))
worked_points += points_in_surface
worked_panels += panels_in_surface
worked_wake_panels += panels_in_wake
return forces, gamma, gamma_dot, gamma_star
def pack_input_vector(self):
r"""
Transform a SHARPy AeroTimestep instance into a column vector containing the input to the linear UVLM system.
.. math:: [\zeta,\, \dot{\zeta}, u_{ext}] \longrightarrow \mathbf{u}
If the ``track_body`` option is on, the function projects all the input
into a frame that:
a. is equal to the FoR G at time 0 (linearisation point)
b. rotates as the body frame specified in the ``track_body_number``
Returns:
np.ndarray: Input vector
"""
aero_tstep = self.data.aero.timestep_info[-1]
### re-compute projection in G frame as if A was not rotating
# - u_n is in FoR G. Hence, it is projected into FoR A and back to FoR G
# using the rotation matrix at time 0 (as if FoR A was not rotating).
if self.settings['track_body']:
Cuvlm_g = np.dot( self.Cga0, self.Cga.T )
zeta_uvlm, zeta_dot_uvlm, u_ext_uvlm = [], [], []
for i_surf in range(aero_tstep.n_surf):
Mp1, Np1 = aero_tstep.dimensions[i_surf] + 1
zeta_uvlm.append( np.empty((3,Mp1,Np1)) )
zeta_dot_uvlm.append( np.empty((3,Mp1,Np1)) )
u_ext_uvlm.append( np.empty((3,Mp1,Np1)) )
for mm in range(Mp1):
for nn in range(Np1):
zeta_uvlm[i_surf][:,mm,nn] = \
np.dot(Cuvlm_g, aero_tstep.zeta[i_surf][:,mm,nn])
zeta_dot_uvlm[i_surf][:,mm,nn] = \
np.dot(Cuvlm_g, aero_tstep.zeta_dot[i_surf][:,mm,nn])
u_ext_uvlm[i_surf][:,mm,nn] = \
np.dot(Cuvlm_g, aero_tstep.u_ext[i_surf][:,mm,nn])
zeta = np.concatenate([zeta_uvlm[i_surf].reshape(-1, order='C')
for i_surf in range(aero_tstep.n_surf)])
zeta_dot = np.concatenate([zeta_dot_uvlm[i_surf].reshape(-1, order='C')
for i_surf in range(aero_tstep.n_surf)])
u_ext = np.concatenate([u_ext_uvlm[i_surf].reshape(-1, order='C')
for i_surf in range(aero_tstep.n_surf)])
else:
zeta = np.concatenate([aero_tstep.zeta[i_surf].reshape(-1, order='C')
for i_surf in range(aero_tstep.n_surf)])
zeta_dot = np.concatenate([aero_tstep.zeta_dot[i_surf].reshape(-1, order='C')
for i_surf in range(aero_tstep.n_surf)])
u_ext = np.concatenate([aero_tstep.u_ext[i_surf].reshape(-1, order='C')
for i_surf in range(aero_tstep.n_surf)])
u = np.concatenate((zeta, zeta_dot, u_ext))
return u
@staticmethod
def pack_state_vector(aero_tstep, aero_tstep_m1, dt, integr_order):
r"""
Transform SHARPy Aerotimestep format into column vector containing the state information.
The state vector is of a different form depending on the order of integration chosen. If a second order
scheme is chosen, the state includes the bound circulation at the previous timestep,
hence the timestep information for the previous timestep shall be parsed.
The transformation is of the form:
- If ``integr_order==1``:
.. math:: \mathbf{x}_n = [\mathbf{\Gamma}^T_n,\,
\mathbf{\Gamma_w}_n^T,\,
\Delta t \,\mathbf{\dot{\Gamma}}_n^T]^T
- Else, if ``integr_order==2``:
.. math:: \mathbf{x}_n = [\mathbf{\Gamma}_n^T,\,
\mathbf{\Gamma_w}_n^T,\,
\Delta t \,\mathbf{\dot{\Gamma}}_n^T,\,
\mathbf{\Gamma}_{n-1}^T]^T
For the second order integration scheme, if the previous timestep information is not parsed, a first order
stencil is employed to estimate the bound circulation at the previous timestep:
.. math:: \mathbf{\Gamma}^{n-1} = \mathbf{\Gamma}^n - \Delta t \mathbf{\dot{\Gamma}}^n
Args:
aero_tstep (AeroTimeStepInfo): Aerodynamic timestep information at the current timestep ``n``.
aero_tstep_m1 (AeroTimeStepInfo): Aerodynamic timestep information at the previous timestep ``n-1``.
Returns:
np.ndarray: State vector
"""
# Extract current state...
gamma = np.concatenate([aero_tstep.gamma[ss].reshape(-1, order='C')
for ss in range(aero_tstep.n_surf)])
gamma_star = np.concatenate([aero_tstep.gamma_star[ss].reshape(-1, order='C')
for ss in range(aero_tstep.n_surf)])
gamma_dot = np.concatenate([aero_tstep.gamma_dot[ss].reshape(-1, order='C')
for ss in range(aero_tstep.n_surf)])
if integr_order == 1:
gamma_m1 = []
else:
if aero_tstep_m1:
gamma_m1 = np.concatenate([aero_tstep_m1.gamma[ss].reshape(-1, order='C')
for ss in range(aero_tstep.n_surf)])
else:
gamma_m1 = gamma - dt * gamma_dot
x = np.concatenate((gamma, gamma_star, dt * gamma_dot, gamma_m1))
return x
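
As a quick numerical check of the packing rule documented in ``pack_state_vector``, the standalone numpy sketch below (not SHARPy code; the circulation arrays are toy values) assembles the column state vector for both integration orders, including the first-order stencil used when no previous timestep is available:

# Standalone numpy sketch of the state packing described in pack_state_vector.
# The circulation arrays below are toy values, not SHARPy data.
import numpy as np

dt = 0.1
gamma = np.array([1.0, 2.0, 3.0])       # bound circulation at step n
gamma_star = np.array([0.5, 0.6])       # wake circulation at step n
gamma_dot = np.array([0.1, 0.2, 0.3])   # circulation time derivative at step n

# integr_order == 1: x_n = [gamma, gamma_w, dt * gamma_dot]
x_order1 = np.concatenate((gamma, gamma_star, dt * gamma_dot))

# integr_order == 2 without previous-step data: estimate gamma_{n-1}
# with the first-order stencil gamma_{n-1} = gamma_n - dt * gamma_dot_n
gamma_m1 = gamma - dt * gamma_dot
x_order2 = np.concatenate((gamma, gamma_star, dt * gamma_dot, gamma_m1))

print(x_order1.shape, x_order2.shape)   # (8,) (11,)
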
| 43.662316
| 130
| 0.598057
|
3f07ec471823faf0bb8f6e730695900a6e7cb1bc
| 30,414
|
py
|
Python
|
MSPDE4d_tf1class.py
|
Blue-Giant/MscaleDNN_tf1Class
|
ca36906724d41c51e5ae73bf011ebc0e2f2b3a26
|
[
"MIT"
] | null | null | null |
MSPDE4d_tf1class.py
|
Blue-Giant/MscaleDNN_tf1Class
|
ca36906724d41c51e5ae73bf011ebc0e2f2b3a26
|
[
"MIT"
] | null | null | null |
MSPDE4d_tf1class.py
|
Blue-Giant/MscaleDNN_tf1Class
|
ca36906724d41c51e5ae73bf011ebc0e2f2b3a26
|
[
"MIT"
] | null | null | null |
"""
@author: LXA
Date: 31 May 2020
"""
import os
import sys
import tensorflow as tf
import numpy as np
import matplotlib
import platform
import shutil
import time
import DNN_Class_base
import DNN_tools
import DNN_data
import MS_LaplaceEqs
import General_Laplace
import Load_data2Mat
import saveData
import plotData
import DNN_Log_Print
class MscaleDNN(object):
def __init__(self, input_dim=4, out_dim=1, hidden_layer=None, Model_name='DNN', name2actIn='relu',
name2actHidden='relu', name2actOut='linear', opt2regular_WB='L2', type2numeric='float32',
factor2freq=None, sFourier=1.0):
super(MscaleDNN, self).__init__()
if 'DNN' == str.upper(Model_name):
self.DNN = DNN_Class_base.Pure_Dense_Net(
indim=input_dim, outdim=out_dim, hidden_units=hidden_layer, name2Model=Model_name, actName2in=name2actIn,
actName=name2actHidden, actName2out=name2actOut, type2float=type2numeric)
elif 'SCALE_DNN' == str.upper(Model_name) or 'DNN_SCALE' == str.upper(Model_name):
self.DNN = DNN_Class_base.Dense_ScaleNet(
indim=input_dim, outdim=out_dim, hidden_units=hidden_layer, name2Model=Model_name, actName2in=name2actIn,
actName=name2actHidden, actName2out=name2actOut, type2float=type2numeric)
elif 'FOURIER_DNN' == str.upper(Model_name) or 'DNN_FOURIERBASE' == str.upper(Model_name):
self.DNN = DNN_Class_base.Dense_FourierNet(
indim=input_dim, outdim=out_dim, hidden_units=hidden_layer, name2Model=Model_name, actName2in=name2actIn,
actName=name2actHidden, actName2out=name2actOut, type2float=type2numeric)
if type2numeric == 'float32':
self.float_type = tf.float32
elif type2numeric == 'float64':
self.float_type = tf.float64
elif type2numeric == 'float16':
self.float_type = tf.float16
self.factor2freq = factor2freq
self.opt2regular_WB = opt2regular_WB
self.sFourier = sFourier
def loss_it2Laplace(self, XYZS=None, fside=None, if_lambda2fside=True, loss_type='ritz_loss'):
assert (XYZS is not None)
assert (fside is not None)
shape2XYZS = XYZS.get_shape().as_list()
lenght2XYZS_shape = len(shape2XYZS)
assert (lenght2XYZS_shape == 2)
assert (shape2XYZS[-1] == 4)
X = tf.reshape(XYZS[:, 0], shape=[-1, 1])
Y = tf.reshape(XYZS[:, 1], shape=[-1, 1])
Z = tf.reshape(XYZS[:, 2], shape=[-1, 1])
S = tf.reshape(XYZS[:, 3], shape=[-1, 1])
if if_lambda2fside:
force_side = fside(X, Y, Z, S)
else:
force_side = fside
UNN = self.DNN(XYZS, scale=self.factor2freq, sFourier=self.sFourier)
dUNN = tf.gradients(UNN, XYZS)[0]  # shape (*, 4): gradient w.r.t. (X, Y, Z, S)
if str.lower(loss_type) == 'ritz_loss' or str.lower(loss_type) == 'variational_loss':
dUNN_Norm = tf.reshape(tf.sqrt(tf.reduce_sum(tf.square(dUNN), axis=-1)), shape=[-1, 1])  # row-wise sum: pointwise norm of the gradient
dUNN_2Norm = tf.square(dUNN_Norm)
loss_it_ritz = (1.0/2)*dUNN_2Norm-tf.multiply(tf.reshape(force_side, shape=[-1, 1]), UNN)
loss_it = tf.reduce_mean(loss_it_ritz)
elif str.lower(loss_type) == 'l2_loss':
dUNN_x = tf.gather(dUNN, [0], axis=-1)
dUNN_y = tf.gather(dUNN, [1], axis=-1)
dUNN_z = tf.gather(dUNN, [2], axis=-1)
dUNN_s = tf.gather(dUNN, [3], axis=-1)
dUNNxxyzs = tf.gradients(dUNN_x, XYZS)[0]
dUNNyxyzs = tf.gradients(dUNN_y, XYZS)[0]
dUNNzxyzs = tf.gradients(dUNN_z, XYZS)[0]
dUNNsxyzs = tf.gradients(dUNN_s, XYZS)[0]
dUNNxx = tf.gather(dUNNxxyzs, [0], axis=-1)
dUNNyy = tf.gather(dUNNyxyzs, [1], axis=-1)
dUNNzz = tf.gather(dUNNzxyzs, [2], axis=-1)
dUNNss = tf.gather(dUNNsxyzs, [3], axis=-1)
# -Laplace U=f --> -Laplace U - f --> -(Laplace U + f)
loss_it_L2 = dUNNxx + dUNNyy + dUNNzz + dUNNss + tf.reshape(force_side, shape=[-1, 1])
square_loss_it = tf.square(loss_it_L2)
loss_it = tf.reduce_mean(square_loss_it)
return UNN, loss_it
def loss_it2pLaplace(self, XYZS=None, Aeps=None, if_lambda2Aeps=True, fside=None, if_lambda2fside=True,
loss_type='ritz_loss', p_index=2):
assert (XYZS is not None)
assert (fside is not None)
shape2XYZS = XYZS.get_shape().as_list()
lenght2XYZS_shape = len(shape2XYZS)
assert (lenght2XYZS_shape == 2)
assert (shape2XYZS[-1] == 4)
X = tf.reshape(XYZS[:, 0], shape=[-1, 1])
Y = tf.reshape(XYZS[:, 1], shape=[-1, 1])
Z = tf.reshape(XYZS[:, 2], shape=[-1, 1])
S = tf.reshape(XYZS[:, 3], shape=[-1, 1])
if if_lambda2Aeps:
a_eps = Aeps(X, Y, Z, S)  # shape (*, 1)
else:
a_eps = Aeps
if if_lambda2fside:
force_side = fside(X, Y, Z, S)
else:
force_side = fside
UNN = self.DNN(XYZS, scale=self.factor2freq, sFourier=self.sFourier)
dUNN = tf.gradients(UNN, XYZS)[0]  # shape (*, 4): gradient w.r.t. (X, Y, Z, S)
# Variational (Ritz) form of the interior loss; the trained UNN has shape (*, 1)
if str.lower(loss_type) == 'ritz_loss' or str.lower(loss_type) == 'variational_loss':
dUNN_Norm = tf.reshape(tf.sqrt(tf.reduce_sum(tf.square(dUNN), axis=-1)), shape=[-1, 1])  # row-wise sum: pointwise norm of the gradient
AdUNN_pNorm = tf.multiply(a_eps, tf.pow(dUNN_Norm, p_index))
loss_it_ritz = (1.0/p_index)*AdUNN_pNorm-tf.multiply(tf.reshape(force_side, shape=[-1, 1]), UNN)
loss_it = tf.reduce_mean(loss_it_ritz)
return UNN, loss_it
def loss_it2Possion_Boltzmann(self, XYZS=None, Aeps=None, if_lambda2Aeps=True, Kappa_eps=None, if_lambda2Kappa=True,
fside=None, if_lambda2fside=True, loss_type='ritz_loss', p_index=2):
assert (XYZS is not None)
assert (fside is not None)
shape2XYZS = XYZS.get_shape().as_list()
lenght2XYZS_shape = len(shape2XYZS)
assert (lenght2XYZS_shape == 2)
assert (shape2XYZS[-1] == 4)
X = tf.reshape(XYZS[:, 0], shape=[-1, 1])
Y = tf.reshape(XYZS[:, 1], shape=[-1, 1])
Z = tf.reshape(XYZS[:, 2], shape=[-1, 1])
S = tf.reshape(XYZS[:, 3], shape=[-1, 1])
if if_lambda2Aeps:
a_eps = Aeps(X, Y, Z, S)  # shape (*, 1)
else:
a_eps = Aeps
if if_lambda2Kappa:
Kappa = Kappa_eps(X, Y, Z, S)
else:
Kappa = Kappa_eps
if if_lambda2fside:
force_side = fside(X, Y, Z, S)
else:
force_side = fside
UNN = self.DNN(XYZS, scale=self.factor2freq, sFourier=self.sFourier)
dUNN = tf.gradients(UNN, XYZS)[0]  # shape (*, 4): gradient w.r.t. (X, Y, Z, S)
if str.lower(loss_type) == 'ritz_loss' or str.lower(loss_type) == 'variational_loss':
dUNN_Norm = tf.reshape(tf.sqrt(tf.reduce_sum(tf.square(dUNN), axis=-1)), shape=[-1, 1])  # row-wise sum: pointwise norm of the gradient
AdUNN_pNorm = tf.multiply(a_eps, tf.pow(dUNN_Norm, p_index))
loss_it_ritz = (1.0 / p_index) * (AdUNN_pNorm + Kappa * UNN * UNN) - \
tf.multiply(tf.reshape(force_side, shape=[-1, 1]), UNN)
loss_it = tf.reduce_mean(loss_it_ritz)
return UNN, loss_it
def loss2bd(self, XYZS_bd=None, Ubd_exact=None, if_lambda2Ubd=True):
X_bd = tf.reshape(XYZS_bd[:, 0], shape=[-1, 1])
Y_bd = tf.reshape(XYZS_bd[:, 1], shape=[-1, 1])
Z_bd = tf.reshape(XYZS_bd[:, 2], shape=[-1, 1])
S_bd = tf.reshape(XYZS_bd[:, 3], shape=[-1, 1])
if if_lambda2Ubd:
Ubd = Ubd_exact(X_bd, Y_bd, Z_bd, S_bd)
else:
Ubd=Ubd_exact
UNN_bd = self.DNN(XYZS_bd, scale=self.factor2freq, sFourier=self.sFourier)
loss_bd_square = tf.square(UNN_bd - Ubd)
loss_bd = tf.reduce_mean(loss_bd_square)
return loss_bd
def get_regularSum2WB(self):
sum2WB = self.DNN.get_regular_sum2WB(self.opt2regular_WB)
return sum2WB
def evalue_MscaleDNN(self, XYZS_points=None):
UNN = self.DNN(XYZS_points, scale=self.factor2freq, sFourier=self.sFourier)
return UNN
def solve_Multiscale_PDE(R):
log_out_path = R['FolderName']  # extract the output path from the dictionary R
if not os.path.exists(log_out_path):  # check whether the output path already exists
os.mkdir(log_out_path)  # if it does not, create the log_out_path directory
logfile_name = '%s%s.txt' % ('log2', R['activate_func'])
log_fileout = open(os.path.join(log_out_path, logfile_name), 'w')  # create and open a writable log file under this path
DNN_Log_Print.dictionary_out2file(R, log_fileout)
# Settings needed for a general Laplace problem
batchsize_it = R['batch_size2interior']
batchsize_bd = R['batch_size2boundary']
bd_penalty_init = R['init_boundary_penalty'] # Regularization parameter for boundary conditions
penalty2WB = R['penalty2weight_biases'] # Regularization parameter for weights and biases
lr_decay = R['learning_rate_decay']
learning_rate = R['learning_rate']
hidden_layers = R['hidden_layers']
act_func = R['activate_func']
input_dim = R['input_dim']
out_dim = R['output_dim']
# Settings needed for the pLaplace operator
p_index = R['order2pLaplace_operator']
mesh_number = 2
region_lb = 0.0
region_rt = 1.0
if R['PDE_type'] == 'general_Laplace':
# -laplace u = f
region_lb = 0.0
region_rt = 1.0
f, u_true, u00, u01, u10, u11, u20, u21, u30, u31, u40, u41 = General_Laplace.get_infos2Laplace_5D(
input_dim=input_dim, out_dim=out_dim, intervalL=region_lb, intervalR=region_rt, equa_name=R['equa_name'])
elif R['PDE_type'] == 'pLaplace':
region_lb = 0.0
region_rt = 1.0
u_true, f, A_eps, u00, u01, u10, u11, u20, u21, u30, u31 = MS_LaplaceEqs.get_infos2pLaplace_4D(
input_dim=input_dim, out_dim=out_dim, intervalL=0.0, intervalR=1.0, equa_name=R['equa_name'])
mscalednn = MscaleDNN(input_dim=R['input_dim'], out_dim=R['output_dim'], hidden_layer=R['hidden_layers'],
Model_name=R['model2NN'], name2actIn=R['name2act_in'], name2actHidden=R['name2act_hidden'],
name2actOut=R['name2act_out'], opt2regular_WB='L0', type2numeric='float32',
factor2freq=R['freq'], sFourier=R['sfourier'])
global_steps = tf.compat.v1.Variable(0, trainable=False)
with tf.device('/gpu:%s' % (R['gpuNo'])):
with tf.variable_scope('vscope', reuse=tf.AUTO_REUSE):
XYZS_it = tf.compat.v1.placeholder(tf.float32, name='XYZS_it', shape=[None, input_dim])
XYZS00 = tf.compat.v1.placeholder(tf.float32, name='XYZS00', shape=[None, input_dim])
XYZS01 = tf.compat.v1.placeholder(tf.float32, name='XYZS01', shape=[None, input_dim])
XYZS10 = tf.compat.v1.placeholder(tf.float32, name='XYZS10', shape=[None, input_dim])
XYZS11 = tf.compat.v1.placeholder(tf.float32, name='XYZS11', shape=[None, input_dim])
XYZS20 = tf.compat.v1.placeholder(tf.float32, name='XYZS20', shape=[None, input_dim])
XYZS21 = tf.compat.v1.placeholder(tf.float32, name='XYZS21', shape=[None, input_dim])
XYZS30 = tf.compat.v1.placeholder(tf.float32, name='XYZS30', shape=[None, input_dim])
XYZS31 = tf.compat.v1.placeholder(tf.float32, name='XYZS31', shape=[None, input_dim])
boundary_penalty = tf.compat.v1.placeholder_with_default(input=1e3, shape=[], name='bd_p')
in_learning_rate = tf.compat.v1.placeholder_with_default(input=1e-5, shape=[], name='lr')
X_it = tf.reshape(XYZS_it[:, 0], shape=[-1, 1])
Y_it = tf.reshape(XYZS_it[:, 1], shape=[-1, 1])
Z_it = tf.reshape(XYZS_it[:, 2], shape=[-1, 1])
S_it = tf.reshape(XYZS_it[:, 3], shape=[-1, 1])
if R['PDE_type'] == 'Laplace' or R['PDE_type'] == 'general_Laplace':
UNN2train, loss_it = mscalednn.loss_it2Laplace(XYZS=XYZS_it, fside=f, loss_type=R['loss_type'])
elif R['PDE_type'] == 'pLaplace' or R['PDE_type'] == 'pLaplace_implicit' or R[
'PDE_type'] == 'pLaplace_explicit':
UNN2train, loss_it = mscalednn.loss_it2pLaplace(XYZS=XYZS_it, Aeps=A_eps, fside=f, loss_type=R['loss_type'],
p_index=2)
elif R['PDE_type'] == 'Possion_Boltzmann':
UNN2train, loss_it = mscalednn.loss_it2Possion_Boltzmann(
XYZS=XYZS_it, Aeps=A_eps, fside=f, loss_type=R['loss_type'], p_index=2)
U_00 = tf.constant(0.0)
U_01 = tf.constant(0.0)
U_10 = tf.constant(0.0)
U_11 = tf.constant(0.0)
U_20 = tf.constant(0.0)
U_21 = tf.constant(0.0)
U_30 = tf.constant(0.0)
U_31 = tf.constant(0.0)
loss_bd00 = mscalednn.loss2bd(XYZS_bd=XYZS00, Ubd_exact=U_00, if_lambda2Ubd=False)
loss_bd01 = mscalednn.loss2bd(XYZS_bd=XYZS01, Ubd_exact=U_01, if_lambda2Ubd=False)
loss_bd10 = mscalednn.loss2bd(XYZS_bd=XYZS10, Ubd_exact=U_10, if_lambda2Ubd=False)
loss_bd11 = mscalednn.loss2bd(XYZS_bd=XYZS11, Ubd_exact=U_11, if_lambda2Ubd=False)
loss_bd20 = mscalednn.loss2bd(XYZS_bd=XYZS20, Ubd_exact=U_20, if_lambda2Ubd=False)
loss_bd21 = mscalednn.loss2bd(XYZS_bd=XYZS21, Ubd_exact=U_21, if_lambda2Ubd=False)
loss_bd30 = mscalednn.loss2bd(XYZS_bd=XYZS30, Ubd_exact=U_30, if_lambda2Ubd=False)
loss_bd31 = mscalednn.loss2bd(XYZS_bd=XYZS31, Ubd_exact=U_31, if_lambda2Ubd=False)
loss_bd = loss_bd00 + loss_bd01 + loss_bd10 + loss_bd11 + loss_bd20 + loss_bd21 + loss_bd30 + loss_bd31
regularSum2WB = mscalednn.get_regularSum2WB()
PWB = penalty2WB * regularSum2WB
loss = loss_it + boundary_penalty * loss_bd + PWB  # the loss function to be optimized
my_optimizer = tf.compat.v1.train.AdamOptimizer(in_learning_rate)
if R['train_model'] == 'group3_training':
train_op1 = my_optimizer.minimize(loss_it, global_step=global_steps)
train_op2 = my_optimizer.minimize(loss_bd, global_step=global_steps)
train_op3 = my_optimizer.minimize(loss, global_step=global_steps)
train_my_loss = tf.group(train_op1, train_op2, train_op3)
elif R['train_model'] == 'group2_training':
train_op2bd = my_optimizer.minimize(loss_bd, global_step=global_steps)
train_op2union = my_optimizer.minimize(loss, global_step=global_steps)
train_my_loss = tf.group(train_op2union, train_op2bd)
elif R['train_model'] == 'union_training':
train_my_loss = my_optimizer.minimize(loss, global_step=global_steps)
UNN2test = mscalednn.evalue_MscaleDNN(XYZS_points=XYZS_it)
t0 = time.time()
loss_it_all, loss_bd_all, loss_all, train_mse_all, train_rel_all = [], [], [], [], []  # empty lists; elements are added with append()
test_mse_all, test_rel_all = [], []
test_epoch = []
# plot the solution on a mesh
if R['testData_model'] == 'random_generate':
# generate test data used to evaluate the trained network
# test_bach_size = 400
# size2test = 20
# test_bach_size = 900
# size2test = 30
test_bach_size = 1600
size2test = 40
# test_bach_size = 4900
# size2test = 70
# test_bach_size = 10000
# size2test = 100
# test_bach_size = 40000
# size2test = 200
# test_bach_size = 250000
# size2test = 500
# test_bach_size = 1000000
# size2test = 1000
test_xyzs_bach = DNN_data.rand_it(test_bach_size, input_dim, region_lb, region_rt)
saveData.save_testData_or_solus2mat(test_xyzs_bach, dataName='testXYZS', outPath=R['FolderName'])
elif R['testData_model'] == 'loadData':
test_bach_size = 1600
size2test = 40
mat_data_path = 'dataMat_highDim'
test_xyzs_bach = Load_data2Mat.get_randomData2mat(dim=input_dim, data_path=mat_data_path)
saveData.save_testData_or_solus2mat(test_xyzs_bach, dataName='testXYZS', outPath=R['FolderName'])
# With allow_soft_placement=True in ConfigProto the GPU can be used
config = tf.compat.v1.ConfigProto(allow_soft_placement=True)  # configure the session parameters when it is created
config.gpu_options.allow_growth = True  # let TensorFlow allocate GPU memory on demand at runtime, avoiding excessive memory usage
config.allow_soft_placement = True  # if the requested device does not exist, fall back to an available one (e.g. run on CPU when no GPU is present)
with tf.compat.v1.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
tmp_lr = learning_rate
for i_epoch in range(R['max_epoch'] + 1):
xyzs_it_batch = DNN_data.rand_it(batchsize_it, input_dim, region_a=region_lb, region_b=region_rt)
xyzs00_batch, xyzs01_batch, xyzs10_batch, xyzs11_batch, xyzs20_batch, xyzs21_batch, xyzs30_batch, \
xyzs31_batch = DNN_data.rand_bd_4D(batchsize_bd, input_dim, region_a=region_lb, region_b=region_rt)
tmp_lr = tmp_lr * (1 - lr_decay)
if R['activate_penalty2bd_increase'] == 1:
if i_epoch < int(R['max_epoch'] / 10):
temp_penalty_bd = bd_penalty_init
elif i_epoch < int(R['max_epoch'] / 5):
temp_penalty_bd = 10 * bd_penalty_init
elif i_epoch < int(R['max_epoch'] / 4):
temp_penalty_bd = 50 * bd_penalty_init
elif i_epoch < int(R['max_epoch'] / 2):
temp_penalty_bd = 100 * bd_penalty_init
elif i_epoch < int(3 * R['max_epoch'] / 4):
temp_penalty_bd = 200 * bd_penalty_init
else:
temp_penalty_bd = 500 * bd_penalty_init
elif R['activate_penalty2bd_increase'] == 2:
if i_epoch < int(R['max_epoch'] / 10):
temp_penalty_bd = 5*bd_penalty_init
elif i_epoch < int(R['max_epoch'] / 5):
temp_penalty_bd = 1 * bd_penalty_init
elif i_epoch < int(R['max_epoch'] / 4):
temp_penalty_bd = 0.5 * bd_penalty_init
elif i_epoch < int(R['max_epoch'] / 2):
temp_penalty_bd = 0.1 * bd_penalty_init
elif i_epoch < int(3 * R['max_epoch'] / 4):
temp_penalty_bd = 0.05 * bd_penalty_init
else:
temp_penalty_bd = 0.02 * bd_penalty_init
else:
temp_penalty_bd = bd_penalty_init
_, loss_it_tmp, loss_bd_tmp, loss_tmp, train_mse_tmp, train_rel_tmp, pwb = sess.run(
[train_my_loss, loss_it, loss_bd, loss, train_mse, train_rel, PWB],
feed_dict={XYZS_it: xyzs_it_batch, XYZS00: xyzs00_batch, XYZS01: xyzs01_batch,
XYZS10: xyzs10_batch, XYZS11: xyzs11_batch, XYZS20: xyzs20_batch,
XYZS21: xyzs21_batch, XYZS30: xyzs30_batch, XYZS31: xyzs31_batch, in_learning_rate: tmp_lr,
boundary_penalty: temp_penalty_bd})
loss_it_all.append(loss_it_tmp)
loss_bd_all.append(loss_bd_tmp)
loss_all.append(loss_tmp)
train_mse_all.append(train_mse_tmp)
train_rel_all.append(train_rel_tmp)
if i_epoch % 1000 == 0:
run_times = time.time() - t0
DNN_Log_Print.print_and_log_train_one_epoch(
i_epoch, run_times, tmp_lr, temp_penalty_bd, pwb, loss_it_tmp, loss_bd_tmp, loss_tmp, train_mse_tmp,
train_rel_tmp, log_out=log_fileout)
# --------------------------- test network ----------------------------------------------
test_epoch.append(i_epoch / 1000)
if R['PDE_type'] == 'general_laplace' or R['PDE_type'] == 'pLaplace' or R['PDE_type'] == 'Possion_Boltzmann':
u_true2test, u_nn2test = sess.run(
[U_true, UNN2test], feed_dict={XYZS_it: test_xyzs_bach})
else:
u_true2test = u_true
u_nn2test = sess.run(UNN2test, feed_dict={XYZS_it: test_xyzs_bach})
point_square_error = np.square(u_true2test - u_nn2test)
mse2test = np.mean(point_square_error)
test_mse_all.append(mse2test)
res2test = mse2test / np.mean(np.square(u_true2test))
test_rel_all.append(res2test)
DNN_Log_Print.print_and_log_test_one_epoch(mse2test, res2test, log_out=log_fileout)
# ------------------- save the testing results into mat file and plot them -------------------------
saveData.save_trainLoss2mat_1actFunc(loss_it_all, loss_bd_all, loss_all, actName=act_func,
outPath=R['FolderName'])
saveData.save_train_MSE_REL2mat(train_mse_all, train_rel_all, actName=act_func, outPath=R['FolderName'])
plotData.plotTrain_loss_1act_func(loss_it_all, lossType='loss_it', seedNo=R['seed'], outPath=R['FolderName'])
plotData.plotTrain_loss_1act_func(loss_bd_all, lossType='loss_bd', seedNo=R['seed'], outPath=R['FolderName'],
yaxis_scale=True)
plotData.plotTrain_loss_1act_func(loss_all, lossType='loss', seedNo=R['seed'], outPath=R['FolderName'])
saveData.save_train_MSE_REL2mat(train_mse_all, train_rel_all, actName=act_func, outPath=R['FolderName'])
plotData.plotTrain_MSE_REL_1act_func(train_mse_all, train_rel_all, actName=act_func, seedNo=R['seed'],
outPath=R['FolderName'], yaxis_scale=True)
# ---------------------- save testing results to mat files, then plot them --------------------------------
saveData.save_2testSolus2mat(u_true2test, u_nn2test, actName='utrue', actName1=act_func, outPath=R['FolderName'])
# plot heat maps of the solution (exact solution and DNN solution)
plotData.plot_Hot_solution2test(u_true2test, size_vec2mat=size2test, actName='Utrue', seedNo=R['seed'],
outPath=R['FolderName'])
plotData.plot_Hot_solution2test(u_nn2test, size_vec2mat=size2test, actName=act_func, seedNo=R['seed'],
outPath=R['FolderName'])
saveData.save_testMSE_REL2mat(test_mse_all, test_rel_all, actName=act_func, outPath=R['FolderName'])
plotData.plotTest_MSE_REL(test_mse_all, test_rel_all, test_epoch, actName=act_func,
seedNo=R['seed'], outPath=R['FolderName'], yaxis_scale=True)
saveData.save_test_point_wise_err2mat(point_square_error, actName=act_func, outPath=R['FolderName'])
plotData.plot_Hot_point_wise_err(point_square_error, size_vec2mat=size2test, actName=act_func,
seedNo=R['seed'], outPath=R['FolderName'])
if __name__ == "__main__":
R={}
# -------------------------------------- CPU or GPU selection -----------------------------------------------
R['gpuNo'] = 0
if platform.system() == 'Windows':
os.environ["CDUA_VISIBLE_DEVICES"] = "%s" % (R['gpuNo'])
else:
print('-------------------------------------- linux -----------------------------------------------')
# A Linux terminal has no GUI, so the following line is needed, and it must
# come before importing matplotlib.pyplot, otherwise it has no effect.
matplotlib.use('Agg')
if tf.test.is_gpu_available():
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3" # ่ฎพ็ฝฎๅฝๅไฝฟ็จ็GPU่ฎพๅคไป
ไธบ็ฌฌ 0,1,2,3 ๅGPU, ่ฎพๅคๅ็งฐไธบ'/gpu:0'
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# Output file path setup
# store_file = 'Laplace4D'
store_file = 'pLaplace4D'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
OUT_DIR = os.path.join(BASE_DIR, store_file)
if not os.path.exists(OUT_DIR):
print('---------------------- OUT_DIR ---------------------:', OUT_DIR)
os.mkdir(OUT_DIR)
R['seed'] = np.random.randint(1e5)
seed_str = str(R['seed'])  # convert the int seed to a string
FolderName = os.path.join(OUT_DIR, seed_str)  # join the paths
R['FolderName'] = FolderName
if not os.path.exists(FolderName):
print('--------------------- FolderName -----------------:', FolderName)
os.mkdir(FolderName)
# ---------------------------------------- Copy and save the current file -----------------------------------------
if platform.system() == 'Windows':
tf.compat.v1.reset_default_graph()
shutil.copy(__file__, '%s/%s' % (FolderName, os.path.basename(__file__)))
else:
shutil.copy(__file__, '%s/%s' % (FolderName, os.path.basename(__file__)))
# if the value of step_stop_flag is not 0, it will activate stop condition of step to kill program
step_stop_flag = input('please input an integer number to activate step-stop----0:no---!0:yes--:')
R['activate_stop'] = int(step_stop_flag)
# if the value of step_stop_flag is not 0, it will activate stop condition of step to kill program
R['max_epoch'] = 200000
if 0 != R['activate_stop']:
epoch_stop = input('please input a stop epoch:')
R['max_epoch'] = int(epoch_stop)
# ---------------------------- Setup of multi-scale problem-------------------------------
R['input_dim'] = 4  # input dimension, i.e. the dimension of the problem
R['output_dim'] = 1  # output dimension
if store_file == 'Laplace4D':
R['PDE_type'] = 'general_Laplace'
R['equa_name'] = 'PDE1'
# R['equa_name'] = 'PDE2'
# R['equa_name'] = 'PDE3'
# R['equa_name'] = 'PDE4'
# R['equa_name'] = 'PDE5'
# R['equa_name'] = 'PDE6'
# R['equa_name'] = 'PDE7'
elif store_file == 'pLaplace4D':
R['PDE_type'] = 'pLaplace'
# R['equa_name'] = 'multi_scale4D_1'
# R['equa_name'] = 'multi_scale4D_2'
# R['equa_name'] = 'multi_scale4D_5'
# R['equa_name'] = 'multi_scale4D_6'
R['equa_name'] = 'multi_scale4D_7'
elif store_file == 'Boltzmann4D':
R['PDE_type'] = 'Possion_Boltzmann'
# R['equa_name'] = 'Boltzmann1'
R['equa_name'] = 'Boltzmann2'
if R['PDE_type'] == 'general_Laplace':
R['mesh_number'] = 1
R['epsilon'] = 0.1
R['order2pLaplace_operator'] = 2
R['batch_size2interior'] = 8000  # batch size of the interior training data
R['batch_size2boundary'] = 1250
elif R['PDE_type'] == 'pLaplace':
R['mesh_number'] = 1
R['epsilon'] = 0.1
R['order2pLaplace_operator'] = 2
R['batch_size2interior'] = 8000  # batch size of the interior training data
R['batch_size2boundary'] = 1250
# ---------------------------- Setup of DNN -------------------------------
# test data loading mode
R['testData_model'] = 'loadData'
# R['testData_model'] = 'random_generate'
# R['loss_type'] = 'L2_loss'  # loss type: L2 loss
R['loss_type'] = 'variational_loss'  # loss type: PDE variational form
R['optimizer_name'] = 'Adam'  # optimizer
R['learning_rate'] = 2e-4  # learning rate
R['learning_rate_decay'] = 5e-5  # learning rate decay
R['train_model'] = 'union_training'
# R['train_model'] = 'group2_training'
# R['train_model'] = 'group3_training'
# Regularization mode for the weights and biases
R['regular_wb_model'] = 'L0'
# R['regular_wb_model'] = 'L1'
# R['regular_wb_model'] = 'L2'
R['penalty2weight_biases'] = 0.000 # Regularization parameter for weights
# R['penalty2weight_biases'] = 0.001 # Regularization parameter for weights
# R['penalty2weight_biases'] = 0.0025 # Regularization parameter for weights
# How the boundary penalty increases, and the boundary penalty factor
R['activate_penalty2bd_increase'] = 1
# R['init_boundary_penalty'] = 1000 # Regularization parameter for boundary conditions
R['init_boundary_penalty'] = 100 # Regularization parameter for boundary conditions
# Frequency range setting for the network
R['freq'] = np.concatenate(([1], np.arange(1, 100 - 1)), axis=0)
# &&&&&&&&&&&&&&&&&&& Network model to use &&&&&&&&&&&&&&&&&&&&&&&&&&&
# R['model'] = 'DNN'
# R['model'] = 'DNN_scale'
# R['model'] = 'DNN_adapt_scale'
R['model'] = 'DNN_FourierBase'
# R['model'] = 'DNN_Sin+Cos_Base'
# &&&&&&&&&&&&&&&&&&&&&& Number of hidden layers and neurons per layer &&&&&&&&&&&&&&&&&&&&&&&&&&&&
if R['model'] == 'DNN_FourierBase':
R['hidden_layers'] = (250, 400, 400, 300, 300, 200) # 250+500*400+400*400+400*300+300*300+300*200+200=630450
else:
# R['hidden_layers'] = (100, 10, 8, 6, 4)  # for testing
# R['hidden_layers'] = (100, 80, 60, 60, 40, 40, 20)
# R['hidden_layers'] = (200, 100, 80, 50, 30)
# R['hidden_layers'] = (250, 400, 400, 300, 300, 200) # 250+500*400+400*400+400*300+300*300+300*200+200=630450
R['hidden_layers'] = (500, 400, 400, 300, 300, 200) # 500+500*400+400*400+400*300+300*300+300*200+200=630700
# R['hidden_layers'] = (500, 400, 300, 300, 200, 100)
# R['hidden_layers'] = (500, 400, 300, 200, 200, 100)
# &&&&&&&&&&&&&&&&&&& Choice of activation function &&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# R['activate_func'] = 'relu'
# R['activate_func'] = 'tanh'
# R['activate_func'] = 'leaky_relu'
# R['activate_func'] = 'srelu'
R['activate_func'] = 's2relu'
# R['activate_func'] = 'elu'
# R['activate_func'] = 'phi'
R['sfourier'] = 1.0
if R['model'] == 'DNN_FourierBase' and R['activate_func'] == 'tanh':
R['sfourier'] = 0.5
# R['sfourier'] = 1.0
elif R['model'] == 'DNN_FourierBase' and R['activate_func'] == 's2relu':
R['sfourier'] = 0.5
solve_Multiscale_PDE(R)
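
For reference, the ``variational_loss`` branch used above averages the Ritz functional (1/2)|∇u|² − f·u over randomly sampled interior points. A minimal, framework-free numpy sketch of that discrete quantity (with invented sample data; no TensorFlow and no MscaleDNN network involved) is:

# Hedged numpy sketch of the discretised Ritz (variational) interior loss,
# loss_it = mean( 0.5 * |grad u|^2 - f * u ), evaluated on toy samples.
import numpy as np

rng = np.random.default_rng(0)
n_points, dim = 8, 4                       # mirrors the 4D setting above
grad_u = rng.normal(size=(n_points, dim))  # stand-in for dUNN
u = rng.normal(size=(n_points, 1))         # stand-in for UNN
f = rng.normal(size=(n_points, 1))         # stand-in for the forcing term

grad_norm_sq = np.sum(grad_u**2, axis=1, keepdims=True)  # row-wise |grad u|^2
loss_it = np.mean(0.5 * grad_norm_sq - f * u)
print(loss_it)
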
| 49.534202
| 126
| 0.579108
|
2e2d007585de13b95883c5aae3b81f29c9080021
| 171
|
py
|
Python
|
repos/spiketoolkit/spiketoolkit/comparison/__init__.py
|
tjd2002/spikeforest2
|
2e393564b858b2995aa2ccccd9bd73065681b5de
|
[
"Apache-2.0"
] | null | null | null |
repos/spiketoolkit/spiketoolkit/comparison/__init__.py
|
tjd2002/spikeforest2
|
2e393564b858b2995aa2ccccd9bd73065681b5de
|
[
"Apache-2.0"
] | null | null | null |
repos/spiketoolkit/spiketoolkit/comparison/__init__.py
|
tjd2002/spikeforest2
|
2e393564b858b2995aa2ccccd9bd73065681b5de
|
[
"Apache-2.0"
] | null | null | null |
from .sortingcomparison import SortingComparison, MappedSortingExtractor, compute_performance, confusion_matrix
from .multisortingcomparison import MultiSortingComparison
| 57
| 111
| 0.906433
|
5fcfe7b1168b504b2d2b891eb1101dd0ed2c33f6
| 13,543
|
py
|
Python
|
detectron2/modeling/meta_arch/rcnn.py
|
dlalsrl203/detectron2-1
|
bcfd861f77683f25e12761a07103145a3dd3b82c
|
[
"Apache-2.0"
] | null | null | null |
detectron2/modeling/meta_arch/rcnn.py
|
dlalsrl203/detectron2-1
|
bcfd861f77683f25e12761a07103145a3dd3b82c
|
[
"Apache-2.0"
] | null | null | null |
detectron2/modeling/meta_arch/rcnn.py
|
dlalsrl203/detectron2-1
|
bcfd861f77683f25e12761a07103145a3dd3b82c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.structures import ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from ..backbone import Backbone, build_backbone
from ..postprocessing import detector_postprocess
from ..proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork"]
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(nn.Module):
"""
Generalized R-CNN. Any models that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, proposals):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 top-scoring predicted
object proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if self.proposal_generator is not None:
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
proposal_losses = {}
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(
self,
batched_inputs: List[Dict[str, torch.Tensor]],
detected_instances: Optional[List[Instances]] = None,
do_postprocess: bool = True,
):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
When do_postprocess=True, same as in :meth:`forward`.
Otherwise, a list[Instances] containing raw network outputs.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator is not None:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = [x.to(self.device) for x in detected_instances]
results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
if do_postprocess:
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
        """
images = [x["image"].to(self.device) for x in batched_inputs]
        # Normalization: a preprocessing step that scales the data in each channel to a common range
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
@staticmethod
def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]], image_sizes):
"""
Rescale the output instances to the target size.
"""
# note: private function; subject to changes
processed_results = []
for results_per_image, input_per_image, image_size in zip(
instances, batched_inputs, image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
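# --- Illustrative usage sketch (not from the upstream detectron2 source) ---
# The forward()/inference() docstrings above describe the expected input format.
# The helper below shows how `batched_inputs` might be assembled for a single
# image at inference time; `model` is assumed to be an already-built
# GeneralizedRCNN in eval mode, and the names here are placeholders.
def _generalized_rcnn_inference_example(model, image: torch.Tensor):
    """image: a (C, H, W) tensor in the model's input_format (e.g. BGR uint8)."""
    height, width = image.shape[1], image.shape[2]
    batched_inputs = [{"image": image, "height": height, "width": width}]
    with torch.no_grad():
        outputs = model.inference(batched_inputs)  # list[dict] with an "instances" key
    return outputs[0]["instances"]  # pred_boxes, pred_classes, scores, ...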
@META_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
"""
A meta architecture that only predicts object proposals.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"proposals": r})
return processed_results
| 40.915408
| 98
| 0.634424
|
68f180473fd917daac583d8d9242de3f78858f8b
| 1,390
|
py
|
Python
|
tests/fields/date_field_test.py
|
binary-butterfly/wtfjson
|
551ad07c895ce3c94ac3015b6b5ecc2102599b56
|
[
"MIT"
] | null | null | null |
tests/fields/date_field_test.py
|
binary-butterfly/wtfjson
|
551ad07c895ce3c94ac3015b6b5ecc2102599b56
|
[
"MIT"
] | 1
|
2021-10-11T08:55:45.000Z
|
2021-10-11T08:55:45.000Z
|
tests/fields/date_field_test.py
|
binary-butterfly/wtfjson
|
551ad07c895ce3c94ac3015b6b5ecc2102599b56
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
binary butterfly validator
Copyright (c) 2021, binary butterfly GmbH
Use of this source code is governed by an MIT-style license that can be found in the LICENSE.txt.
"""
from datetime import date
from unittest import TestCase
from wtfjson import DictInput
from wtfjson.fields import DateField
class DateDictInput(DictInput):
test_field = DateField()
class DateFieldTest(TestCase):
def test_success(self):
form = DateDictInput(data={'test_field': '2020-10-01'})
assert form.validate() is True
assert form.has_errors is False
assert form.errors == {}
assert form.out == {'test_field': date(2020, 10, 1)}
def test_invalid_type(self):
form = DateDictInput(data={'test_field': 1})
assert form.validate() is False
assert form.has_errors is True
assert form.errors == {'test_field': ['invalid type']}
def test_invalid_format(self):
form = DateDictInput(data={'test_field': '2020-20-33'})
assert form.validate() is False
assert form.has_errors is True
assert form.errors == {'test_field': ['invalid date']}
def test_invalid_date(self):
form = DateDictInput(data={'test_field': '2020-20-33'})
assert form.validate() is False
assert form.has_errors is True
assert form.errors == {'test_field': ['invalid date']}
| 31.590909
| 97
| 0.669784
|
b78e7248499b693fb4b50310748b9ef8a9eecb04
| 1,735
|
py
|
Python
|
tests/test_rabbit_context.py
|
uk-gov-mirror/ONSdigital.census-rm-qid-batch-runner
|
158ab77260eb939a15e064ecb53040bd96180e5c
|
[
"MIT"
] | null | null | null |
tests/test_rabbit_context.py
|
uk-gov-mirror/ONSdigital.census-rm-qid-batch-runner
|
158ab77260eb939a15e064ecb53040bd96180e5c
|
[
"MIT"
] | 39
|
2019-05-14T07:23:06.000Z
|
2021-03-05T11:50:56.000Z
|
tests/test_rabbit_context.py
|
uk-gov-mirror/ONSdigital.census-rm-qid-batch-runner
|
158ab77260eb939a15e064ecb53040bd96180e5c
|
[
"MIT"
] | 1
|
2021-04-11T07:46:44.000Z
|
2021-04-11T07:46:44.000Z
|
from unittest import TestCase
from unittest.mock import patch
from pika.spec import PERSISTENT_DELIVERY_MODE
from rabbit_context import RabbitContext, RabbitConnectionClosedError
@patch('rabbit_context.pika')
class TestRabbitContext(TestCase):
def test_context_manager_opens_connection_and_channel(self, patch_pika):
with RabbitContext():
patch_pika.BlockingConnection.assert_called_once()
patch_pika.BlockingConnection.return_value.channel.assert_called_once()
def test_context_manager_closes_connection(self, patch_pika):
with RabbitContext():
pass
patch_pika.BlockingConnection.return_value.close.assert_called_once()
def test_attempt_to_publish_message_with_closed_connection_raises_correct_exception(self, patch_pika):
with RabbitContext() as rabbit:
pass
with self.assertRaises(RabbitConnectionClosedError):
rabbit.publish_message('This should raise an exception', 'text')
def test_publish_message(self, patch_pika):
with RabbitContext() as rabbit:
rabbit.publish_message('Test message body', 'text')
patch_pika.BasicProperties.assert_called_once_with(content_type='text', delivery_mode=PERSISTENT_DELIVERY_MODE)
patched_basic_publish = patch_pika.BlockingConnection.return_value.channel.return_value.basic_publish
patched_basic_publish.assert_called_once_with(exchange=rabbit._exchange,
routing_key=rabbit.queue_name,
body='Test message body',
properties=patch_pika.BasicProperties.return_value)
| 44.487179
| 119
| 0.705476
|
3bec3219f072b8defd270fae45b19ed32ec142dc
| 2,900
|
py
|
Python
|
echoAI/Activation/Torch/srelu.py
|
Venkateshwar2506/Echo
|
5d236b25ee4900754f48e0a865e1bf1ae9183875
|
[
"MIT"
] | null | null | null |
echoAI/Activation/Torch/srelu.py
|
Venkateshwar2506/Echo
|
5d236b25ee4900754f48e0a865e1bf1ae9183875
|
[
"MIT"
] | null | null | null |
echoAI/Activation/Torch/srelu.py
|
Venkateshwar2506/Echo
|
5d236b25ee4900754f48e0a865e1bf1ae9183875
|
[
"MIT"
] | null | null | null |
"""
Script defined the SReLU (S-shaped Rectified Linear Activation Unit):
.. math::
h(x_i) = \\left\\{\\begin{matrix} t_i^r + a_i^r(x_i - t_i^r), x_i \\geq t_i^r \\\\ x_i, t_i^r > x_i > t_i^l\\\\ t_i^l + a_i^l(x_i - t_i^l), x_i \\leq t_i^l \\\\ \\end{matrix}\\right.
See SReLU paper:
https://arxiv.org/pdf/1512.07030.pdf
"""
# import pytorch
import torch
from torch import nn
from torch.nn.parameter import Parameter
class SReLU(nn.Module):
"""
    SReLU (S-shaped Rectified Linear Activation Unit): a combination of three linear functions, which performs a mapping R → R with the following formulation:
.. math::
h(x_i) = \\left\\{\\begin{matrix} t_i^r + a_i^r(x_i - t_i^r), x_i \\geq t_i^r \\\\ x_i, t_i^r > x_i > t_i^l\\\\ t_i^l + a_i^l(x_i - t_i^l), x_i \\leq t_i^l \\\\ \\end{matrix}\\right.
with 4 trainable parameters.
Shape:
- Input: (N, *) where * means, any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
.. math:: \\{t_i^r, a_i^r, t_i^l, a_i^l\\}
4 trainable parameters, which model an individual SReLU activation unit. The subscript i indicates that we allow SReLU to vary in different channels. Parameters can be initialized manually or randomly.
References:
- See SReLU paper:
https://arxiv.org/pdf/1512.07030.pdf
Examples:
    >>> srelu_activation = SReLU((2, 2))
>>> t = torch.randn((2,2), dtype=torch.float, requires_grad = True)
>>> output = srelu_activation(t)
"""
def __init__(self, in_features, parameters=None):
"""
Initialization.
INPUT:
- in_features: shape of the input
- parameters: (tr, tl, ar, al) parameters for manual initialization, default value is None. If None is passed, parameters are initialized randomly.
"""
super(SReLU, self).__init__()
self.in_features = in_features
if parameters is None:
self.tr = Parameter(
torch.randn(in_features, dtype=torch.float, requires_grad=True)
)
self.tl = Parameter(
torch.randn(in_features, dtype=torch.float, requires_grad=True)
)
self.ar = Parameter(
torch.randn(in_features, dtype=torch.float, requires_grad=True)
)
self.al = Parameter(
torch.randn(in_features, dtype=torch.float, requires_grad=True)
)
else:
self.tr, self.tl, self.ar, self.al = parameters
def forward(self, x):
"""
Forward pass of the function
"""
        # piecewise form from the docstring: t^r + a^r (x - t^r) above t^r,
        # identity in between, and t^l + a^l (x - t^l) below t^l
        return (
            (x >= self.tr).float() * (self.tr + self.ar * (x - self.tr))
            + (x < self.tr).float() * (x > self.tl).float() * x
            + (x <= self.tl).float() * (self.tl + self.al * (x - self.tl))
        )
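# --- Illustrative usage sketch (not from the upstream echoAI source) ---
# The class docstring shows random initialization; the helper below sketches the
# manual path through the `parameters` argument. The (2, 2) shape and the
# constant fill values are arbitrary placeholders.
def _srelu_manual_init_example():
    shape = (2, 2)
    # tr, tl, ar, al as trainable parameters initialized to fixed values
    params = tuple(Parameter(torch.full(shape, v)) for v in (1.0, -1.0, 0.5, 0.1))
    srelu = SReLU(shape, parameters=params)
    return srelu(torch.randn(shape, dtype=torch.float))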
| 34.52381
| 205
| 0.577586
|
f07dc6896cc8510710e1cc96bf7ae49b00859569
| 37,413
|
py
|
Python
|
swift/common/internal_client.py
|
naototty/swift
|
af373a9cbca5f084f1d0f215b82a579f76da5089
|
[
"Apache-2.0"
] | null | null | null |
swift/common/internal_client.py
|
naototty/swift
|
af373a9cbca5f084f1d0f215b82a579f76da5089
|
[
"Apache-2.0"
] | null | null | null |
swift/common/internal_client.py
|
naototty/swift
|
af373a9cbca5f084f1d0f215b82a579f76da5089
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import sleep, Timeout
from eventlet.green import httplib, socket
import json
import six
from six.moves import range
from six.moves import urllib
import struct
from sys import exc_info, exit
import zlib
from time import gmtime, strftime, time
from zlib import compressobj
from swift.common.exceptions import ClientException
from swift.common.http import (HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES,
is_server_error)
from swift.common.swob import Request, bytes_to_wsgi
from swift.common.utils import quote, closing_if_possible
from swift.common.wsgi import loadapp, pipeline_property
if six.PY3:
from eventlet.green.urllib import request as urllib2
else:
from eventlet.green import urllib2
class UnexpectedResponse(Exception):
"""
Exception raised on invalid responses to InternalClient.make_request().
:param message: Exception message.
:param resp: The unexpected response.
"""
def __init__(self, message, resp):
super(UnexpectedResponse, self).__init__(message)
self.resp = resp
class CompressingFileReader(object):
"""
Wrapper for file object to compress object while reading.
Can be used to wrap file objects passed to InternalClient.upload_object().
Used in testing of InternalClient.
:param file_obj: File object to wrap.
:param compresslevel: Compression level, defaults to 9.
:param chunk_size: Size of chunks read when iterating using object,
defaults to 4096.
"""
def __init__(self, file_obj, compresslevel=9, chunk_size=4096):
self._f = file_obj
self.compresslevel = compresslevel
self.chunk_size = chunk_size
self.set_initial_state()
def set_initial_state(self):
"""
Sets the object to the state needed for the first read.
"""
self._f.seek(0)
self._compressor = compressobj(
self.compresslevel, zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
self.done = False
self.first = True
self.crc32 = 0
self.total_size = 0
def read(self, *a, **kw):
"""
Reads a chunk from the file object.
Params are passed directly to the underlying file object's read().
:returns: Compressed chunk from file object.
"""
if self.done:
return b''
x = self._f.read(*a, **kw)
if x:
self.crc32 = zlib.crc32(x, self.crc32) & 0xffffffff
self.total_size += len(x)
compressed = self._compressor.compress(x)
if not compressed:
compressed = self._compressor.flush(zlib.Z_SYNC_FLUSH)
else:
compressed = self._compressor.flush(zlib.Z_FINISH)
crc32 = struct.pack("<L", self.crc32 & 0xffffffff)
size = struct.pack("<L", self.total_size & 0xffffffff)
footer = crc32 + size
compressed += footer
self.done = True
if self.first:
self.first = False
header = b'\037\213\010\000\000\000\000\000\002\377'
compressed = header + compressed
return compressed
def __iter__(self):
return self
def __next__(self):
chunk = self.read(self.chunk_size)
if chunk:
return chunk
raise StopIteration
next = __next__
def seek(self, offset, whence=0):
if not (offset == 0 and whence == 0):
raise NotImplementedError('Seek implemented on offset 0 only')
self.set_initial_state()
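# --- Illustrative usage sketch (not from the upstream swift source) ---
# CompressingFileReader wraps any seekable file object and yields a gzip-framed
# stream as it is iterated; the in-memory payload below is an arbitrary example.
def _compressing_file_reader_example():
    from io import BytesIO
    reader = CompressingFileReader(BytesIO(b'some payload ' * 1024), chunk_size=4096)
    return b''.join(reader)  # gzip header + deflate chunks + crc32/size footer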
class InternalClient(object):
"""
An internal client that uses a swift proxy app to make requests to Swift.
This client will exponentially slow down for retries.
:param conf_path: Full path to proxy config.
:param user_agent: User agent to be sent to requests to Swift.
:param request_tries: Number of tries before InternalClient.make_request()
gives up.
"""
def __init__(self, conf_path, user_agent, request_tries,
allow_modify_pipeline=False):
if request_tries < 1:
raise ValueError('request_tries must be positive')
self.app = loadapp(conf_path,
allow_modify_pipeline=allow_modify_pipeline)
self.user_agent = user_agent
self.request_tries = request_tries
get_object_ring = pipeline_property('get_object_ring')
container_ring = pipeline_property('container_ring')
account_ring = pipeline_property('account_ring')
auto_create_account_prefix = pipeline_property(
'auto_create_account_prefix', default='.')
def make_request(
self, method, path, headers, acceptable_statuses, body_file=None,
params=None):
"""Makes a request to Swift with retries.
:param method: HTTP method of request.
:param path: Path of request.
:param headers: Headers to be sent with request.
:param acceptable_statuses: List of acceptable statuses for request.
:param body_file: Body file to be passed along with request,
defaults to None.
:param params: A dict of params to be set in request query string,
defaults to None.
:returns: Response object on success.
:raises UnexpectedResponse: Exception raised when make_request() fails
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = dict(headers)
headers['user-agent'] = self.user_agent
for attempt in range(self.request_tries):
resp = exc_type = exc_value = exc_traceback = None
req = Request.blank(
path, environ={'REQUEST_METHOD': method}, headers=headers)
if body_file is not None:
if hasattr(body_file, 'seek'):
body_file.seek(0)
req.body_file = body_file
if params:
req.params = params
try:
resp = req.get_response(self.app)
except (Exception, Timeout):
exc_type, exc_value, exc_traceback = exc_info()
else:
if resp.status_int in acceptable_statuses or \
resp.status_int // 100 in acceptable_statuses:
return resp
elif not is_server_error(resp.status_int):
# No sense retrying when we expect the same result
break
# sleep only between tries, not after each one
if attempt < self.request_tries - 1:
if resp:
# always close any resp.app_iter before we discard it
with closing_if_possible(resp.app_iter):
# for non 2XX requests it's safe and useful to drain
# the response body so we log the correct status code
if resp.status_int // 100 != 2:
for iter_body in resp.app_iter:
pass
sleep(2 ** (attempt + 1))
if resp:
msg = 'Unexpected response: %s' % resp.status
if resp.status_int // 100 != 2 and resp.body:
# provide additional context (and drain the response body) for
# non 2XX responses
msg += ' (%s)' % resp.body
raise UnexpectedResponse(msg, resp)
if exc_type:
# To make pep8 tool happy, in place of raise t, v, tb:
six.reraise(exc_type, exc_value, exc_traceback)
def _get_metadata(
self, path, metadata_prefix='', acceptable_statuses=(2,),
headers=None, params=None):
"""
Gets metadata by doing a HEAD on a path and using the metadata_prefix
to get values from the headers returned.
:param path: Path to do HEAD on.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param headers: extra headers to send
:returns: A dict of metadata with metadata_prefix stripped from keys.
Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
resp = self.make_request('HEAD', path, headers, acceptable_statuses,
params=params)
metadata_prefix = metadata_prefix.lower()
metadata = {}
for k, v in resp.headers.items():
if k.lower().startswith(metadata_prefix):
metadata[k[len(metadata_prefix):].lower()] = v
return metadata
def _iter_items(
self, path, marker='', end_marker='', prefix='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of items from a json listing. Assumes listing has
'name' key defined and uses markers.
:param path: Path to do GET on.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param prefix: Prefix of items
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
if not isinstance(marker, bytes):
marker = marker.encode('utf8')
if not isinstance(end_marker, bytes):
end_marker = end_marker.encode('utf8')
if not isinstance(prefix, bytes):
prefix = prefix.encode('utf8')
while True:
resp = self.make_request(
'GET', '%s?format=json&marker=%s&end_marker=%s&prefix=%s' %
(path, bytes_to_wsgi(quote(marker)),
bytes_to_wsgi(quote(end_marker)),
bytes_to_wsgi(quote(prefix))),
{}, acceptable_statuses)
if not resp.status_int == 200:
if resp.status_int >= HTTP_MULTIPLE_CHOICES:
b''.join(resp.app_iter)
break
data = json.loads(resp.body)
if not data:
break
for item in data:
yield item
marker = data[-1]['name'].encode('utf8')
def make_path(self, account, container=None, obj=None):
"""
Returns a swift path for a request quoting and utf-8 encoding the path
parts as need be.
:param account: swift account
:param container: container, defaults to None
:param obj: object, defaults to None
:raises ValueError: Is raised if obj is specified and container is
not.
"""
path = '/v1/%s' % quote(account)
if container:
path += '/%s' % quote(container)
if obj:
path += '/%s' % quote(obj)
elif obj:
raise ValueError('Object specified without container')
return path
def _set_metadata(
self, path, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets metadata on path using metadata_prefix to set values in headers of
POST request.
:param path: Path to do POST on.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = {}
for k, v in metadata.items():
if k.lower().startswith(metadata_prefix):
headers[k] = v
else:
headers['%s%s' % (metadata_prefix, k)] = v
self.make_request('POST', path, headers, acceptable_statuses)
# account methods
def iter_containers(
self, account, marker='', end_marker='', prefix='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of containers dicts from an account.
:param account: Account on which to do the container listing.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param prefix: Prefix of containers
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
return self._iter_items(path, marker, end_marker, prefix,
acceptable_statuses)
def get_account_info(
self, account, acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns (container_count, object_count) for an account.
:param account: Account on which to get the information.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
resp = self.make_request('HEAD', path, {}, acceptable_statuses)
if not resp.status_int // 100 == 2:
return (0, 0)
return (int(resp.headers.get('x-account-container-count', 0)),
int(resp.headers.get('x-account-object-count', 0)))
def get_account_metadata(
self, account, metadata_prefix='', acceptable_statuses=(2,),
params=None):
"""Gets account metadata.
:param account: Account on which to get the metadata.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:returns: Returns dict of account metadata. Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
headers=None, params=params)
def set_account_metadata(
self, account, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets account metadata. A call to this will add to the account
metadata and not overwrite all of it with values in the metadata dict.
To clear an account metadata value, pass an empty string as
the value for the key in the metadata dict.
:param account: Account on which to get the metadata.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
# container methods
def container_exists(self, account, container):
"""Checks to see if a container exists.
:param account: The container's account.
:param container: Container to check.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
:returns: True if container exists, false otherwise.
"""
path = self.make_path(account, container)
resp = self.make_request('HEAD', path, {}, (2, HTTP_NOT_FOUND))
return not resp.status_int == HTTP_NOT_FOUND
def create_container(
self, account, container, headers=None, acceptable_statuses=(2,)):
"""
Creates container.
:param account: The container's account.
:param container: Container to create.
:param headers: Defaults to empty dict.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container)
self.make_request('PUT', path, headers, acceptable_statuses)
def delete_container(
self, account, container, acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Deletes a container.
:param account: The container's account.
:param container: Container to delete.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
self.make_request('DELETE', path, {}, acceptable_statuses)
def get_container_metadata(
self, account, container, metadata_prefix='',
acceptable_statuses=(2,), params=None):
"""Gets container metadata.
:param account: The container's account.
:param container: Container to get metadata on.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:returns: Returns dict of container metadata. Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
params=params)
def iter_objects(
self, account, container, marker='', end_marker='', prefix='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of object dicts from a container.
:param account: The container's account.
:param container: Container to iterate objects on.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param prefix: Prefix of objects
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
return self._iter_items(path, marker, end_marker, prefix,
acceptable_statuses)
def set_container_metadata(
self, account, container, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets container metadata. A call to this will add to the container
metadata and not overwrite all of it with values in the metadata dict.
To clear a container metadata value, pass an empty string as the value
for the key in the metadata dict.
:param account: The container's account.
:param container: Container to set metadata on.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
# object methods
def delete_object(
self, account, container, obj,
acceptable_statuses=(2, HTTP_NOT_FOUND),
headers=None):
"""
Deletes an object.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:param headers: extra headers to send with request
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
self.make_request('DELETE', path, (headers or {}), acceptable_statuses)
def get_object_metadata(
self, account, container, obj, metadata_prefix='',
acceptable_statuses=(2,), headers=None, params=None):
"""Gets object metadata.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param headers: extra headers to send with request
:returns: Dict of object metadata.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
headers=headers, params=params)
def get_object(self, account, container, obj, headers,
acceptable_statuses=(2,), params=None):
"""
Gets an object.
:param account: The object's account.
:param container: The object's container.
:param obj: The object name.
:param headers: Headers to send with request, defaults to empty dict.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param params: A dict of params to be set in request query string,
defaults to None.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
:returns: A 3-tuple (status, headers, iterator of object body)
"""
headers = headers or {}
path = self.make_path(account, container, obj)
resp = self.make_request(
'GET', path, headers, acceptable_statuses, params=params)
return (resp.status_int, resp.headers, resp.app_iter)
def iter_object_lines(
self, account, container, obj, headers=None,
acceptable_statuses=(2,)):
"""
Returns an iterator of object lines from an uncompressed or compressed
text object.
Uncompress object as it is read if the object's name ends with '.gz'.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container, obj)
resp = self.make_request('GET', path, headers, acceptable_statuses)
if not resp.status_int // 100 == 2:
return
last_part = b''
compressed = obj.endswith('.gz')
# magic in the following zlib.decompressobj argument is courtesy of
# Python decompressing gzip chunk-by-chunk
# http://stackoverflow.com/questions/2423866
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
for chunk in resp.app_iter:
if compressed:
chunk = d.decompress(chunk)
parts = chunk.split(b'\n')
if len(parts) == 1:
last_part = last_part + parts[0]
else:
parts[0] = last_part + parts[0]
for part in parts[:-1]:
yield part
last_part = parts[-1]
if last_part:
yield last_part
def set_object_metadata(
self, account, container, obj, metadata,
metadata_prefix='', acceptable_statuses=(2,)):
"""
Sets an object's metadata. The object's metadata will be overwritten
by the values in the metadata dict.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
def upload_object(
self, fobj, account, container, obj, headers=None):
"""
:param fobj: File object to read object's content from.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param headers: Headers to send with request, defaults to empty dict.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = dict(headers or {})
if 'Content-Length' not in headers:
headers['Transfer-Encoding'] = 'chunked'
path = self.make_path(account, container, obj)
self.make_request('PUT', path, headers, (2,), fobj)
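# --- Illustrative usage sketch (not from the upstream swift source) ---
# Typical InternalClient usage: build it from a proxy-server style config and walk
# a container listing. The config path, user agent, account and container below
# are placeholders.
def _internal_client_listing_example():
    client = InternalClient('/etc/swift/internal-client.conf',
                            user_agent='example-agent', request_tries=3)
    return [obj['name']
            for obj in client.iter_objects('AUTH_example', 'example-container')]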
def get_auth(url, user, key, auth_version='1.0', **kwargs):
if auth_version != '1.0':
exit('ERROR: swiftclient missing, only auth v1.0 supported')
req = urllib2.Request(url)
req.add_header('X-Auth-User', user)
req.add_header('X-Auth-Key', key)
conn = urllib2.urlopen(req)
headers = conn.info()
return (
headers.getheader('X-Storage-Url'),
headers.getheader('X-Auth-Token'))
class SimpleClient(object):
"""
Simple client that is used in bin/swift-dispersion-* and container sync
"""
def __init__(self, url=None, token=None, starting_backoff=1,
max_backoff=5, retries=5):
self.url = url
self.token = token
        self.attempts = 0  # needed in swift-dispersion-populate
self.starting_backoff = starting_backoff
self.max_backoff = max_backoff
self.retries = retries
def base_request(self, method, container=None, name=None, prefix=None,
headers=None, proxy=None, contents=None,
full_listing=None, logger=None, additional_info=None,
timeout=None, marker=None):
# Common request method
trans_start = time()
url = self.url
if full_listing:
info, body_data = self.base_request(
method, container, name, prefix, headers, proxy,
timeout=timeout, marker=marker)
listing = body_data
while listing:
marker = listing[-1]['name']
info, listing = self.base_request(
method, container, name, prefix, headers, proxy,
timeout=timeout, marker=marker)
if listing:
body_data.extend(listing)
return [info, body_data]
if headers is None:
headers = {}
if self.token:
headers['X-Auth-Token'] = self.token
if container:
url = '%s/%s' % (url.rstrip('/'), quote(container))
if name:
url = '%s/%s' % (url.rstrip('/'), quote(name))
else:
params = ['format=json']
if prefix:
params.append('prefix=%s' % prefix)
if marker:
params.append('marker=%s' % quote(marker))
url += '?' + '&'.join(params)
req = urllib2.Request(url, headers=headers, data=contents)
if proxy:
proxy = urllib.parse.urlparse(proxy)
req.set_proxy(proxy.netloc, proxy.scheme)
req.get_method = lambda: method
conn = urllib2.urlopen(req, timeout=timeout)
body = conn.read()
info = conn.info()
try:
body_data = json.loads(body)
except ValueError:
body_data = None
trans_stop = time()
if logger:
sent_content_length = 0
for n, v in headers.items():
nl = n.lower()
if nl == 'content-length':
try:
sent_content_length = int(v)
break
except ValueError:
pass
logger.debug("-> " + " ".join(
quote(str(x) if x else "-", ":/")
for x in (
strftime('%Y-%m-%dT%H:%M:%S', gmtime(trans_stop)),
method,
url,
conn.getcode(),
sent_content_length,
info['content-length'],
trans_start,
trans_stop,
trans_stop - trans_start,
additional_info
)))
return [info, body_data]
def retry_request(self, method, **kwargs):
retries = kwargs.pop('retries', self.retries)
self.attempts = 0
backoff = self.starting_backoff
while self.attempts <= retries:
self.attempts += 1
try:
return self.base_request(method, **kwargs)
except (socket.error, httplib.HTTPException, urllib2.URLError) \
as err:
if self.attempts > retries:
if isinstance(err, urllib2.HTTPError):
raise ClientException('Raise too many retries',
http_status=err.getcode())
else:
raise
sleep(backoff)
backoff = min(backoff * 2, self.max_backoff)
def get_account(self, *args, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('GET', **kwargs)
def put_container(self, container, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('PUT', container=container, **kwargs)
def get_container(self, container, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('GET', container=container, **kwargs)
def put_object(self, container, name, contents, **kwargs):
# Used in swift-dispersion-populate
return self.retry_request('PUT', container=container, name=name,
contents=contents.read(), **kwargs)
def head_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
return client.retry_request('HEAD', **kwargs)
def put_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
client.retry_request('PUT', **kwargs)
def delete_object(url, **kwargs):
"""For usage with container sync """
client = SimpleClient(url=url)
client.retry_request('DELETE', **kwargs)
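# --- Illustrative usage sketch (not from the upstream swift source) ---
# The module-level helpers above wrap SimpleClient for container sync; a direct
# call might look like the following, with a placeholder URL and token.
def _head_object_example():
    return head_object('http://proxy.example.com/v1/AUTH_example/c/o',
                       headers={'X-Auth-Token': 'example-token'})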
| 40.056745
| 79
| 0.578515
|
a7162563d84e62a79dea2df52cd1f7aac676a27a
| 2,615
|
py
|
Python
|
src/hcl_model/transformers/structural_breaks.py
|
khrapovs/hcl-model
|
879740e6072c2ff45864040db0b8364b55de1f44
|
[
"MIT"
] | null | null | null |
src/hcl_model/transformers/structural_breaks.py
|
khrapovs/hcl-model
|
879740e6072c2ff45864040db0b8364b55de1f44
|
[
"MIT"
] | 5
|
2022-02-09T12:38:04.000Z
|
2022-02-21T15:25:06.000Z
|
src/hcl_model/transformers/structural_breaks.py
|
khrapovs/hcl-model
|
879740e6072c2ff45864040db0b8364b55de1f44
|
[
"MIT"
] | 1
|
2022-02-17T09:59:22.000Z
|
2022-02-17T09:59:22.000Z
|
from __future__ import annotations
from typing import Union
import numpy as np
import pandas as pd
import ruptures as rpt
from sklearn.base import BaseEstimator, TransformerMixin
X_TYPE = Union[pd.Series, np.ndarray]
class TargetStructuralBreakCorrectionTransformer(BaseEstimator, TransformerMixin):
def __init__(self, structural_break_correction: bool = True) -> None:
self.structural_break_correction = structural_break_correction
def fit(self, X: X_TYPE, y: pd.Series = None) -> TargetStructuralBreakCorrectionTransformer:
return self
def transform(self, X: X_TYPE) -> X_TYPE:
if self.structural_break_correction:
if isinstance(X, np.ndarray):
return self._get_series_without_structural_breaks(signal=pd.Series(X.flatten())).values
else:
return self._get_series_without_structural_breaks(signal=X)
else:
return X
@staticmethod
def inverse_transform(X: X_TYPE) -> X_TYPE:
return X
def _get_series_without_structural_breaks(self, signal: pd.Series) -> pd.Series:
change_points = self._get_change_points(y=signal)
if len(change_points) <= 1:
return signal
else:
change_points = np.concatenate((np.array([0]), change_points))
current_signal = signal[change_points[-2] : change_points[-1]]
level_current = current_signal.median()
variability_current = current_signal.std()
for past, current in zip(change_points[:-1], change_points[1:]):
variability_past = signal[past:current].std()
signal[past:current] = self._adjust_level(y=signal[past:current], level_current=level_current)
signal[past:current] = self._adjust_variability(
y=signal[past:current], variability_current=variability_current, variability_past=variability_past
)
return signal
@staticmethod
def _adjust_variability(y: pd.Series, variability_current: float, variability_past: float) -> pd.Series:
if variability_past == 0:
return y
else:
adjust_factor = variability_current / variability_past
return (y - y.mean()) * adjust_factor + y.mean()
@staticmethod
def _adjust_level(y: pd.Series, level_current: float) -> pd.Series:
return y + level_current - y.median()
@staticmethod
def _get_change_points(y: pd.Series) -> np.ndarray:
return np.array(rpt.KernelCPD(kernel="rbf", jump=1, min_size=26).fit(y.values).predict(pen=10))
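# --- Illustrative usage sketch (not from the upstream hcl-model source) ---
# Applying the transformer to a synthetic series with an artificial level shift;
# the series length and shift size are arbitrary, and change points come from the
# rbf KernelCPD settings hard-coded in _get_change_points above.
def _structural_break_correction_example():
    rng = np.random.default_rng(0)
    y = pd.Series(np.concatenate([rng.normal(0.0, 1.0, 100),
                                  rng.normal(10.0, 1.0, 100)]))
    transformer = TargetStructuralBreakCorrectionTransformer()
    return transformer.fit_transform(y)  # level/variability aligned to the last regime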
| 40.230769
| 118
| 0.670746
|
ad84da7bceabb31fcd668e9ab0ba97c53c659c10
| 591
|
py
|
Python
|
common/oldTesting/streamClient.py
|
APWHY/carlsb
|
e8dd935f9c3b132bc16a0e0e982ffe9c13b8ad1c
|
[
"MIT"
] | null | null | null |
common/oldTesting/streamClient.py
|
APWHY/carlsb
|
e8dd935f9c3b132bc16a0e0e982ffe9c13b8ad1c
|
[
"MIT"
] | null | null | null |
common/oldTesting/streamClient.py
|
APWHY/carlsb
|
e8dd935f9c3b132bc16a0e0e982ffe9c13b8ad1c
|
[
"MIT"
] | null | null | null |
# simple client to send TCP messages from
# only used as a tool for manual testing
import socket
# ip = '<broadcast>'
myIP = socket.gethostbyname(socket.gethostname())
print(myIP)
# ip = "127.0.0.1"
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# soc.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
clients_input = input("What you want to send my dear client?\n")
soc.connect((myIP, 54373))
soc.send(clients_input.encode("utf8"))
result_bytes = soc.recv(4096)
result_string = result_bytes.decode("utf8")
print("Result from server is {}".format(result_string))
| 29.55
| 66
| 0.734349
|
10a31ec06284055e63a505083fc55e248ca8c504
| 1,059
|
py
|
Python
|
tests/test_get_value_digital.py
|
makaimann/fault
|
8c805415f398e64971d18fbd3014bc0b59fb38b8
|
[
"BSD-3-Clause"
] | 31
|
2018-07-16T15:03:14.000Z
|
2022-03-10T08:36:09.000Z
|
tests/test_get_value_digital.py
|
makaimann/fault
|
8c805415f398e64971d18fbd3014bc0b59fb38b8
|
[
"BSD-3-Clause"
] | 216
|
2018-07-18T20:00:34.000Z
|
2021-10-05T17:40:47.000Z
|
tests/test_get_value_digital.py
|
makaimann/fault
|
8c805415f398e64971d18fbd3014bc0b59fb38b8
|
[
"BSD-3-Clause"
] | 10
|
2019-02-17T00:56:58.000Z
|
2021-11-05T13:31:37.000Z
|
from pathlib import Path
import fault
import magma as m
from .common import pytest_sim_params
def pytest_generate_tests(metafunc):
pytest_sim_params(metafunc, 'system-verilog', 'verilator')
class MyAdder(m.Circuit):
io = m.IO(a=m.In(m.UInt[4]),
b=m.Out(m.UInt[4]))
io.b @= io.a + 1
def test_get_value_digital(target, simulator):
# define test
tester = fault.Tester(MyAdder)
# provide stimulus
stim = list(range(16))
output = []
for a in stim:
tester.poke(MyAdder.a, a)
tester.eval()
output.append(tester.get_value(MyAdder.b))
# run the test
kwargs = dict(
target=target,
tmp_dir=True
)
if target == 'system-verilog':
kwargs['simulator'] = simulator
elif target == 'verilator':
kwargs['flags'] = ['-Wno-fatal']
tester.compile_and_run(**kwargs)
# check the results
def model(a):
return (a + 1) % 16
for a, b_meas in zip(stim, output):
b_expct = model(a)
assert b_meas.value == b_expct
| 22.531915
| 62
| 0.608121
|
c5148a9bda2c3d7c62714f56a756948c563743b9
| 2,921
|
py
|
Python
|
config.py
|
tifmusic/tifmusic
|
3ae8e4d71ad7985f320403e058e25afe2481cb6c
|
[
"MIT"
] | 2
|
2021-08-17T08:16:26.000Z
|
2021-08-24T17:22:26.000Z
|
config.py
|
tifmusic/tifmusic
|
3ae8e4d71ad7985f320403e058e25afe2481cb6c
|
[
"MIT"
] | null | null | null |
config.py
|
tifmusic/tifmusic
|
3ae8e4d71ad7985f320403e058e25afe2481cb6c
|
[
"MIT"
] | 2
|
2021-08-25T04:46:50.000Z
|
2021-08-31T02:24:55.000Z
|
#MIT License
#Copyright (c) 2021 SUBIN
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import re
from youtube_dl import YoutubeDL
ydl_opts = {
"geo-bypass": True,
"nocheckcertificate": True
}
ydl = YoutubeDL(ydl_opts)
links=[]
finalurl=""
C_PLAY=False
STREAM=os.environ.get("STREAM_URL", "https://bcovlive-a.akamaihd.net/19b535b7499a4719a5c19e043063f5d9/ap-southeast-1/6034685947001/playlist.m3u8?nocache=825347")
regex = r"^(https?\:\/\/)?(www\.youtube\.com|youtu\.?be)\/.+"
match = re.match(regex,STREAM)
regex_ = r"http.*"
match_ = re.match(regex_,STREAM)
if match:
meta = ydl.extract_info(STREAM, download=False)
formats = meta.get('formats', [meta])
for f in formats:
links.append(f['url'])
finalurl=links[0]
elif match_:
finalurl=STREAM
else:
C_PLAY=True
finalurl=STREAM
class Config:
ADMIN = os.environ.get("ADMINS", '')
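    # ADMINS is a space-separated env var; a hypothetical value such as
    # "12345 @moderator" parses to [12345, '@moderator'] below, since purely
    # numeric entries become ints and everything else stays a username string.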
    ADMINS = [int(admin) if re.search(r'^\d+$', admin) else admin for admin in ADMIN.split()]
API_ID = int(os.environ.get("API_ID", ''))
CHAT = int(os.environ.get("CHAT", ""))
LOG_GROUP=os.environ.get("LOG_GROUP", "")
if LOG_GROUP:
LOG_GROUP=int(LOG_GROUP)
else:
LOG_GROUP=None
STREAM_URL=finalurl
CPLAY=C_PLAY
SHUFFLE=bool(os.environ.get("SHUFFLE", True))
LIMIT=int(os.environ.get("LIMIT", 350))
ADMIN_ONLY=os.environ.get("ADMIN_ONLY", "N")
REPLY_MESSAGE=os.environ.get("REPLY_MESSAGE", None)
if REPLY_MESSAGE:
REPLY_MESSAGE=REPLY_MESSAGE
else:
REPLY_MESSAGE=None
EDIT_TITLE = os.environ.get("EDIT_TITLE", True)
if EDIT_TITLE == "NO":
EDIT_TITLE=None
DURATION_LIMIT=int(os.environ.get("MAXIMUM_DURATION", 15))
DELAY = int(os.environ.get("DELAY", 10))
API_HASH = os.environ.get("API_HASH", "")
BOT_TOKEN = os.environ.get("BOT_TOKEN", "")
SESSION = os.environ.get("SESSION_STRING", "")
playlist=[]
msg = {}
| 36.061728
| 161
| 0.707292
|
7b7defbee993dc201225a158cd3e8f4239e13bba
| 2,263
|
py
|
Python
|
tests/unit/workflow/processors/test_target_locker.py
|
thoughteer/edera
|
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
|
[
"MIT"
] | 3
|
2018-11-27T15:45:19.000Z
|
2018-12-21T20:32:10.000Z
|
tests/unit/workflow/processors/test_target_locker.py
|
thoughteer/edera
|
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
|
[
"MIT"
] | 18
|
2018-12-02T18:38:59.000Z
|
2020-02-05T22:09:37.000Z
|
tests/unit/workflow/processors/test_target_locker.py
|
thoughteer/edera
|
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
|
[
"MIT"
] | null | null | null |
import pytest
from edera import Condition
from edera import Task
from edera.exceptions import LockAcquisitionError
from edera.exceptions import LockRetentionError
from edera.lockers import ProcessLocker
from edera.requisites import shortcut
from edera.workflow import WorkflowBuilder
from edera.workflow.processors import TargetLocker
def test_target_locker_acquires_lock_first():
class C(Condition):
def check(self):
return False
class T(Task):
target = C()
def execute(self):
raise RuntimeError
class X(Task):
@shortcut
def requisite(self):
return T()
workflow = WorkflowBuilder().build(X())
locker = ProcessLocker()
TargetLocker(locker).process(workflow)
assert workflow[X()].item.target is None
with locker.lock("C"):
with pytest.raises(LockAcquisitionError):
workflow[T()].item.execute()
def test_target_locker_prechecks_target():
class C(Condition):
def check(self):
return True
class T(Task):
target = C()
def execute(self):
raise RuntimeError
workflow = WorkflowBuilder().build(T())
TargetLocker(ProcessLocker()).process(workflow)
workflow[T()].item.execute()
def test_target_locker_executes_task_if_all_is_ok():
class C(Condition):
def check(self):
return False
class T(Task):
target = C()
def execute(self):
raise RuntimeError
workflow = WorkflowBuilder().build(T())
TargetLocker(ProcessLocker()).process(workflow)
with pytest.raises(RuntimeError):
workflow[T()].item.execute()
def test_target_locker_interrupts_execution_on_lock_loss():
class C(Condition):
def check(self):
return False
class T(Task):
target = C()
def execute(self):
raise RuntimeError
class CrazyLocker(ProcessLocker):
def lock(self, key, callback=None):
callback()
return super(CrazyLocker, self).lock(key)
workflow = WorkflowBuilder().build(T())
TargetLocker(CrazyLocker()).process(workflow)
with pytest.raises(LockRetentionError):
workflow[T()].item.execute()
| 21.552381
| 59
| 0.647813
|
30a0cc49abfc3c797ab1cfa1b62d5eda9025694a
| 7,335
|
py
|
Python
|
openpeerpower/components/shelly/config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
openpeerpower/components/shelly/config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
openpeerpower/components/shelly/config_flow.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Config flow for Shelly integration."""
import asyncio
import logging
import aiohttp
import aioshelly
import async_timeout
import voluptuous as vol
from openpeerpower import config_entries, core
from openpeerpower.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_USERNAME,
HTTP_UNAUTHORIZED,
)
from openpeerpower.helpers import aiohttp_client
from .const import AIOSHELLY_DEVICE_TIMEOUT_SEC, DOMAIN
from .utils import get_coap_context, get_device_sleep_period
_LOGGER = logging.getLogger(__name__)
HOST_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str})
HTTP_CONNECT_ERRORS = (asyncio.TimeoutError, aiohttp.ClientError)
async def validate_input(opp: core.OpenPeerPower, host, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
options = aioshelly.ConnectionOptions(
host, data.get(CONF_USERNAME), data.get(CONF_PASSWORD)
)
coap_context = await get_coap_context(opp)
async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
device = await aioshelly.Device.create(
aiohttp_client.async_get_clientsession(opp),
coap_context,
options,
)
device.shutdown()
# Return info that you want to store in the config entry.
return {
"title": device.settings["name"],
"hostname": device.settings["device"]["hostname"],
"sleep_period": get_device_sleep_period(device.settings),
"model": device.settings["device"]["type"],
}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Shelly."""
VERSION = 1
host = None
info = None
device_info = None
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
host = user_input[CONF_HOST]
try:
info = await self._async_get_info(host)
except HTTP_CONNECT_ERRORS:
errors["base"] = "cannot_connect"
except aioshelly.FirmwareUnsupported:
return self.async_abort(reason="unsupported_firmware")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
await self.async_set_unique_id(info["mac"])
self._abort_if_unique_id_configured({CONF_HOST: host})
self.host = host
if info["auth"]:
return await self.async_step_credentials()
try:
device_info = await validate_input(self.opp, self.host, {})
except HTTP_CONNECT_ERRORS:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
return self.async_create_entry(
title=device_info["title"] or device_info["hostname"],
data={
**user_input,
"sleep_period": device_info["sleep_period"],
"model": device_info["model"],
},
)
return self.async_show_form(
step_id="user", data_schema=HOST_SCHEMA, errors=errors
)
async def async_step_credentials(self, user_input=None):
"""Handle the credentials step."""
errors = {}
if user_input is not None:
try:
device_info = await validate_input(self.opp, self.host, user_input)
except aiohttp.ClientResponseError as error:
if error.status == HTTP_UNAUTHORIZED:
errors["base"] = "invalid_auth"
else:
errors["base"] = "cannot_connect"
except HTTP_CONNECT_ERRORS:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
return self.async_create_entry(
title=device_info["title"] or device_info["hostname"],
data={
**user_input,
CONF_HOST: self.host,
"sleep_period": device_info["sleep_period"],
"model": device_info["model"],
},
)
else:
user_input = {}
schema = vol.Schema(
{
vol.Required(CONF_USERNAME, default=user_input.get(CONF_USERNAME)): str,
vol.Required(CONF_PASSWORD, default=user_input.get(CONF_PASSWORD)): str,
}
)
return self.async_show_form(
step_id="credentials", data_schema=schema, errors=errors
)
async def async_step_zeroconf(self, discovery_info):
"""Handle zeroconf discovery."""
try:
self.info = info = await self._async_get_info(discovery_info["host"])
except HTTP_CONNECT_ERRORS:
return self.async_abort(reason="cannot_connect")
except aioshelly.FirmwareUnsupported:
return self.async_abort(reason="unsupported_firmware")
await self.async_set_unique_id(info["mac"])
self._abort_if_unique_id_configured({CONF_HOST: discovery_info["host"]})
self.host = discovery_info["host"]
self.context["title_placeholders"] = {
"name": discovery_info.get("name", "").split(".")[0]
}
if info["auth"]:
return await self.async_step_credentials()
try:
self.device_info = await validate_input(self.opp, self.host, {})
except HTTP_CONNECT_ERRORS:
return self.async_abort(reason="cannot_connect")
return await self.async_step_confirm_discovery()
async def async_step_confirm_discovery(self, user_input=None):
"""Handle discovery confirm."""
errors = {}
if user_input is not None:
return self.async_create_entry(
title=self.device_info["title"] or self.device_info["hostname"],
data={
"host": self.host,
"sleep_period": self.device_info["sleep_period"],
"model": self.device_info["model"],
},
)
self._set_confirm_only()
return self.async_show_form(
step_id="confirm_discovery",
description_placeholders={
"model": aioshelly.MODEL_NAMES.get(
self.info["type"], self.info["type"]
),
"host": self.host,
},
errors=errors,
)
async def _async_get_info(self, host):
"""Get info from shelly device."""
async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
return await aioshelly.get_info(
aiohttp_client.async_get_clientsession(self.opp),
host,
)
| 35.095694
| 88
| 0.577369
|
089d099f06d24ad67844856443e63719ca65c971
| 397
|
py
|
Python
|
tpDcc/libs/math/__version__.py
|
tpDcc/tpDcc-libs-math
|
d4db4c10d2b460d32b68a6aabbbad4c9ada65c24
|
[
"MIT"
] | null | null | null |
tpDcc/libs/math/__version__.py
|
tpDcc/tpDcc-libs-math
|
d4db4c10d2b460d32b68a6aabbbad4c9ada65c24
|
[
"MIT"
] | null | null | null |
tpDcc/libs/math/__version__.py
|
tpDcc/tpDcc-libs-math
|
d4db4c10d2b460d32b68a6aabbbad4c9ada65c24
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Version module for tpDcc-libs-math
"""
from __future__ import print_function, division, absolute_import
__version__ = None
def get_version():
global __version__
if __version__:
return __version__
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
return __version__
| 17.26087
| 64
| 0.702771
|
2fc604b1af18f23ab1bb109404f6c311f7457ae2
| 993
|
py
|
Python
|
benchmark/err.py
|
eschnett/QuasinormalModes.jl
|
7ec50c3f565f6cda7501baa0bc589e445873a06e
|
[
"MIT"
] | null | null | null |
benchmark/err.py
|
eschnett/QuasinormalModes.jl
|
7ec50c3f565f6cda7501baa0bc589e445873a06e
|
[
"MIT"
] | null | null | null |
benchmark/err.py
|
eschnett/QuasinormalModes.jl
|
7ec50c3f565f6cda7501baa0bc589e445873a06e
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
##################
# Load data file #
##################
data_re = np.loadtxt("run_1.dat", usecols = (1,9))
xData_re = data_re[:,0]
yData_re = abs(data_re[:,1])
data_im = np.loadtxt("run_1.dat", usecols = (1,10))
xData_im = data_im[:,0]
yData_im = abs(data_im[:,1])
########
# Plot #
########
font_size = 30
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['font.family'] = 'Latin Modern Roman'
plt.rcParams['figure.figsize'] = [10, 8]
plt.close('all')
plt.semilogy(xData_re, yData_re, 'o', color = 'black', label = r'$\Re(\omega)$')
plt.semilogy(xData_im, yData_im, 'x', color = 'red', label = r'$\Im(\omega)$')
plt.legend(loc = 'upper right', fontsize = font_size)
plt.xlabel('Iterations', fontsize = font_size)
plt.ylabel(r'$\log|\varepsilon|$', fontsize = font_size)
plt.tick_params(axis='both', which='major', labelsize=font_size)
#plt.show()
plt.tight_layout()
plt.savefig('err.pdf')
| 23.093023
| 80
| 0.646526
|
6c5c0dbe39e95be16ba689c76d1c64afd2c749bf
| 32,779
|
py
|
Python
|
arviz/stats/diagnostics.py
|
jpreszler/arviz
|
4815daabe9db51eb5cbe8062678722d30077b873
|
[
"Apache-2.0"
] | null | null | null |
arviz/stats/diagnostics.py
|
jpreszler/arviz
|
4815daabe9db51eb5cbe8062678722d30077b873
|
[
"Apache-2.0"
] | null | null | null |
arviz/stats/diagnostics.py
|
jpreszler/arviz
|
4815daabe9db51eb5cbe8062678722d30077b873
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=too-many-lines, too-many-function-args, redefined-outer-name
"""Diagnostic functions for ArviZ."""
from collections.abc import Sequence
import warnings
import numpy as np
import pandas as pd
from scipy import stats
from .stats_utils import (
rint as _rint,
quantile as _quantile,
autocov as _autocov,
not_valid as _not_valid,
wrap_xarray_ufunc as _wrap_xarray_ufunc,
stats_variance_2d as svar,
histogram,
)
from ..data import convert_to_dataset
from ..utils import _var_names, conditional_jit, conditional_vect, Numba, _numba_var, _stack
__all__ = ["bfmi", "ess", "rhat", "mcse", "geweke"]
def bfmi(data):
r"""Calculate the estimated Bayesian fraction of missing information (BFMI).
BFMI quantifies how well momentum resampling matches the marginal energy distribution. For more
information on BFMI, see https://arxiv.org/pdf/1604.00695v1.pdf. The current advice is that
values smaller than 0.3 indicate poor sampling. However, this threshold is provisional and may
change. See http://mc-stan.org/users/documentation/case-studies/pystan_workflow.html for more
information.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object.
Refer to documentation of az.convert_to_dataset for details.
If InferenceData, energy variable needs to be found.
Returns
-------
z : array
The Bayesian fraction of missing information of the model and trace. One element per
chain in the trace.
Examples
--------
Compute the BFMI of an InferenceData object
.. ipython::
In [1]: import arviz as az
...: data = az.load_arviz_data('radon')
...: az.bfmi(data)
"""
if isinstance(data, np.ndarray):
return _bfmi(data)
dataset = convert_to_dataset(data, group="sample_stats")
if not hasattr(dataset, "energy"):
raise TypeError("Energy variable was not found.")
return _bfmi(dataset.energy)
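# Illustrative sketch (added for clarity; not arviz API): BFMI per chain computed
# by hand for a (chain, draw) energy array, mirroring the num / den formula used
# in _bfmi further below. The energy trace is hypothetical standard-normal noise,
# so only the mechanics matter, not the value. Uses the module-level numpy import.
def _bfmi_by_hand_example():
    rng = np.random.default_rng(0)
    energy = rng.normal(size=(4, 1000))  # hypothetical energy: 4 chains x 1000 draws
    num = np.square(np.diff(energy, axis=1)).mean(axis=1)  # mean squared energy jump per chain
    den = np.var(energy, axis=1)  # marginal energy variance per chain
    return num / den  # one BFMI value per chain; values well below ~0.3 flag poor sampling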
def ess(data, *, var_names=None, method="bulk", relative=False, prob=None):
r"""Calculate estimate of the effective sample size.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object.
Refer to documentation of az.convert_to_dataset for details.
For ndarray: shape = (chain, draw).
For n-dimensional ndarray transform first to dataset with az.convert_to_dataset.
var_names : list
Names of variables to include in the effective_sample_size_mean report
method : str
Select ess method. Valid methods are:
- "bulk"
- "tail" # prob, optional
- "quantile" # prob
- "mean" (old ess)
- "sd"
- "median"
- "mad" (mean absolute deviance)
- "z_scale"
- "folded"
- "identity"
relative : bool
Return relative ess
`ress = ess / n`
prob : float, optional
probability value for "tail" and "quantile" ess functions.
Returns
-------
xarray.Dataset
Return the effective sample size, :math:`\hat{N}_{eff}`
Notes
-----
The basic ess diagnostic is computed by:
.. math:: \hat{N}_{eff} = \frac{MN}{\hat{\tau}}
.. math:: \hat{\tau} = -1 + 2 \sum_{t'=0}^K \hat{P}_{t'}
where :math:`M` is the number of chains, :math:`N` the number of draws,
    :math:`\hat{\rho}_t` is the estimated autocorrelation at lag :math:`t`, and
:math:`K` is the last integer for which :math:`\hat{P}_{K} = \hat{\rho}_{2K} +
\hat{\rho}_{2K+1}` is still positive.
The current implementation is similar to Stan, which uses Geyer's initial monotone sequence
criterion (Geyer, 1992; Geyer, 2011).
References
----------
* Vehtari et al. (2019) see https://arxiv.org/abs/1903.08008
* https://mc-stan.org/docs/2_18/reference-manual/effective-sample-size-section.html
Section 15.4.2
* Gelman et al. BDA (2014) Formula 11.8
Examples
--------
Calculate the effective_sample_size using the default arguments:
.. ipython::
In [1]: import arviz as az
...: data = az.load_arviz_data('non_centered_eight')
...: az.ess(data)
Calculate the ress of some of the variables
.. ipython::
In [1]: az.ess(data, relative=True, var_names=["mu", "theta_t"])
Calculate the ess using the "tail" method, leaving the `prob` argument at its default
value.
.. ipython::
In [1]: az.ess(data, method="tail")
"""
methods = {
"bulk": _ess_bulk,
"tail": _ess_tail,
"quantile": _ess_quantile,
"mean": _ess_mean,
"sd": _ess_sd,
"median": _ess_median,
"mad": _ess_mad,
"z_scale": _ess_z_scale,
"folded": _ess_folded,
"identity": _ess_identity,
"local": _ess_local,
}
if method not in methods:
raise TypeError(
"ESS method {} not found. Valid methods are:\n{}".format(method, "\n ".join(methods))
)
ess_func = methods[method]
if (method == "quantile") and prob is None:
raise TypeError("Quantile (prob) information needs to be defined.")
if isinstance(data, np.ndarray):
data = np.atleast_2d(data)
if len(data.shape) < 3:
if prob is not None:
return ess_func( # pylint: disable=unexpected-keyword-arg
data, prob=prob, relative=relative
)
else:
return ess_func(data, relative=relative)
else:
msg = (
"Only uni-dimensional ndarray variables are supported."
" Please transform first to dataset with `az.convert_to_dataset`."
)
raise TypeError(msg)
dataset = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, dataset)
dataset = dataset if var_names is None else dataset[var_names]
ufunc_kwargs = {"ravel": False}
func_kwargs = {"relative": relative} if prob is None else {"prob": prob, "relative": relative}
return _wrap_xarray_ufunc(ess_func, dataset, ufunc_kwargs=ufunc_kwargs, func_kwargs=func_kwargs)
def rhat(data, *, var_names=None, method="rank"):
r"""Compute estimate of rank normalized splitR-hat for a set of traces.
The rank normalized R-hat diagnostic tests for lack of convergence by comparing the variance
between multiple chains to the variance within each chain. If convergence has been achieved,
the between-chain and within-chain variances should be identical. To be most effective in
detecting evidence for nonconvergence, each chain should have been initialized to starting
values that are dispersed relative to the target distribution.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object.
Refer to documentation of az.convert_to_dataset for details.
At least 2 posterior chains are needed to compute this diagnostic of one or more
stochastic parameters.
For ndarray: shape = (chain, draw).
For n-dimensional ndarray transform first to dataset with az.convert_to_dataset.
var_names : list
Names of variables to include in the rhat report
method : str
Select R-hat method. Valid methods are:
- "rank" # recommended by Vehtari et al. (2019)
- "split"
- "folded"
- "z_scale"
- "identity"
Returns
-------
xarray.Dataset
Returns dataset of the potential scale reduction factors, :math:`\hat{R}`
Notes
-----
The diagnostic is computed by:
.. math:: \hat{R} = \frac{\hat{V}}{W}
where :math:`W` is the within-chain variance and :math:`\hat{V}` is the posterior variance
estimate for the pooled rank-traces. This is the potential scale reduction factor, which
converges to unity when each of the traces is a sample from the target posterior. Values
greater than one indicate that one or more chains have not yet converged.
Rank values are calculated over all the chains with `scipy.stats.rankdata`.
Each chain is split in two and normalized with the z-transform following Vehtari et al. (2019).
References
----------
* Vehtari et al. (2019) see https://arxiv.org/abs/1903.08008
* Gelman et al. BDA (2014)
* Brooks and Gelman (1998)
* Gelman and Rubin (1992)
Examples
--------
Calculate the R-hat using the default arguments:
.. ipython::
In [1]: import arviz as az
...: data = az.load_arviz_data("non_centered_eight")
...: az.rhat(data)
Calculate the R-hat of some variables using the folded method:
.. ipython::
In [1]: az.rhat(data, var_names=["mu", "theta_t"], method="folded")
"""
methods = {
"rank": _rhat_rank,
"split": _rhat_split,
"folded": _rhat_folded,
"z_scale": _rhat_z_scale,
"identity": _rhat_identity,
}
if method not in methods:
raise TypeError(
"R-hat method {} not found. Valid methods are:\n{}".format(
method, "\n ".join(methods)
)
)
rhat_func = methods[method]
if isinstance(data, np.ndarray):
data = np.atleast_2d(data)
if len(data.shape) < 3:
return rhat_func(data)
else:
msg = (
"Only uni-dimensional ndarray variables are supported."
" Please transform first to dataset with `az.convert_to_dataset`."
)
raise TypeError(msg)
dataset = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, dataset)
dataset = dataset if var_names is None else dataset[var_names]
ufunc_kwargs = {"ravel": False}
func_kwargs = {}
return _wrap_xarray_ufunc(
rhat_func, dataset, ufunc_kwargs=ufunc_kwargs, func_kwargs=func_kwargs
)
def mcse(data, *, var_names=None, method="mean", prob=None):
"""Calculate Markov Chain Standard Error statistic.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
For ndarray: shape = (chain, draw).
For n-dimensional ndarray transform first to dataset with az.convert_to_dataset.
var_names : list
Names of variables to include in the rhat report
method : str
Select mcse method. Valid methods are:
- "mean"
- "sd"
- "quantile"
prob : float
Quantile information.
Returns
-------
xarray.Dataset
Return the msce dataset
Examples
--------
Calculate the Markov Chain Standard Error using the default arguments:
.. ipython::
In [1]: import arviz as az
...: data = az.load_arviz_data("non_centered_eight")
...: az.mcse(data)
Calculate the Markov Chain Standard Error using the quantile method:
.. ipython::
In [1]: az.mcse(data, method="quantile", prob=.7)
"""
methods = {"mean": _mcse_mean, "sd": _mcse_sd, "quantile": _mcse_quantile}
if method not in methods:
raise TypeError(
"mcse method {} not found. Valid methods are:\n{}".format(
method, "\n ".join(methods)
)
)
mcse_func = methods[method]
if method == "quantile" and prob is None:
raise TypeError("Quantile (prob) information needs to be defined.")
if isinstance(data, np.ndarray):
data = np.atleast_2d(data)
if len(data.shape) < 3:
if prob is not None:
return mcse_func(data, prob=prob) # pylint: disable=unexpected-keyword-arg
else:
return mcse_func(data)
else:
msg = (
"Only uni-dimensional ndarray variables are supported."
" Please transform first to dataset with `az.convert_to_dataset`."
)
raise TypeError(msg)
dataset = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, dataset)
dataset = dataset if var_names is None else dataset[var_names]
ufunc_kwargs = {"ravel": False}
func_kwargs = {} if prob is None else {"prob": prob}
return _wrap_xarray_ufunc(
mcse_func, dataset, ufunc_kwargs=ufunc_kwargs, func_kwargs=func_kwargs
)
@conditional_vect
def _sqrt(a_a, b_b):
return (a_a + b_b) ** 0.5
@conditional_jit(forceobj=True)
def geweke(ary, first=0.1, last=0.5, intervals=20):
r"""Compute z-scores for convergence diagnostics.
    Compare the mean of the first % of the series with the mean of the last % of the series.
    The series is divided into a number of segments for which this difference is computed.
    If the series has converged, this score should oscillate between -1 and 1.
Parameters
----------
ary : 1D array-like
The trace of some stochastic parameter.
first : float
The fraction of series at the beginning of the trace.
last : float
The fraction of series at the end to be compared with the section
at the beginning.
intervals : int
The number of segments.
Returns
-------
scores : list [[]]
        Return a list of [i, score], where i is the starting index of each interval and score is
        the Geweke score computed on that interval.
Notes
-----
The Geweke score on some series x is computed by:
.. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}}
where :math:`E` stands for the mean, :math:`V` the variance,
:math:`x_s` a section at the start of the series and
:math:`x_e` a section at the end of the series.
References
----------
* Geweke (1992)
"""
# Filter out invalid intervals
return _geweke(ary, first, last, intervals)
def _geweke(ary, first, last, intervals):
_numba_flag = Numba.numba_flag
for interval in (first, last):
if interval <= 0 or interval >= 1:
raise ValueError("Invalid intervals for Geweke convergence analysis", (first, last))
if first + last >= 1:
raise ValueError("Invalid intervals for Geweke convergence analysis", (first, last))
# Initialize list of z-scores
zscores = []
# Last index value
end = len(ary) - 1
# Start intervals going up to the <last>% of the chain
last_start_idx = (1 - last) * end
# Calculate starting indices
start_indices = np.linspace(0, last_start_idx, num=intervals, endpoint=True, dtype=int)
# Loop over start indices
for start in start_indices:
# Calculate slices
first_slice = ary[start : start + int(first * (end - start))]
last_slice = ary[int(end - last * (end - start)) :]
z_score = first_slice.mean() - last_slice.mean()
if _numba_flag:
z_score /= _sqrt(svar(first_slice), svar(last_slice))
else:
z_score /= np.sqrt(first_slice.var() + last_slice.var())
zscores.append([start, z_score])
return np.array(zscores)
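# Illustrative sketch (added for clarity; not arviz API): a single Geweke z-score
# computed by hand for one start/end pair, following the formula in the geweke()
# docstring above. The trace is hypothetical; uses the module-level numpy import.
def _geweke_single_score_example(first=0.1, last=0.5):
    rng = np.random.default_rng(1)
    trace = rng.normal(size=5000)                   # hypothetical stationary 1D trace
    first_slice = trace[: int(first * trace.size)]  # leading `first` fraction
    last_slice = trace[-int(last * trace.size):]    # trailing `last` fraction
    return (first_slice.mean() - last_slice.mean()) / np.sqrt(
        first_slice.var() + last_slice.var()
    )  # a converged trace keeps this roughly within (-1, 1)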
def ks_summary(pareto_tail_indices):
"""Display a summary of Pareto tail indices.
Parameters
----------
pareto_tail_indices : array
Pareto tail indices.
Returns
-------
df_k : dataframe
Dataframe containing k diagnostic values.
"""
_numba_flag = Numba.numba_flag
if _numba_flag:
bins = np.asarray([-np.Inf, 0.5, 0.7, 1, np.Inf])
kcounts, _ = histogram(pareto_tail_indices, bins)
else:
kcounts, _ = np.histogram(pareto_tail_indices, bins=[-np.Inf, 0.5, 0.7, 1, np.Inf])
kprop = kcounts / len(pareto_tail_indices) * 100
df_k = pd.DataFrame(
dict(_=["(good)", "(ok)", "(bad)", "(very bad)"], Count=kcounts, Pct=kprop)
).rename(index={0: "(-Inf, 0.5]", 1: " (0.5, 0.7]", 2: " (0.7, 1]", 3: " (1, Inf)"})
if np.sum(kcounts[1:]) == 0:
warnings.warn("All Pareto k estimates are good (k < 0.5)")
elif np.sum(kcounts[2:]) == 0:
warnings.warn("All Pareto k estimates are ok (k < 0.7)")
return df_k
def _bfmi(energy):
r"""Calculate the estimated Bayesian fraction of missing information (BFMI).
BFMI quantifies how well momentum resampling matches the marginal energy distribution. For more
information on BFMI, see https://arxiv.org/pdf/1604.00695v1.pdf. The current advice is that
values smaller than 0.3 indicate poor sampling. However, this threshold is provisional and may
change. See http://mc-stan.org/users/documentation/case-studies/pystan_workflow.html for more
information.
Parameters
----------
energy : NumPy array
Should be extracted from a gradient based sampler, such as in Stan or PyMC3. Typically,
after converting a trace or fit to InferenceData, the energy will be in
`data.sample_stats.energy`.
Returns
-------
z : array
The Bayesian fraction of missing information of the model and trace. One element per
chain in the trace.
"""
energy_mat = np.atleast_2d(energy)
num = np.square(np.diff(energy_mat, axis=1)).mean(axis=1) # pylint: disable=no-member
if energy_mat.ndim == 2:
den = _numba_var(svar, np.var, energy_mat, axis=1, ddof=0)
else:
den = np.var(energy, axis=1)
return num / den
def _z_scale(ary):
"""Calculate z_scale.
Parameters
----------
ary : np.ndarray
Returns
-------
np.ndarray
"""
ary = np.asarray(ary)
size = ary.size
rank = stats.rankdata(ary, method="average")
z = stats.norm.ppf((rank - 0.5) / size)
z = z.reshape(ary.shape)
return z
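# Illustrative sketch (added for clarity; not arviz API): the rank-normalizing
# transform above spelled out on a tiny array, using the module-level numpy and
# scipy imports.
def _z_scale_by_hand_example():
    ary = np.array([3.0, 1.0, 2.0])
    rank = stats.rankdata(ary, method="average")    # [3., 1., 2.]
    return stats.norm.ppf((rank - 0.5) / ary.size)  # z-scores that preserve the ordering of ary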
def _split_chains(ary):
"""Split and stack chains."""
ary = np.asarray(ary)
if len(ary.shape) > 1:
_, n_draw = ary.shape
else:
ary = np.atleast_2d(ary)
_, n_draw = ary.shape
half = n_draw // 2
return _stack(ary[:, :half], ary[:, -half:])
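# Illustrative note (added for clarity; not arviz API): _split_chains halves every
# chain and stacks the halves as extra chains, so a (chain, draw) array becomes
# (2 * chain, draw // 2); the "split" R-hat / ESS variants below operate on this.
def _split_chains_shape_example():
    ary = np.arange(20).reshape(2, 10)  # 2 hypothetical chains of 10 draws
    return _split_chains(ary).shape     # expected: (4, 5)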
def _z_fold(ary):
"""Fold and z-scale values."""
ary = np.asarray(ary)
ary = abs(ary - np.median(ary))
ary = _z_scale(ary)
return ary
def _rhat(ary):
"""Compute the rhat for a 2d array."""
_numba_flag = Numba.numba_flag
ary = np.asarray(ary, dtype=float)
if _not_valid(ary, check_shape=False):
return np.nan
_, num_samples = ary.shape
# Calculate chain mean
chain_mean = np.mean(ary, axis=1)
# Calculate chain variance
chain_var = _numba_var(svar, np.var, ary, axis=1, ddof=1)
# Calculate between-chain variance
between_chain_variance = num_samples * _numba_var(svar, np.var, chain_mean, axis=None, ddof=1)
# Calculate within-chain variance
within_chain_variance = np.mean(chain_var)
# Estimate of marginal posterior variance
rhat_value = np.sqrt(
(between_chain_variance / within_chain_variance + num_samples - 1) / (num_samples)
)
return rhat_value
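# Illustrative sketch (added for clarity; not arviz API): plain (non-split,
# non-rank) R-hat computed by hand for a (chain, draw) array, combining between-
# and within-chain variances exactly as _rhat above. Draws are hypothetical.
def _rhat_by_hand_example():
    rng = np.random.default_rng(2)
    ary = rng.normal(size=(4, 500))                      # hypothetical 4 chains x 500 draws
    n_draw = ary.shape[1]
    between = n_draw * np.var(ary.mean(axis=1), ddof=1)  # between-chain variance
    within = np.mean(np.var(ary, axis=1, ddof=1))        # mean within-chain variance
    return np.sqrt((between / within + n_draw - 1) / n_draw)  # ~1.0 when chains agree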
def _rhat_rank(ary):
"""Compute the rank normalized rhat for 2d array.
Computation follows https://arxiv.org/abs/1903.08008
"""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=2)):
return np.nan
split_ary = _split_chains(ary)
rhat_bulk = _rhat(_z_scale(split_ary))
split_ary_folded = abs(split_ary - np.median(split_ary))
rhat_tail = _rhat(_z_scale(split_ary_folded))
rhat_rank = max(rhat_bulk, rhat_tail)
return rhat_rank
def _rhat_folded(ary):
"""Calculate split-Rhat for folded z-values."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=2)):
return np.nan
ary = _z_fold(_split_chains(ary))
return _rhat(ary)
def _rhat_z_scale(ary):
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=2)):
return np.nan
return _rhat(_z_scale(_split_chains(ary)))
def _rhat_split(ary):
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=2)):
return np.nan
return _rhat(_split_chains(ary))
def _rhat_identity(ary):
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=2)):
return np.nan
return _rhat(ary)
def _ess(ary, relative=False):
"""Compute the effective sample size for a 2D array."""
_numba_flag = Numba.numba_flag
ary = np.asarray(ary, dtype=float)
if _not_valid(ary, check_shape=False):
return np.nan
if (np.max(ary) - np.min(ary)) < np.finfo(float).resolution: # pylint: disable=no-member
return ary.size
if len(ary.shape) < 2:
ary = np.atleast_2d(ary)
n_chain, n_draw = ary.shape
acov = _autocov(ary, axis=1)
chain_mean = ary.mean(axis=1)
mean_var = np.mean(acov[:, 0]) * n_draw / (n_draw - 1.0)
var_plus = mean_var * (n_draw - 1.0) / n_draw
if n_chain > 1:
var_plus += _numba_var(svar, np.var, chain_mean, axis=None, ddof=1)
rho_hat_t = np.zeros(n_draw)
rho_hat_even = 1.0
rho_hat_t[0] = rho_hat_even
rho_hat_odd = 1.0 - (mean_var - np.mean(acov[:, 1])) / var_plus
rho_hat_t[1] = rho_hat_odd
# Geyer's initial positive sequence
t = 1
while t < (n_draw - 3) and (rho_hat_even + rho_hat_odd) > 0.0:
rho_hat_even = 1.0 - (mean_var - np.mean(acov[:, t + 1])) / var_plus
rho_hat_odd = 1.0 - (mean_var - np.mean(acov[:, t + 2])) / var_plus
if (rho_hat_even + rho_hat_odd) >= 0:
rho_hat_t[t + 1] = rho_hat_even
rho_hat_t[t + 2] = rho_hat_odd
t += 2
max_t = t - 2
# improve estimation
if rho_hat_even > 0:
rho_hat_t[max_t + 1] = rho_hat_even
# Geyer's initial monotone sequence
t = 1
while t <= max_t - 2:
if (rho_hat_t[t + 1] + rho_hat_t[t + 2]) > (rho_hat_t[t - 1] + rho_hat_t[t]):
rho_hat_t[t + 1] = (rho_hat_t[t - 1] + rho_hat_t[t]) / 2.0
rho_hat_t[t + 2] = rho_hat_t[t + 1]
t += 2
ess = n_chain * n_draw
tau_hat = -1.0 + 2.0 * np.sum(rho_hat_t[: max_t + 1]) + np.sum(rho_hat_t[max_t + 1 : max_t + 2])
tau_hat = max(tau_hat, 1 / np.log10(ess))
ess = (1 if relative else ess) / tau_hat
if np.isnan(rho_hat_t).any():
ess = np.nan
return ess
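# Illustrative sanity check (added for clarity; not arviz API): the ess() notes
# state N_eff = M * N / tau_hat, and for independent draws tau_hat is close to 1,
# so _ess should land near the nominal M * N. Draws are hypothetical.
def _ess_sanity_example():
    rng = np.random.default_rng(3)
    ary = rng.normal(size=(4, 1000))  # 4 hypothetical chains of 1000 independent draws
    return _ess(ary)                  # expected in the vicinity of 4000 (can be above or below)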
def _ess_bulk(ary, relative=False):
"""Compute the effective sample size for the bulk."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
z_scaled = _z_scale(_split_chains(ary))
ess_bulk = _ess(z_scaled, relative=relative)
return ess_bulk
def _ess_tail(ary, prob=None, relative=False):
"""Compute the effective sample size for the tail.
If `prob` defined, ess = min(qess(prob), qess(1-prob))
"""
if prob is None:
prob = (0.05, 0.95)
elif not isinstance(prob, Sequence):
prob = (prob, 1 - prob)
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
prob_low, prob_high = prob
quantile_low_ess = _ess_quantile(ary, prob_low, relative=relative)
quantile_high_ess = _ess_quantile(ary, prob_high, relative=relative)
return min(quantile_low_ess, quantile_high_ess)
def _ess_mean(ary, relative=False):
"""Compute the effective sample size for the mean."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
return _ess(_split_chains(ary), relative=relative)
def _ess_sd(ary, relative=False):
"""Compute the effective sample size for the sd."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
ary = _split_chains(ary)
return min(_ess(ary, relative=relative), _ess(ary ** 2, relative=relative))
def _ess_quantile(ary, prob, relative=False):
"""Compute the effective sample size for the specific residual."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
if prob is None:
raise TypeError("Prob not defined.")
quantile, = _quantile(ary, prob)
iquantile = ary <= quantile
return _ess(_split_chains(iquantile), relative=relative)
def _ess_local(ary, prob, relative=False):
"""Compute the effective sample size for the specific residual."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
if prob is None:
raise TypeError("Prob not defined.")
if len(prob) != 2:
raise ValueError("Prob argument in ess local must be upper and lower bound")
quantile = _quantile(ary, prob)
iquantile = (quantile[0] <= ary) & (ary <= quantile[1])
return _ess(_split_chains(iquantile), relative=relative)
def _ess_z_scale(ary, relative=False):
"""Calculate ess for z-scaLe."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
return _ess(_z_scale(_split_chains(ary)), relative=relative)
def _ess_folded(ary, relative=False):
"""Calculate split-ess for folded data."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
return _ess(_z_fold(_split_chains(ary)), relative=relative)
def _ess_median(ary, relative=False):
"""Calculate split-ess for median."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
return _ess_quantile(ary, 0.5, relative=relative)
def _ess_mad(ary, relative=False):
"""Calculate split-ess for mean absolute deviance."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
ary = abs(ary - np.median(ary))
ary = ary <= np.median(ary)
ary = _z_scale(_split_chains(ary))
return _ess(ary, relative=relative)
def _ess_identity(ary, relative=False):
"""Calculate ess."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
return _ess(ary, relative=relative)
def _conv_quantile(ary, prob):
"""Return mcse, Q05, Q95, Seff."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan, np.nan, np.nan, np.nan
ess = _ess_quantile(ary, prob)
probability = [0.1586553, 0.8413447, 0.05, 0.95]
with np.errstate(invalid="ignore"):
ppf = stats.beta.ppf(probability, ess * prob + 1, ess * (1 - prob) + 1)
sorted_ary = np.sort(ary.ravel())
size = sorted_ary.size
ppf_size = ppf * size - 1
th1 = sorted_ary[_rint(np.nanmax((ppf_size[0], 0)))]
th2 = sorted_ary[_rint(np.nanmin((ppf_size[1], size - 1)))]
mcse_quantile = (th2 - th1) / 2
th1 = sorted_ary[_rint(np.nanmax((ppf_size[2], 0)))]
th2 = sorted_ary[_rint(np.nanmin((ppf_size[3], size - 1)))]
return mcse_quantile, th1, th2, ess
def _mcse_mean(ary):
"""Compute the Markov Chain mean error."""
_numba_flag = Numba.numba_flag
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
ess = _ess_mean(ary)
if _numba_flag:
sd = _sqrt(svar(np.ravel(ary), ddof=1), np.zeros(1))
else:
sd = np.std(ary, ddof=1)
mcse_mean_value = sd / np.sqrt(ess)
return mcse_mean_value
def _mcse_sd(ary):
"""Compute the Markov Chain sd error."""
_numba_flag = Numba.numba_flag
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
ess = _ess_sd(ary)
if _numba_flag:
sd = np.float(_sqrt(svar(np.ravel(ary), ddof=1), np.zeros(1)))
else:
sd = np.std(ary, ddof=1)
fac_mcse_sd = np.sqrt(np.exp(1) * (1 - 1 / ess) ** (ess - 1) - 1)
mcse_sd_value = sd * fac_mcse_sd
return mcse_sd_value
def _mcse_quantile(ary, prob):
"""Compute the Markov Chain quantile error at quantile=prob."""
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
mcse_q, *_ = _conv_quantile(ary, prob)
return mcse_q
def _circfunc(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
return samples, _angle(samples, low, high, np.pi)
@conditional_vect
def _angle(samples, low, high, p_i=np.pi):
ang = (samples - low) * 2.0 * p_i / (high - low)
return ang
def _circular_standard_deviation(samples, high=2 * np.pi, low=0, axis=None):
p_i = np.pi
samples, ang = _circfunc(samples, high, low)
s_s = np.sin(ang).mean(axis=axis)
c_c = np.cos(ang).mean(axis=axis)
r_r = np.hypot(s_s, c_c)
return ((high - low) / 2.0 / p_i) * np.sqrt(-2 * np.log(r_r))
def _mc_error(ary, batches=5, circular=False):
"""Calculate the simulation standard error, accounting for non-independent samples.
The trace is divided into batches, and the standard deviation of the batch
means is calculated.
Parameters
----------
ary : Numpy array
An array containing MCMC samples
batches : integer
Number of batches
circular : bool
Whether to compute the error taking into account `ary` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
Returns
-------
mc_error : float
Simulation standard error
"""
_numba_flag = Numba.numba_flag
if ary.ndim > 1:
dims = np.shape(ary)
trace = np.transpose([t.ravel() for t in ary])
return np.reshape([_mc_error(t, batches) for t in trace], dims[1:])
else:
if _not_valid(ary, check_shape=False):
return np.nan
if batches == 1:
if circular:
if _numba_flag:
std = _circular_standard_deviation(ary, high=np.pi, low=-np.pi)
else:
std = stats.circstd(ary, high=np.pi, low=-np.pi)
else:
if _numba_flag:
std = np.float(_sqrt(svar(ary), np.zeros(1)))
else:
std = np.std(ary)
return std / np.sqrt(len(ary))
batched_traces = np.resize(ary, (batches, int(len(ary) / batches)))
if circular:
means = stats.circmean(batched_traces, high=np.pi, low=-np.pi, axis=1)
if _numba_flag:
std = _circular_standard_deviation(means, high=np.pi, low=-np.pi)
else:
std = stats.circstd(means, high=np.pi, low=-np.pi)
else:
means = np.mean(batched_traces, 1)
if _numba_flag:
std = _sqrt(svar(means), np.zeros(1))
else:
std = np.std(means)
return std / np.sqrt(batches)
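# Illustrative sketch (added for clarity; not arviz API): batch-means simulation
# error by hand for a 1D non-circular trace, mirroring the plain numpy branch of
# _mc_error above. Samples are hypothetical.
def _mc_error_by_hand_example(batches=5):
    rng = np.random.default_rng(4)
    ary = rng.normal(size=10000)                               # hypothetical MCMC samples
    batched = np.resize(ary, (batches, len(ary) // batches))   # (batches, batch_size)
    means = batched.mean(axis=1)                               # one mean per batch
    return np.std(means) / np.sqrt(batches)                    # simulation standard error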
def _multichain_statistics(ary):
"""Calculate efficiently multichain statistics for summary.
Parameters
----------
ary : numpy.ndarray
Returns
-------
tuple
Order of return parameters is
- mcse_mean, mcse_sd, ess_mean, ess_sd, ess_bulk, ess_tail, r_hat
"""
ary = np.atleast_2d(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
# ess mean
ess_mean_value = _ess_mean(ary)
# ess sd
ess_sd_value = _ess_sd(ary)
# ess bulk
z_split = _z_scale(_split_chains(ary))
ess_bulk_value = _ess(z_split)
# ess tail
quantile05, quantile95 = _quantile(ary, [0.05, 0.95])
iquantile05 = ary <= quantile05
quantile05_ess = _ess(_split_chains(iquantile05))
iquantile95 = ary <= quantile95
quantile95_ess = _ess(_split_chains(iquantile95))
ess_tail_value = min(quantile05_ess, quantile95_ess)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=2)):
rhat_value = np.nan
else:
# r_hat
rhat_bulk = _rhat(z_split)
ary_folded = np.abs(ary - np.median(ary))
rhat_tail = _rhat(_z_scale(_split_chains(ary_folded)))
rhat_value = max(rhat_bulk, rhat_tail)
# mcse_mean
sd = np.std(ary, ddof=1)
mcse_mean_value = sd / np.sqrt(ess_mean_value)
# mcse_sd
fac_mcse_sd = np.sqrt(np.exp(1) * (1 - 1 / ess_sd_value) ** (ess_sd_value - 1) - 1)
mcse_sd_value = sd * fac_mcse_sd
return (
mcse_mean_value,
mcse_sd_value,
ess_mean_value,
ess_sd_value,
ess_bulk_value,
ess_tail_value,
rhat_value,
)
| 32.104799
| 100
| 0.629244
|
b059d27382d32e8f8f9c3ae119295a722f8751b2
| 1,579
|
py
|
Python
|
backend/urls.py
|
kevinqqnj/django-vue-template
|
d061c072ffea4c029784ff427c105a84cb153547
|
[
"MIT"
] | 5
|
2019-05-30T09:31:44.000Z
|
2020-09-11T05:47:39.000Z
|
backend/urls.py
|
kevinqqnj/django-vue-template
|
d061c072ffea4c029784ff427c105a84cb153547
|
[
"MIT"
] | null | null | null |
backend/urls.py
|
kevinqqnj/django-vue-template
|
d061c072ffea4c029784ff427c105a84cb153547
|
[
"MIT"
] | 5
|
2019-07-19T07:36:54.000Z
|
2021-04-11T09:46:31.000Z
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path, include
from rest_framework import routers
from .api.views import index_view, MessageViewSet, serve_worker_view
router = routers.DefaultRouter()
router.register('messages', MessageViewSet)
urlpatterns = [
# http://localhost:8000/
path('', index_view, name='index'),
# serve static files for PWA
path('index.html', index_view, name='index'),
re_path(r'^(?P<worker_name>manifest).json$', serve_worker_view, name='manifest'),
re_path(r'^(?P<worker_name>[-\w\d.]+).js$', serve_worker_view, name='serve_worker'),
re_path(r'^(?P<worker_name>robots).txt$', serve_worker_view, name='robots'),
# http://localhost:8000/api/<router-viewsets>
path('api/', include(router.urls)),
# http://localhost:8000/admin/
path('admin/', admin.site.urls),
# support vue-router history mode
re_path(r'^\S+$', index_view, name='SPA_reload'),
]
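# Illustrative note (added for clarity; not part of the original urlconf): the
# final catch-all pattern matches any non-whitespace path (Django matches the path
# without its leading slash), so client-side routes such as 'messages/42' get the
# SPA shell from index_view and are resolved by vue-router in the browser.
def _spa_fallback_regex_example():
    import re
    pattern = r'^\S+$'
    return bool(re.match(pattern, 'messages/42')), bool(re.match(pattern, ''))  # (True, False)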
| 35.886364
| 88
| 0.69981
|
91f6ecc4b1b741ccd0cace044ea5af80f3dd6251
| 1,186
|
py
|
Python
|
Lesson 2.2 (if, else, elif).py
|
Justmove08/Lesson_2
|
8f88619c0bf0140be9f4b8e24f7a7852758de55a
|
[
"MIT"
] | null | null | null |
Lesson 2.2 (if, else, elif).py
|
Justmove08/Lesson_2
|
8f88619c0bf0140be9f4b8e24f7a7852758de55a
|
[
"MIT"
] | null | null | null |
Lesson 2.2 (if, else, elif).py
|
Justmove08/Lesson_2
|
8f88619c0bf0140be9f4b8e24f7a7852758de55a
|
[
"MIT"
] | null | null | null |
# # The IF construct (if)
# if 0:
# print('true')
# print('continue')
# # The IF-ELSE construct (if, else)
# if 0:
# print('true')
# else:
# print('false')
# # The IF-ELIF construct
# if 1:
# print('true')
# elif 1:
# print('elif_1')
# elif 1:
# print('elif_2')
# elif 1:
# print('elif_3')
# else:
# print('false')
# IF-ELSE used for assignment (conditional expression)
# X = 1
# Y = 3
# Z = 5
# # if X:
# # A = Y
# # else:
# # A = Z
# # print(A, type(A))
# A = Y if X else Z
# print(A, type(A))
# Practical exercise: a car
brand = 'volvo' # brand
engine_volume = 1.5 # engine displacement
horsepower = 151 # engine power
sunroof = False # has a sunroof
# # Checking an IF condition
# if horsepower < 80:
# print('No tax')
# # Checking an IF-ELSE condition
# if horsepower == 80:
# print('No tax')
# print('No tax')
# print('No tax')
# else:
# print('Tax')
# Checking an IF/ELIF/ELIF/ELSE condition
tax = 0
if horsepower < 80:
tax = 0
elif horsepower < 100:
tax = 10000
elif horsepower < 150:
tax = 15000
else:
tax = 50000
print(tax)
# IF condition check used for assignment
cool_car = 0
cool_car = 1 if sunroof == 1 else 0
print(cool_car)
| 17.188406
| 38
| 0.590219
|
36ed056951608eb8a7d098e1ff6f594a778313a3
| 2,919
|
py
|
Python
|
my_package/data/dataset.py
|
Rohan-Raj-1729/myPackage
|
85ccef85097e3a5f9e30d03739f7030579bfe34b
|
[
"MIT"
] | null | null | null |
my_package/data/dataset.py
|
Rohan-Raj-1729/myPackage
|
85ccef85097e3a5f9e30d03739f7030579bfe34b
|
[
"MIT"
] | null | null | null |
my_package/data/dataset.py
|
Rohan-Raj-1729/myPackage
|
85ccef85097e3a5f9e30d03739f7030579bfe34b
|
[
"MIT"
] | null | null | null |
# Imports
import json
from PIL import Image
import numpy as np
class Dataset(object):
'''
A class for the dataset that will return data items as per the given index
'''
def __init__(self, annotation_file, transforms=None):
'''
Arguments:
annotation_file: path to the annotation file
transforms: list of transforms (class instances)
For instance, [<class 'RandomCrop'>, <class 'Rotate'>]
'''
self.annotations_path = annotation_file
        self.transforms = transforms
def __len__(self):
'''
return the number of data points in the dataset
'''
a = open(self.annotations_path)
a = list(a)
return len(a)
def __getitem__(self, idx):
'''
return the dataset element for the index: "idx"
Arguments:
idx: index of the data element.
Returns: A dictionary with:
image: image (in the form of a numpy array) (shape: (3, H, W))
gt_bboxes: N X 5 array where N is the number of bounding boxes, each
consisting of [class, x1, y1, x2, y2]
x1 and x2 lie between 0 and width of the image,
y1 and y2 lie between 0 and height of the image.
You need to do the following,
1. Extract the correct annotation using the idx provided.
        2. Read the image and convert it into a numpy array (won't be necessary
            with some libraries). The shape of the array would be (3, H, W).
        3. Scale the values in the array to be within [0, 1].
        4. Create a dictionary with both the image and annotations.
        5. Perform the desired transformations.
        6. Return the transformed image and annotations as specified.
'''
a = open(self.annotations_path)
a = list(a)
a_list = []
for i in a:
a_list.append(json.loads(i))
#for i in a_list:
# print(i)
line_ = a_list[idx]
img_path = 'd:/PythonPrograms/DS_Asg2/data/' + line_['img_fn']
image = Image.open(img_path)
for i in self.transforms:
image = i(np.asarray(image))
h = [[i[0] for i in j] for j in image]
w = [[i[1] for i in j] for j in image]
x = [[i[2] for i in j] for j in image]
a = np.array(255)
h = h / a
w = w / a
x = x / a
image1 = np.array([h, w, x])
result = {}
result['image'] = image1
stringB = line_['bboxes']
cat = [j['category'] for j in stringB]
ar = []
ar = [[k for k in j['bbox']] for j in stringB]
gtb = {}
for i in range(len(cat)):
gtb[cat[i]] = ar[i]
result['gt_bboxes'] = gtb
return result
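# Illustrative usage sketch (added for clarity): the annotation path below is
# hypothetical, and the identity transform is only there so __getitem__ receives
# a numpy array to rescale and channel-split as described in its docstring.
def _example_usage():
    def identity(arr):
        return arr
    ds = Dataset('annotations.jsonl', transforms=[identity])
    sample = ds[0]
    return sample['image'].shape, sample['gt_bboxes']  # ((3, H, W), {category: [x1, y1, x2, y2], ...})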
| 34.75
| 84
| 0.530319
|
c7af398c446781e46a76f4ed9cc89f39ad3bfcd9
| 7,547
|
py
|
Python
|
configurations/je_meta_fixedaggr_jsc80leakyconv_augzoombright.py
|
ShuaiW/kaggle-heart
|
022997f27add953c74af2b371c67d9d86cbdccc3
|
[
"MIT"
] | 182
|
2016-03-15T01:51:29.000Z
|
2021-04-21T09:49:05.000Z
|
configurations/je_meta_fixedaggr_jsc80leakyconv_augzoombright.py
|
weidezhang/kaggle-heart
|
022997f27add953c74af2b371c67d9d86cbdccc3
|
[
"MIT"
] | 1
|
2018-06-22T16:46:12.000Z
|
2018-06-22T21:08:09.000Z
|
configurations/je_meta_fixedaggr_jsc80leakyconv_augzoombright.py
|
weidezhang/kaggle-heart
|
022997f27add953c74af2b371c67d9d86cbdccc3
|
[
"MIT"
] | 61
|
2016-03-15T00:58:28.000Z
|
2020-03-06T22:00:41.000Z
|
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 20
validate_train_set = True
save_every = 5
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 1
sunny_batch_size = 4
batches_per_chunk = 32 *4
num_epochs_train = 150
# - learning rate and method
base_lr = 0.00003
learning_rate_schedule = {
0: base_lr,
8*num_epochs_train/10: base_lr/10,
19*num_epochs_train/20: base_lr/100,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
"zoom_x": (.75, 1.25),
"zoom_y": (.75, 1.25),
"change_brightness": (-0.3, 0.3),
}
augmentation_params_test = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
"zoom_x": (.80, 1.20),
"zoom_y": (.80, 1.20),
"change_brightness": (-0.2, 0.2),
}
use_hough_roi = True
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(80,80)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
def filter_samples(folders):
    # don't use patients who don't have more than 6 slices
return [
folder for folder in folders
if data_loader.compute_nr_slices(folder) > 6]
# Input sizes
image_size = 64
nr_slices = 22
data_sizes = {
"sliced:data:sax": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:sax:locations": (batch_size, nr_slices),
"sliced:data:sax:is_not_padded": (batch_size, nr_slices),
"sliced:data:randomslices": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice": (batch_size, 30, image_size, image_size),
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size),
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100 # More augmentations since we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on its output
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
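# Illustrative note (added for clarity; not part of the original configuration):
# lb_softplus(lb) builds a nonlinearity equal to softplus(x) + lb, so its output
# can never drop below the chosen lower bound. A theano-free numpy check of the
# same idea, using the module-level numpy import:
def _lb_softplus_check(lb=0.1):
    x = np.linspace(-10.0, 10.0, 5)
    return np.log1p(np.exp(x)) + lb  # softplus(x) + lb, always >= lb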
init = nn.init.Orthogonal()
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
W_in_to_hid=init,
W_hid_to_hid=init,
b=nn.init.Constant(0.1),
nonlinearity=nn.nonlinearities.rectify,
hid_init=nn.init.Constant(0.),
backwards=False,
learn_init=True,
gradient_steps=-1,
grad_clipping=False,
unroll_scan=False,
precompute_input=False)
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:sax"]
input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
input_size_locations = data_sizes["sliced:data:sax:locations"]
l0 = nn.layers.InputLayer(input_size)
lin_slice_mask = nn.layers.InputLayer(input_size_mask)
lin_slice_locations = nn.layers.InputLayer(input_size_locations)
# PREPROCESS SLICES SEPERATELY
# Convolutional layers and some dense layers are defined in a submodel
l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
import je_ss_jonisc80_leaky_convroll_augzoombright
submodel = je_ss_jonisc80_leaky_convroll_augzoombright.build_model(l0_slices)
# Systole Dense layers
l_sys_mu = submodel["meta_outputs"]["systole:mu"]
l_sys_sigma = submodel["meta_outputs"]["systole:sigma"]
# Diastole Dense layers
l_dia_mu = submodel["meta_outputs"]["diastole:mu"]
l_dia_sigma = submodel["meta_outputs"]["diastole:sigma"]
# AGGREGATE SLICES PER PATIENT
l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)
# Systole
l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)
# Diastole
l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)
submodels = [submodel]
return {
"inputs":{
"sliced:data:sax": l0,
"sliced:data:sax:is_not_padded": lin_slice_mask,
"sliced:data:sax:locations": lin_slice_locations,
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": dict(
{},
**{
k: v
for d in [model["regularizable"] for model in submodels if "regularizable" in model]
for k, v in d.items() }
),
"pretrained":{
je_ss_jonisc80_leaky_convroll_augzoombright.__name__: submodel["outputs"],
}
}
| 31.710084
| 149
| 0.719226
|
5404df7483cdb1fd68465f363f8dd5f9b3da5e6f
| 7,522
|
py
|
Python
|
linkerd/datadog_checks/linkerd/config_models/defaults.py
|
kjmadscience/integrations-core
|
663bdf44730dd6c9f3565c121318b320bfcb4988
|
[
"BSD-3-Clause"
] | null | null | null |
linkerd/datadog_checks/linkerd/config_models/defaults.py
|
kjmadscience/integrations-core
|
663bdf44730dd6c9f3565c121318b320bfcb4988
|
[
"BSD-3-Clause"
] | null | null | null |
linkerd/datadog_checks/linkerd/config_models/defaults.py
|
kjmadscience/integrations-core
|
663bdf44730dd6c9f3565c121318b320bfcb4988
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_allow_redirects(field, value):
return True
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
return get_default_field_value(field, value)
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def instance_bearer_token_auth(field, value):
return get_default_field_value(field, value)
def instance_bearer_token_path(field, value):
return get_default_field_value(field, value)
def instance_cache_metric_wildcards(field, value):
return True
def instance_cache_shared_labels(field, value):
return True
def instance_collect_counters_with_distributions(field, value):
return False
def instance_collect_histogram_buckets(field, value):
return True
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_enable_health_service_check(field, value):
return True
def instance_exclude_labels(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics_by_labels(field, value):
return get_default_field_value(field, value)
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_extra_metrics(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_health_service_check(field, value):
return True
def instance_histogram_buckets_as_distributions(field, value):
return False
def instance_hostname_format(field, value):
return get_default_field_value(field, value)
def instance_hostname_label(field, value):
return get_default_field_value(field, value)
def instance_ignore_metrics(field, value):
return get_default_field_value(field, value)
def instance_ignore_metrics_by_labels(field, value):
return get_default_field_value(field, value)
def instance_ignore_tags(field, value):
return get_default_field_value(field, value)
def instance_include_labels(field, value):
return get_default_field_value(field, value)
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_label_joins(field, value):
return get_default_field_value(field, value)
def instance_label_to_hostname(field, value):
return get_default_field_value(field, value)
def instance_labels_mapper(field, value):
return get_default_field_value(field, value)
def instance_log_requests(field, value):
return False
def instance_metric_patterns(field, value):
return get_default_field_value(field, value)
def instance_metrics(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_namespace(field, value):
return get_default_field_value(field, value)
def instance_non_cumulative_histogram_buckets(field, value):
return False
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_openmetrics_endpoint(field, value):
return 'http://localhost:9990/admin/metrics/prometheus'
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_prometheus_metrics_prefix(field, value):
return get_default_field_value(field, value)
def instance_prometheus_url(field, value):
return get_default_field_value(field, value)
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_raw_line_filters(field, value):
return get_default_field_value(field, value)
def instance_raw_metric_prefix(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_rename_labels(field, value):
return get_default_field_value(field, value)
def instance_request_size(field, value):
return 16
def instance_send_distribution_buckets(field, value):
return False
def instance_send_distribution_counts_as_monotonic(field, value):
return False
def instance_send_distribution_sums_as_monotonic(field, value):
return False
def instance_send_histograms_buckets(field, value):
return True
def instance_send_monotonic_counter(field, value):
return True
def instance_send_monotonic_with_gauge(field, value):
return False
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_share_labels(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_telemetry(field, value):
return False
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_protocols_allowed(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return True
def instance_type_overrides(field, value):
return get_default_field_value(field, value)
def instance_use_latest_spec(field, value):
return False
def instance_use_legacy_auth_encoding(field, value):
return True
def instance_use_process_start_time(field, value):
return False
def instance_username(field, value):
return get_default_field_value(field, value)
| 21.188732
| 105
| 0.787557
|
b002b603e75ff12d580d83a9b627836c60d2d8d8
| 9,643
|
py
|
Python
|
tests/test_context.py
|
nascimento/coveragepy
|
9adb80c9612d3d364362bd3b6551532fe9ec7d5f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_context.py
|
nascimento/coveragepy
|
9adb80c9612d3d364362bd3b6551532fe9ec7d5f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_context.py
|
nascimento/coveragepy
|
9adb80c9612d3d364362bd3b6551532fe9ec7d5f
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for context support."""
import inspect
import os.path
import coverage
from coverage import env
from coverage.context import qualname_from_frame
from coverage.data import CoverageData
from tests.coveragetest import CoverageTest
class StaticContextTest(CoverageTest):
"""Tests of the static context."""
def test_no_context(self):
self.make_file("main.py", "a = 1")
cov = coverage.Coverage()
self.start_import_stop(cov, "main")
data = cov.get_data()
self.assertCountEqual(data.measured_contexts(), [""])
def test_static_context(self):
self.make_file("main.py", "a = 1")
cov = coverage.Coverage(context="gooey")
self.start_import_stop(cov, "main")
data = cov.get_data()
self.assertCountEqual(data.measured_contexts(), ["gooey"])
SOURCE = """\
a = 1
if a > 2:
a = 3
assert a == 1
"""
LINES = [1, 2, 4]
ARCS = [(-1, 1), (1, 2), (2, 4), (4, -1)]
def run_red_blue(self, **options):
"""Run red.py and blue.py, and return their CoverageData objects."""
self.make_file("red.py", self.SOURCE)
red_cov = coverage.Coverage(context="red", data_suffix="r", source=["."], **options)
self.start_import_stop(red_cov, "red")
red_cov.save()
red_data = red_cov.get_data()
self.make_file("blue.py", self.SOURCE)
blue_cov = coverage.Coverage(context="blue", data_suffix="b", source=["."], **options)
self.start_import_stop(blue_cov, "blue")
blue_cov.save()
blue_data = blue_cov.get_data()
return red_data, blue_data
def test_combining_line_contexts(self):
red_data, blue_data = self.run_red_blue()
for datas in [[red_data, blue_data], [blue_data, red_data]]:
combined = CoverageData(suffix="combined")
for data in datas:
combined.update(data)
self.assertEqual(combined.measured_contexts(), {'red', 'blue'})
full_names = {os.path.basename(f): f for f in combined.measured_files()}
self.assertCountEqual(full_names, ['red.py', 'blue.py'])
fred = full_names['red.py']
fblue = full_names['blue.py']
def assert_combined_lines(filename, context, lines):
# pylint: disable=cell-var-from-loop
combined.set_query_context(context)
self.assertEqual(combined.lines(filename), lines)
assert_combined_lines(fred, 'red', self.LINES)
assert_combined_lines(fred, 'blue', [])
assert_combined_lines(fblue, 'red', [])
assert_combined_lines(fblue, 'blue', self.LINES)
def test_combining_arc_contexts(self):
red_data, blue_data = self.run_red_blue(branch=True)
for datas in [[red_data, blue_data], [blue_data, red_data]]:
combined = CoverageData(suffix="combined")
for data in datas:
combined.update(data)
self.assertEqual(combined.measured_contexts(), {'red', 'blue'})
full_names = {os.path.basename(f): f for f in combined.measured_files()}
self.assertCountEqual(full_names, ['red.py', 'blue.py'])
fred = full_names['red.py']
fblue = full_names['blue.py']
def assert_combined_lines(filename, context, lines):
# pylint: disable=cell-var-from-loop
combined.set_query_context(context)
self.assertEqual(combined.lines(filename), lines)
assert_combined_lines(fred, 'red', self.LINES)
assert_combined_lines(fred, 'blue', [])
assert_combined_lines(fblue, 'red', [])
assert_combined_lines(fblue, 'blue', self.LINES)
def assert_combined_arcs(filename, context, lines):
# pylint: disable=cell-var-from-loop
combined.set_query_context(context)
self.assertEqual(combined.arcs(filename), lines)
assert_combined_arcs(fred, 'red', self.ARCS)
assert_combined_arcs(fred, 'blue', [])
assert_combined_arcs(fblue, 'red', [])
assert_combined_arcs(fblue, 'blue', self.ARCS)
class DynamicContextTest(CoverageTest):
"""Tests of dynamically changing contexts."""
SOURCE = """\
def helper(lineno):
x = 2
def test_one():
a = 5
helper(6)
def test_two():
a = 9
b = 10
if a > 11:
b = 12
assert a == (13-4)
assert b == (14-4)
helper(15)
test_one()
x = 18
helper(19)
test_two()
"""
OUTER_LINES = [1, 4, 8, 17, 18, 19, 2, 20]
TEST_ONE_LINES = [5, 6, 2]
TEST_TWO_LINES = [9, 10, 11, 13, 14, 15, 2]
def test_dynamic_alone(self):
self.make_file("two_tests.py", self.SOURCE)
cov = coverage.Coverage(source=["."])
cov.set_option("run:dynamic_context", "test_function")
self.start_import_stop(cov, "two_tests")
data = cov.get_data()
full_names = {os.path.basename(f): f for f in data.measured_files()}
fname = full_names["two_tests.py"]
self.assertCountEqual(
data.measured_contexts(),
["", "two_tests.test_one", "two_tests.test_two"])
def assert_context_lines(context, lines):
data.set_query_context(context)
self.assertCountEqual(lines, data.lines(fname))
assert_context_lines("", self.OUTER_LINES)
assert_context_lines("two_tests.test_one", self.TEST_ONE_LINES)
assert_context_lines("two_tests.test_two", self.TEST_TWO_LINES)
def test_static_and_dynamic(self):
self.make_file("two_tests.py", self.SOURCE)
cov = coverage.Coverage(context="stat", source=["."])
cov.set_option("run:dynamic_context", "test_function")
self.start_import_stop(cov, "two_tests")
data = cov.get_data()
full_names = {os.path.basename(f): f for f in data.measured_files()}
fname = full_names["two_tests.py"]
self.assertCountEqual(
data.measured_contexts(),
["stat", "stat|two_tests.test_one", "stat|two_tests.test_two"])
def assert_context_lines(context, lines):
data.set_query_context(context)
self.assertCountEqual(lines, data.lines(fname))
assert_context_lines("stat", self.OUTER_LINES)
assert_context_lines("stat|two_tests.test_one", self.TEST_ONE_LINES)
assert_context_lines("stat|two_tests.test_two", self.TEST_TWO_LINES)
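# For reference, the set_option("run:dynamic_context", "test_function") calls above
# map onto the "dynamic_context" setting in the [run] section of a coverage.py
# config file.  A minimal sketch of an equivalent .coveragerc (illustrative only,
# not used by these tests):
#
#     [run]
#     dynamic_context = test_function
#
# The static "stat" context, by contrast, is passed straight to the Coverage()
# constructor in test_static_and_dynamic above.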
def get_qualname():
"""Helper to return qualname_from_frame for the caller."""
stack = inspect.stack()[1:]
if any(sinfo[0].f_code.co_name == "get_qualname" for sinfo in stack):
        # We're calling ourselves recursively, maybe because we're testing
# properties. Return an int to try to get back on track.
return 17
caller_frame = stack[0][0]
return qualname_from_frame(caller_frame)
# pylint: disable=missing-docstring, unused-argument
class Parent(object):
def meth(self):
return get_qualname()
@property
def a_property(self):
return get_qualname()
class Child(Parent):
pass
class SomethingElse(object):
pass
class MultiChild(SomethingElse, Child):
pass
def no_arguments():
return get_qualname()
def plain_old_function(a, b):
return get_qualname()
def fake_out(self):
return get_qualname()
def patch_meth(self):
return get_qualname()
class OldStyle:
def meth(self):
return get_qualname()
class OldChild(OldStyle):
pass
# pylint: enable=missing-docstring, unused-argument
class QualnameTest(CoverageTest):
"""Tests of qualname_from_frame."""
# Pylint gets confused about meth() below.
# pylint: disable=no-value-for-parameter
run_in_temp_dir = False
def test_method(self):
self.assertEqual(Parent().meth(), "tests.test_context.Parent.meth")
def test_inherited_method(self):
self.assertEqual(Child().meth(), "tests.test_context.Parent.meth")
def test_mi_inherited_method(self):
self.assertEqual(MultiChild().meth(), "tests.test_context.Parent.meth")
def test_no_arguments(self):
self.assertEqual(no_arguments(), "tests.test_context.no_arguments")
def test_plain_old_function(self):
self.assertEqual(
plain_old_function(0, 1), "tests.test_context.plain_old_function")
def test_fake_out(self):
self.assertEqual(fake_out(0), "tests.test_context.fake_out")
def test_property(self):
self.assertEqual(
Parent().a_property, "tests.test_context.Parent.a_property")
def test_changeling(self):
c = Child()
c.meth = patch_meth
self.assertEqual(c.meth(c), "tests.test_context.patch_meth")
def test_oldstyle(self):
if not env.PY2:
self.skipTest("Old-style classes are only in Python 2")
self.assertEqual(OldStyle().meth(), "tests.test_context.OldStyle.meth")
self.assertEqual(OldChild().meth(), "tests.test_context.OldStyle.meth")
def test_bug_829(self):
# A class with a name like a function shouldn't confuse qualname_from_frame.
class test_something(object): # pylint: disable=unused-variable
self.assertEqual(get_qualname(), None)
| 33.137457
| 94
| 0.627398
|
8ff649882857b45ea87ac3dc54b188abd01262bf
| 3,437
|
py
|
Python
|
src/tests/test_comms_models.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 5
|
2015-01-30T08:47:59.000Z
|
2022-01-22T19:27:03.000Z
|
src/tests/test_comms_models.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 2
|
2017-12-28T21:36:48.000Z
|
2017-12-28T21:36:57.000Z
|
src/tests/test_comms_models.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 1
|
2019-01-05T15:51:37.000Z
|
2019-01-05T15:51:37.000Z
|
import unittest
class TestMsg(unittest.TestCase):
def test___init__(self):
# msg = Msg(*args, **kwargs)
assert True # TODO: implement your test here
def test___str__(self):
# msg = Msg(*args, **kwargs)
# self.assertEqual(expected, msg.__str__())
assert True # TODO: implement your test here
def test_remove_receiver(self):
# msg = Msg(*args, **kwargs)
# self.assertEqual(expected, msg.remove_receiver(obj))
assert True # TODO: implement your test here
def test_remove_sender(self):
# msg = Msg(*args, **kwargs)
# self.assertEqual(expected, msg.remove_sender(value))
assert True # TODO: implement your test here
class TestTempMsg(unittest.TestCase):
def test___init__(self):
# temp_msg = TempMsg(senders, receivers, channels, message, header, type, lockstring, hide_from)
assert True # TODO: implement your test here
def test___str__(self):
# temp_msg = TempMsg(senders, receivers, channels, message, header, type, lockstring, hide_from)
# self.assertEqual(expected, temp_msg.__str__())
assert True # TODO: implement your test here
def test_access(self):
# temp_msg = TempMsg(senders, receivers, channels, message, header, type, lockstring, hide_from)
# self.assertEqual(expected, temp_msg.access(accessing_obj, access_type, default))
assert True # TODO: implement your test here
def test_remove_receiver(self):
# temp_msg = TempMsg(senders, receivers, channels, message, header, type, lockstring, hide_from)
# self.assertEqual(expected, temp_msg.remove_receiver(obj))
assert True # TODO: implement your test here
def test_remove_sender(self):
# temp_msg = TempMsg(senders, receivers, channels, message, header, type, lockstring, hide_from)
# self.assertEqual(expected, temp_msg.remove_sender(obj))
assert True # TODO: implement your test here
class TestChannelDB(unittest.TestCase):
def test___init__(self):
# channel_d_b = ChannelDB(*args, **kwargs)
assert True # TODO: implement your test here
def test___str__(self):
# channel_d_b = ChannelDB(*args, **kwargs)
# self.assertEqual(expected, channel_d_b.__str__())
assert True # TODO: implement your test here
def test_access(self):
# channel_d_b = ChannelDB(*args, **kwargs)
# self.assertEqual(expected, channel_d_b.access(accessing_obj, access_type, default))
assert True # TODO: implement your test here
def test_connect(self):
# channel_d_b = ChannelDB(*args, **kwargs)
# self.assertEqual(expected, channel_d_b.connect(player))
assert True # TODO: implement your test here
def test_delete(self):
# channel_d_b = ChannelDB(*args, **kwargs)
# self.assertEqual(expected, channel_d_b.delete())
assert True # TODO: implement your test here
def test_disconnect(self):
# channel_d_b = ChannelDB(*args, **kwargs)
# self.assertEqual(expected, channel_d_b.disconnect(player))
assert True # TODO: implement your test here
def test_has_connection(self):
# channel_d_b = ChannelDB(*args, **kwargs)
# self.assertEqual(expected, channel_d_b.has_connection(player))
assert True # TODO: implement your test here
if __name__ == '__main__':
unittest.main()
| 40.435294
| 104
| 0.673553
|
82c15fcc29aeec0d0dd65aeebe69c59d3838271d
| 277
|
py
|
Python
|
interfax/__init__.py
|
ricky-shake-n-bake-bobby/interfax-python
|
63c282672b6555f745d971988441af44b133468d
|
[
"MIT"
] | null | null | null |
interfax/__init__.py
|
ricky-shake-n-bake-bobby/interfax-python
|
63c282672b6555f745d971988441af44b133468d
|
[
"MIT"
] | null | null | null |
interfax/__init__.py
|
ricky-shake-n-bake-bobby/interfax-python
|
63c282672b6555f745d971988441af44b133468d
|
[
"MIT"
] | null | null | null |
from .response import InboundFax, OutboundFax, ForwardingEmail, Document, Image
from .files import File
__version__ = '1.0.5'
from .client import InterFAX # NOQA
__all__ = ('InterFAX', 'InboundFax', 'OutboundFax', 'ForwardingEmail',
'Document', 'Image', 'File')
| 27.7
| 79
| 0.707581
|
974ee5d30804ed3970c84f42090129127547e5f4
| 1,150
|
py
|
Python
|
reviewboard/extensions/testing/testcases.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 921
|
2015-01-01T15:26:28.000Z
|
2022-03-29T11:30:38.000Z
|
reviewboard/extensions/testing/testcases.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 5
|
2015-03-17T18:57:47.000Z
|
2020-10-02T13:24:31.000Z
|
reviewboard/extensions/testing/testcases.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 285
|
2015-01-12T06:24:36.000Z
|
2022-03-29T11:03:50.000Z
|
"""Base test case support for extension unit tests."""
from __future__ import unicode_literals
from djblets.extensions.testing import ExtensionTestCaseMixin
from reviewboard.extensions.base import get_extension_manager
from reviewboard.testing import TestCase
class ExtensionTestCase(ExtensionTestCaseMixin, TestCase):
"""Base class for Review Board extension unit tests.
Extension authors can subclass this to help write unit tests for their
extensions, ensuring their functionality works as expected.
See :ref:`testing-extensions` for information on how to write unit tests
for extensions, and
:py:class:`~djblets.extensions.testing.testcases.ExtensionTestCaseMixin`
for the details on how this class works.
"""
def get_extension_manager(self):
"""Return the extension manager used for these extensions.
Subclasses don't need to override this unless they're doing something
highly specialized.
Returns:
djblets.extensions.manager.ExtensionManager:
The extension manager used for the unit tests.
"""
return get_extension_manager()
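# A minimal sketch of how an extension author might subclass ExtensionTestCase, as
# described in the class docstring above.  "MyExtension" is a hypothetical extension,
# and the "extension_class" attribute / "self.extension" instance are assumed from
# the djblets ExtensionTestCaseMixin's documented usage rather than shown here, so
# treat this as an illustration, not an authoritative recipe:
#
#     from my_extension.extension import MyExtension
#
#     class MyExtensionTests(ExtensionTestCase):
#         extension_class = MyExtension
#
#         def test_extension_is_set_up(self):
#             self.assertIsNotNone(self.extension)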
| 33.823529
| 77
| 0.745217
|
e7e5d906727d7f2ad28b7f5cc44bf562fb2432f7
| 1,967
|
py
|
Python
|
openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_check/Opengauss_Function_Tools_gs_check_Case0234.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_check/Opengauss_Function_Tools_gs_check_Case0234.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_check/Opengauss_Function_Tools_gs_check_Case0234.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : Server-side tool
Case Name   : User check of swap memory (combined test of the -i and -U parameters)
Description :
    Run the openGauss user check for the specified check item: gs_check -i CheckSwapMemory -U omm
Expect      :
    Check completes successfully
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Tools(unittest.TestCase):
def setUp(self):
logger.info('--------------Opengauss_Function_Tools_gs_check_Case0234start-------------------')
self.dbuserNode = Node('dbuser')
self.rootNode = Node('default')
self.Constant = Constant()
def test_server_tools(self):
        logger.info('------------------Run openGauss user check of swap memory------------------')
check_cmd1 = f'''
source {macro.DB_ENV_PATH}
gs_check -i CheckSwapMemory -U {self.dbuserNode.ssh_user}
'''
logger.info(check_cmd1)
msg1 = self.dbuserNode.sh(check_cmd1).result()
logger.info(msg1)
flag = (self.Constant.GS_CHECK_SUCCESS_MSG2[0] in msg1 or self.Constant.GS_CHECK_SUCCESS_MSG2[1] in msg1) and \
self.Constant.GS_CHECK_SUCCESS_MSG2[2] in msg1
self.assertTrue(flag)
def tearDown(self):
        logger.info('--------------No environment cleanup required-------------------')
logger.info('------------------Opengauss_Function_Tools_gs_check_Case0234finish------------------')
| 35.125
| 119
| 0.638027
|
997da9e542fdace66007708c59806f1b5c1a8fd3
| 44,301
|
py
|
Python
|
sklearn/manifold/tests/test_t_sne.py
|
henrytseng/scikit-learn
|
75c58c39eb9eed15ba6c96c52b6adcd95f045294
|
[
"BSD-3-Clause"
] | null | null | null |
sklearn/manifold/tests/test_t_sne.py
|
henrytseng/scikit-learn
|
75c58c39eb9eed15ba6c96c52b6adcd95f045294
|
[
"BSD-3-Clause"
] | 1
|
2022-03-09T19:32:57.000Z
|
2022-03-09T20:23:30.000Z
|
sklearn/manifold/tests/test_t_sne.py
|
henrytseng/scikit-learn
|
75c58c39eb9eed15ba6c96c52b6adcd95f045294
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from io import StringIO
import numpy as np
from numpy.testing import assert_allclose
import scipy.sparse as sp
import pytest
import warnings
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import kneighbors_graph
from sklearn.exceptions import EfficiencyWarning
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold._t_sne import _joint_probabilities
from sklearn.manifold._t_sne import _joint_probabilities_nn
from sklearn.manifold._t_sne import _kl_divergence
from sklearn.manifold._t_sne import _kl_divergence_bh
from sklearn.manifold._t_sne import _gradient_descent
from sklearn.manifold._t_sne import trustworthiness
from sklearn.manifold import TSNE
# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'
from sklearn.manifold import _barnes_hut_tsne # type: ignore
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import cosine_distances
x = np.linspace(0, 1, 10)
xx, yy = np.meshgrid(x, x)
X_2d_grid = np.hstack(
[
xx.ravel().reshape(-1, 1),
yy.ravel().reshape(-1, 1),
]
)
pytestmark = pytest.mark.filterwarnings(
"ignore:The PCA initialization in TSNE will change to have the standard deviation",
)
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _, compute_error=True):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_, compute_error=True):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(),
np.zeros(1),
0,
n_iter=100,
n_iter_without_progress=100,
momentum=0.0,
learning_rate=0.0,
min_gain=0.0,
min_grad_norm=1e-5,
verbose=2,
)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert error == 1.0
assert it == 0
assert "gradient norm" in out
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function,
np.zeros(1),
0,
n_iter=100,
n_iter_without_progress=10,
momentum=0.0,
learning_rate=0.0,
min_gain=0.0,
min_grad_norm=0.0,
verbose=2,
)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert error == 0.0
assert it == 11
assert "did not make any progress" in out
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(),
np.zeros(1),
0,
n_iter=11,
n_iter_without_progress=100,
momentum=0.0,
learning_rate=0.0,
min_gain=0.0,
min_grad_norm=0.0,
verbose=2,
)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert error == 0.0
assert it == 10
assert "Iteration 10" in out
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
data = random_state.randn(50, 5)
distances = pairwise_distances(data).astype(np.float32)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean(
[np.exp(-np.sum(P[i] * np.log(P[i]))) for i in range(P.shape[0])]
)
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
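# For reference: the mean perplexity above follows the standard definition
# Perp(P_i) = exp(H(P_i)), with entropy H(P_i) = -sum_j P_{j|i} * log(P_{j|i})
# taken in nats, while test_binary_search_underflow below uses the equivalent
# base-2 form Perp(P_i) = 2 ** H_2(P_i).  The two agree because
# 2 ** (H / log(2)) == exp(H).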
def test_binary_search_underflow():
# Test if the binary search finds Gaussians with desired perplexity.
# A more challenging case than the one above, producing numeric
# underflow in float precision (see issue #19471 and PR #19472).
random_state = check_random_state(42)
data = random_state.randn(1, 90).astype(np.float32) + 100
desired_perplexity = 30.0
P = _binary_search_perplexity(data, desired_perplexity, verbose=0)
perplexity = 2 ** -np.nansum(P[0, 1:] * np.log2(P[0, 1:]))
assert_almost_equal(perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 200
desired_perplexity = 25.0
random_state = check_random_state(0)
data = random_state.randn(n_samples, 2).astype(np.float32, copy=False)
distances = pairwise_distances(data)
P1 = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
# Test that when we use all the neighbors the results are identical
n_neighbors = n_samples - 1
nn = NearestNeighbors().fit(data)
distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance")
distances_nn = distance_graph.data.astype(np.float32, copy=False)
distances_nn = distances_nn.reshape(n_samples, n_neighbors)
P2 = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0)
indptr = distance_graph.indptr
P1_nn = np.array(
[
P1[k, distance_graph.indices[indptr[k] : indptr[k + 1]]]
for k in range(n_samples)
]
)
assert_array_almost_equal(P1_nn, P2, decimal=4)
# Test that the highest P_ij are the same when fewer neighbors are used
for k in np.linspace(150, n_samples - 1, 5):
k = int(k)
topn = k * 10 # check the top 10 * k entries out of k * k entries
distance_graph = nn.kneighbors_graph(n_neighbors=k, mode="distance")
distances_nn = distance_graph.data.astype(np.float32, copy=False)
distances_nn = distances_nn.reshape(n_samples, k)
P2k = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0)
assert_array_almost_equal(P1_nn, P2, decimal=2)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
idx = np.argsort(P2k.ravel())[::-1]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
n_neighbors = 10
n_samples = 100
random_state = check_random_state(0)
data = random_state.randn(n_samples, 5)
nn = NearestNeighbors().fit(data)
distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance")
distances = distance_graph.data.astype(np.float32, copy=False)
distances = distances.reshape(n_samples, n_neighbors)
last_P = None
desired_perplexity = 3
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), desired_perplexity, verbose=0)
P1 = _joint_probabilities_nn(distance_graph, desired_perplexity, verbose=0)
# Convert the sparse matrix to a dense one for testing
P1 = P1.toarray()
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)
P = _joint_probabilities(distances, desired_perplexity=25.0, verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0, decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert trustworthiness(X, 5.0 + X / 10.0) == 1.0
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert trustworthiness(X, X_embedded) < 0.6
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_trustworthiness_n_neighbors_error():
"""Raise an error when n_neighbors >= n_samples / 2.
Non-regression test for #18567.
"""
regex = "n_neighbors .+ should be less than .+"
rng = np.random.RandomState(42)
X = rng.rand(7, 4)
X_embedded = rng.rand(7, 2)
with pytest.raises(ValueError, match=regex):
trustworthiness(X, X_embedded, n_neighbors=5)
trust = trustworthiness(X, X_embedded, n_neighbors=3)
assert 0 <= trust <= 1
@pytest.mark.parametrize("method", ["exact", "barnes_hut"])
@pytest.mark.parametrize("init", ("random", "pca"))
def test_preserve_trustworthiness_approximately(method, init):
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
n_components = 2
X = random_state.randn(50, n_components).astype(np.float32)
tsne = TSNE(
n_components=n_components,
init=init,
random_state=0,
method=method,
n_iter=700,
learning_rate="auto",
)
X_embedded = tsne.fit_transform(X)
t = trustworthiness(X, X_embedded, n_neighbors=1)
assert t > 0.85
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [250, 300, 350]:
tsne = TSNE(
n_components=2,
init="random",
perplexity=10,
learning_rate=100.0,
n_iter=n_iter,
random_state=0,
)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert kl_divergences[1] <= kl_divergences[0]
assert kl_divergences[2] <= kl_divergences[1]
@pytest.mark.parametrize("method", ["exact", "barnes_hut"])
def test_fit_csr_matrix(method):
# X can be a sparse matrix.
rng = check_random_state(0)
X = rng.randn(50, 2)
X[(rng.randint(0, 50, 25), rng.randint(0, 2, 25))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(
n_components=2,
init="random",
perplexity=10,
learning_rate=100.0,
random_state=0,
method=method,
n_iter=750,
)
X_embedded = tsne.fit_transform(X_csr)
assert_allclose(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, rtol=1.1e-1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
for i in range(3):
X = random_state.randn(80, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(
n_components=2,
perplexity=2,
learning_rate=100.0,
early_exaggeration=2.0,
metric="precomputed",
random_state=i,
verbose=0,
n_iter=500,
init="random",
)
X_embedded = tsne.fit_transform(D)
t = trustworthiness(D, X_embedded, n_neighbors=1, metric="precomputed")
assert t > 0.95
def test_trustworthiness_not_euclidean_metric():
# Test trustworthiness with a metric different from 'euclidean' and
# 'precomputed'
random_state = check_random_state(0)
X = random_state.randn(100, 2)
assert trustworthiness(X, X, metric="cosine") == trustworthiness(
pairwise_distances(X, metric="cosine"), X, metric="precomputed"
)
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99, perplexity=1)
with pytest.raises(ValueError, match="early_exaggeration .*"):
tsne.fit_transform(np.array([[0.0], [0.0]]))
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199, perplexity=1)
with pytest.raises(ValueError, match="n_iter .*"):
tsne.fit_transform(np.array([[0.0], [0.0]]))
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.parametrize(
"method, retype",
[
("exact", np.asarray),
("barnes_hut", np.asarray),
("barnes_hut", sp.csr_matrix),
],
)
@pytest.mark.parametrize(
"D, message_regex",
[
([[0.0], [1.0]], ".* square distance matrix"),
([[0.0, -1.0], [1.0, 0.0]], ".* positive.*"),
],
)
def test_bad_precomputed_distances(method, D, retype, message_regex):
tsne = TSNE(
metric="precomputed",
method=method,
init="random",
random_state=42,
perplexity=1,
)
with pytest.raises(ValueError, match=message_regex):
tsne.fit_transform(retype(D))
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
def test_exact_no_precomputed_sparse():
tsne = TSNE(
metric="precomputed",
method="exact",
init="random",
random_state=42,
perplexity=1,
)
with pytest.raises(TypeError, match="sparse"):
tsne.fit_transform(sp.csr_matrix([[0, 5], [5, 0]]))
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
def test_high_perplexity_precomputed_sparse_distances():
# Perplexity should be less than 50
dist = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
bad_dist = sp.csr_matrix(dist)
tsne = TSNE(metric="precomputed", init="random", random_state=42, perplexity=1)
msg = "3 neighbors per samples are required, but some samples have only 1"
with pytest.raises(ValueError, match=msg):
tsne.fit_transform(bad_dist)
@ignore_warnings(category=EfficiencyWarning)
def test_sparse_precomputed_distance():
"""Make sure that TSNE works identically for sparse and dense matrix"""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D_sparse = kneighbors_graph(X, n_neighbors=100, mode="distance", include_self=True)
D = pairwise_distances(X)
assert sp.issparse(D_sparse)
assert_almost_equal(D_sparse.A, D)
tsne = TSNE(
metric="precomputed", random_state=0, init="random", learning_rate="auto"
)
Xt_dense = tsne.fit_transform(D)
for fmt in ["csr", "lil"]:
Xt_sparse = tsne.fit_transform(D_sparse.asformat(fmt))
assert_almost_equal(Xt_dense, Xt_sparse)
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_non_positive_computed_distances():
# Computed distance matrices must be positive.
def metric(x, y):
return -1
# Negative computed distances should be caught even if result is squared
tsne = TSNE(metric=metric, method="exact", perplexity=1)
X = np.array([[0.0, 0.0], [1.0, 1.0]])
with pytest.raises(ValueError, match="All distances .*metric given.*"):
tsne.fit_transform(X)
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
tsne = TSNE(init="not available", perplexity=1)
m = "'init' must be 'pca', 'random', or a numpy array"
with pytest.raises(ValueError, match=m):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)), learning_rate="auto")
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(
init=np.zeros((100, 2)),
metric="precomputed",
learning_rate=50.0,
)
tsne.fit(np.zeros((100, 100)))
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available", method="exact", perplexity=1)
with pytest.raises(ValueError, match="Unknown metric not available.*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
tsne = TSNE(metric="not available", method="barnes_hut", perplexity=1)
with pytest.raises(ValueError, match="Metric 'not available' not valid.*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_method_not_available():
    # 'method' must be 'barnes_hut' or 'exact'
tsne = TSNE(method="not available", perplexity=1)
with pytest.raises(ValueError, match="'method' must be 'barnes_hut' or "):
tsne.fit_transform(np.array([[0.0], [1.0]]))
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_angle_out_of_range_checks():
# check the angle parameter range
for angle in [-1, -1e-6, 1 + 1e-6, 2]:
tsne = TSNE(angle=angle, perplexity=1)
with pytest.raises(ValueError, match="'angle' must be between 0.0 - 1.0"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices cannot use PCA initialization.
tsne = TSNE(metric="precomputed", init="pca", perplexity=1)
with pytest.raises(
ValueError,
match='The parameter init="pca" cannot be used with metric="precomputed".',
):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_sparse_input():
# Sparse input matrices cannot use PCA initialization.
tsne = TSNE(init="pca", learning_rate=100.0, perplexity=1)
with pytest.raises(TypeError, match="PCA initialization.*"):
tsne.fit_transform(sp.csr_matrix([[0, 5], [5, 0]]))
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_n_components_range():
# barnes_hut method should only be used with n_components <= 3
tsne = TSNE(n_components=4, method="barnes_hut", perplexity=1)
with pytest.raises(ValueError, match="'n_components' should be .*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_early_exaggeration_used():
# check that the ``early_exaggeration`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ["exact", "barnes_hut"]
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
tsne = TSNE(
n_components=n_components,
perplexity=1,
learning_rate=100.0,
init="pca",
random_state=0,
method=method,
early_exaggeration=1.0,
n_iter=250,
)
X_embedded1 = tsne.fit_transform(X)
tsne = TSNE(
n_components=n_components,
perplexity=1,
learning_rate=100.0,
init="pca",
random_state=0,
method=method,
early_exaggeration=10.0,
n_iter=250,
)
X_embedded2 = tsne.fit_transform(X)
assert not np.allclose(X_embedded1, X_embedded2)
def test_n_iter_used():
# check that the ``n_iter`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ["exact", "barnes_hut"]
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
for n_iter in [251, 500]:
tsne = TSNE(
n_components=n_components,
perplexity=1,
learning_rate=0.5,
init="random",
random_state=0,
method=method,
early_exaggeration=1.0,
n_iter=n_iter,
)
tsne.fit_transform(X)
assert tsne.n_iter_ == n_iter - 1
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array(
[[-4.961291e-05, -1.072243e-04], [9.259460e-05, 2.702024e-04]]
)
neighbors = np.array([[1], [0]])
grad_output = np.array(
[[-2.37012478e-05, -6.29044398e-05], [2.37012478e-05, 6.29044398e-05]]
)
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]])
pos_output = np.array(
[
[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05],
]
)
neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]])
grad_output = np.array(
[
[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09],
]
)
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]])
pos_output = np.array(
[
[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05],
]
)
neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]])
grad_output = np.array(
[
[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09],
]
)
_run_answer_test(pos_input, pos_output, neighbors, grad_output, False, 0.1, 2)
def _run_answer_test(
pos_input,
pos_output,
neighbors,
grad_output,
verbose=False,
perplexity=0.1,
skip_num_points=0,
):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64, copy=False)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
from scipy.sparse import csr_matrix
P = csr_matrix(pij_input)
neighbors = P.indices.astype(np.int64)
indptr = P.indptr.astype(np.int64)
_barnes_hut_tsne.gradient(
P.data, pos_output, neighbors, indptr, grad_bh, 0.5, 2, 1, skip_num_points=0
)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2, perplexity=4)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert "[t-SNE]" in out
assert "nearest neighbors..." in out
assert "Computed conditional probabilities" in out
assert "Mean sigma" in out
assert "early exaggeration" in out
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev", perplexity=4)
X = random_state.randn(5, 2)
tsne.fit_transform(X)
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1, perplexity=4)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert np.all(np.isfinite(X_embedded))
@pytest.mark.parametrize("method", ["barnes_hut", "exact"])
@pytest.mark.parametrize("dt", [np.float32, np.float64])
def test_64bit(method, dt):
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
X = random_state.randn(10, 2).astype(dt, copy=False)
tsne = TSNE(
n_components=2,
perplexity=2,
learning_rate=100.0,
random_state=0,
method=method,
verbose=0,
n_iter=300,
init="random",
)
X_embedded = tsne.fit_transform(X)
effective_type = X_embedded.dtype
# tsne cython code is only single precision, so the output will
# always be single precision, irrespectively of the input dtype
assert effective_type == np.float32
@pytest.mark.parametrize("method", ["barnes_hut", "exact"])
def test_kl_divergence_not_nan(method):
# Ensure kl_divergence_ is computed at last iteration
    # even though n_iter % n_iter_check != 0, i.e. 503 % 50 != 0
random_state = check_random_state(0)
X = random_state.randn(50, 2)
tsne = TSNE(
n_components=2,
perplexity=2,
learning_rate=100.0,
random_state=0,
method=method,
verbose=0,
n_iter=503,
init="random",
)
tsne.fit_transform(X)
assert not np.isnan(tsne.kl_divergence_)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
data = random_state.randn(n_samples, n_features)
distances = pairwise_distances(data)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, verbose=0)
kl_exact, grad_exact = _kl_divergence(
params, P, degrees_of_freedom, n_samples, n_components
)
n_neighbors = n_samples - 1
distances_csr = (
NearestNeighbors()
.fit(data)
.kneighbors_graph(n_neighbors=n_neighbors, mode="distance")
)
P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
kl_bh, grad_bh = _kl_divergence_bh(
params,
P_bh,
degrees_of_freedom,
n_samples,
n_components,
angle=angle,
skip_num_points=0,
verbose=0,
)
P = squareform(P)
P_bh = P_bh.toarray()
assert_array_almost_equal(P_bh, P, decimal=5)
assert_almost_equal(kl_exact, kl_bh, decimal=3)
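# Background for the angle=0 case above: the Barnes-Hut approximation summarizes a
# whole quadtree cell as a single point whenever cell_size / distance < angle (the
# usual Barnes-Hut opening criterion).  With angle = 0 that condition never holds,
# so every pairwise interaction is computed and the KL value and gradient should
# match the exact O(N^2) implementation up to numerical precision, which is what
# the assertions above check.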
@skip_if_32bit
def test_n_iter_without_progress():
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 10)
for method in ["barnes_hut", "exact"]:
tsne = TSNE(
n_iter_without_progress=-1,
verbose=2,
learning_rate=1e8,
random_state=0,
method=method,
n_iter=351,
init="random",
)
tsne._N_ITER_CHECK = 1
tsne._EXPLORATION_N_ITER = 0
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert "did not make any progress during the last -1 episodes. Finished." in out
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2, random_state=0, method="exact")
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split("\n")
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
        # When the computation is finished, the last gradient norm value is just
        # repeated; we do not need to store it
if "Finished" in line:
break
start_grad_norm = line.find("gradient norm")
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace("gradient norm = ", "").split(" ")[0]
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = len(
gradient_norm_values[gradient_norm_values <= min_grad_norm]
)
    # The gradient norm can be smaller than min_grad_norm at most once,
    # because the optimization stops as soon as it becomes smaller
assert n_smaller_gradient_norms <= 1
@pytest.mark.filterwarnings("ignore:The default learning rate in TSNE")
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_accessible_kl_divergence():
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(50, 2)
tsne = TSNE(
n_iter_without_progress=2, verbose=2, random_state=0, method="exact", n_iter=500
)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in out.split("\n")[::-1]:
if "Iteration" in line:
_, _, error = line.partition("error = ")
if error:
error, _, _ = error.partition(",")
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
@pytest.mark.parametrize("method", ["barnes_hut", "exact"])
def test_uniform_grid(method):
"""Make sure that TSNE can approximately recover a uniform 2D grid
    Due to ties in distances between points in X_2d_grid, this test is platform
dependent for ``method='barnes_hut'`` due to numerical imprecision.
Also, t-SNE is not assured to converge to the right solution because bad
    initialization can lead to convergence to a bad local minimum (the
optimization problem is non-convex). To avoid breaking the test too often,
we re-run t-SNE from the final point when the convergence is not good
enough.
"""
seeds = range(3)
n_iter = 500
for seed in seeds:
tsne = TSNE(
n_components=2,
init="random",
random_state=seed,
perplexity=50,
n_iter=n_iter,
method=method,
learning_rate="auto",
)
Y = tsne.fit_transform(X_2d_grid)
try_name = "{}_{}".format(method, seed)
try:
assert_uniform_grid(Y, try_name)
except AssertionError:
# If the test fails a first time, re-run with init=Y to see if
# this was caused by a bad initialization. Note that this will
# also run an early_exaggeration step.
try_name += ":rerun"
tsne.init = Y
Y = tsne.fit_transform(X_2d_grid)
assert_uniform_grid(Y, try_name)
def assert_uniform_grid(Y, try_name=None):
# Ensure that the resulting embedding leads to approximately
# uniformly spaced points: the distance to the closest neighbors
# should be non-zero and approximately constant.
nn = NearestNeighbors(n_neighbors=1).fit(Y)
dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
assert dist_to_nn.min() > 0.1
smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
assert smallest_to_mean > 0.5, try_name
assert largest_to_mean < 2, try_name
def test_bh_match_exact():
# check that the ``barnes_hut`` method match the exact one when
# ``angle = 0`` and ``perplexity > n_samples / 3``
random_state = check_random_state(0)
n_features = 10
X = random_state.randn(30, n_features).astype(np.float32)
X_embeddeds = {}
n_iter = {}
for method in ["exact", "barnes_hut"]:
tsne = TSNE(
n_components=2,
method=method,
learning_rate=1.0,
init="random",
random_state=0,
n_iter=251,
perplexity=29.5,
angle=0,
)
# Kill the early_exaggeration
tsne._EXPLORATION_N_ITER = 0
X_embeddeds[method] = tsne.fit_transform(X)
n_iter[method] = tsne.n_iter_
assert n_iter["exact"] == n_iter["barnes_hut"]
assert_allclose(X_embeddeds["exact"], X_embeddeds["barnes_hut"], rtol=1e-4)
def test_gradient_bh_multithread_match_sequential():
# check that the bh gradient with different num_threads gives the same
# results
n_features = 10
n_samples = 30
n_components = 2
degrees_of_freedom = 1
angle = 3
perplexity = 5
random_state = check_random_state(0)
data = random_state.randn(n_samples, n_features).astype(np.float32)
params = random_state.randn(n_samples, n_components)
n_neighbors = n_samples - 1
distances_csr = (
NearestNeighbors()
.fit(data)
.kneighbors_graph(n_neighbors=n_neighbors, mode="distance")
)
P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
kl_sequential, grad_sequential = _kl_divergence_bh(
params,
P_bh,
degrees_of_freedom,
n_samples,
n_components,
angle=angle,
skip_num_points=0,
verbose=0,
num_threads=1,
)
for num_threads in [2, 4]:
kl_multithread, grad_multithread = _kl_divergence_bh(
params,
P_bh,
degrees_of_freedom,
n_samples,
n_components,
angle=angle,
skip_num_points=0,
verbose=0,
num_threads=num_threads,
)
assert_allclose(kl_multithread, kl_sequential, rtol=1e-6)
        assert_allclose(grad_multithread, grad_sequential)
def test_tsne_with_different_distance_metrics():
"""Make sure that TSNE works for different distance metrics"""
random_state = check_random_state(0)
n_components_original = 3
n_components_embedding = 2
X = random_state.randn(50, n_components_original).astype(np.float32)
metrics = ["manhattan", "cosine"]
dist_funcs = [manhattan_distances, cosine_distances]
for metric, dist_func in zip(metrics, dist_funcs):
X_transformed_tsne = TSNE(
metric=metric,
n_components=n_components_embedding,
random_state=0,
n_iter=300,
init="random",
learning_rate="auto",
).fit_transform(X)
X_transformed_tsne_precomputed = TSNE(
metric="precomputed",
n_components=n_components_embedding,
random_state=0,
n_iter=300,
init="random",
learning_rate="auto",
).fit_transform(dist_func(X))
assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed)
# TODO: Remove in 1.2
@pytest.mark.parametrize("init", [None, "random", "pca"])
def test_tsne_init_futurewarning(init):
"""Make sure that a FutureWarning is only raised when the
init is not specified or is 'pca'."""
random_state = check_random_state(0)
X = random_state.randn(5, 2)
kwargs = dict(learning_rate=200.0, init=init, perplexity=4)
tsne = TSNE(**{k: v for k, v in kwargs.items() if v is not None})
if init is None:
with pytest.warns(FutureWarning, match="The default initialization.*"):
tsne.fit_transform(X)
elif init == "pca":
with pytest.warns(FutureWarning, match="The PCA initialization.*"):
tsne.fit_transform(X)
else:
with warnings.catch_warnings():
warnings.simplefilter("error", FutureWarning)
tsne.fit_transform(X)
# TODO: Remove in 1.2
@pytest.mark.parametrize("learning_rate", [None, 200.0])
def test_tsne_learning_rate_futurewarning(learning_rate):
"""Make sure that a FutureWarning is only raised when the learning rate
is not specified"""
random_state = check_random_state(0)
X = random_state.randn(5, 2)
kwargs = dict(learning_rate=learning_rate, init="random", perplexity=4)
tsne = TSNE(**{k: v for k, v in kwargs.items() if v is not None})
if learning_rate is None:
with pytest.warns(FutureWarning, match="The default learning rate.*"):
tsne.fit_transform(X)
else:
with warnings.catch_warnings():
warnings.simplefilter("error", FutureWarning)
tsne.fit_transform(X)
@pytest.mark.filterwarnings("ignore:The default initialization in TSNE")
def test_tsne_negative_learning_rate():
"""Make sure that negative learning rate results in a ValueError"""
random_state = check_random_state(0)
X = random_state.randn(5, 2)
with pytest.raises(ValueError, match="'learning_rate' must be.*"):
TSNE(learning_rate=-50.0, perplexity=4).fit_transform(X)
@pytest.mark.parametrize("method", ["exact", "barnes_hut"])
def test_tsne_n_jobs(method):
"""Make sure that the n_jobs parameter doesn't impact the output"""
random_state = check_random_state(0)
n_features = 10
X = random_state.randn(30, n_features)
X_tr_ref = TSNE(
n_components=2,
method=method,
perplexity=25.0,
angle=0,
n_jobs=1,
random_state=0,
init="random",
learning_rate="auto",
).fit_transform(X)
X_tr = TSNE(
n_components=2,
method=method,
perplexity=25.0,
angle=0,
n_jobs=2,
random_state=0,
init="random",
learning_rate="auto",
).fit_transform(X)
assert_allclose(X_tr_ref, X_tr)
# TODO: Remove filterwarnings in 1.2
@pytest.mark.filterwarnings("ignore:.*TSNE will change.*:FutureWarning")
def test_tsne_with_mahalanobis_distance():
"""Make sure that method_parameters works with mahalanobis distance."""
random_state = check_random_state(0)
n_samples, n_features = 300, 10
X = random_state.randn(n_samples, n_features)
default_params = {
"perplexity": 40,
"n_iter": 250,
"learning_rate": "auto",
"n_components": 3,
"random_state": 0,
}
tsne = TSNE(metric="mahalanobis", **default_params)
msg = "Must provide either V or VI for Mahalanobis distance"
with pytest.raises(ValueError, match=msg):
tsne.fit_transform(X)
precomputed_X = squareform(pdist(X, metric="mahalanobis"), checks=True)
X_trans_expected = TSNE(metric="precomputed", **default_params).fit_transform(
precomputed_X
)
X_trans = TSNE(
metric="mahalanobis", metric_params={"V": np.cov(X.T)}, **default_params
).fit_transform(X)
assert_allclose(X_trans, X_trans_expected)
@pytest.mark.filterwarnings("ignore:The PCA initialization in TSNE will change")
# FIXME: remove in 1.3 after deprecation of `square_distances`
def test_tsne_deprecation_square_distances():
"""Check that we raise a warning regarding the removal of
`square_distances`.
Also check the parameters do not have any effect.
"""
random_state = check_random_state(0)
X = random_state.randn(30, 10)
tsne = TSNE(
n_components=2,
init="pca",
learning_rate="auto",
perplexity=25.0,
angle=0,
n_jobs=1,
random_state=0,
square_distances=True,
)
warn_msg = (
"The parameter `square_distances` has not effect and will be removed in"
" version 1.3"
)
with pytest.warns(FutureWarning, match=warn_msg):
X_trans_1 = tsne.fit_transform(X)
tsne = TSNE(
n_components=2,
init="pca",
learning_rate="auto",
perplexity=25.0,
angle=0,
n_jobs=1,
random_state=0,
)
X_trans_2 = tsne.fit_transform(X)
assert_allclose(X_trans_1, X_trans_2)
@pytest.mark.parametrize("perplexity", (20, 30))
def test_tsne_perplexity_validation(perplexity):
"""Make sure that perplexity > n_samples results in a ValueError"""
random_state = check_random_state(0)
X = random_state.randn(20, 2)
est = TSNE(
learning_rate="auto",
init="pca",
perplexity=perplexity,
random_state=random_state,
)
msg = "perplexity must be less than n_samples"
with pytest.raises(ValueError, match=msg):
est.fit_transform(X)
| 33.921133
| 88
| 0.655042
|
5a9569fe1ec40dea79d5328bb11c86cc09d539d2
| 2,909
|
py
|
Python
|
tasks/hard-300/shooting-kers/service/generate_html.py
|
C4T-BuT-S4D/nordctf-2019-finals
|
b63d00efe9912de3e165badfa29be1483d731ecf
|
[
"WTFPL"
] | 5
|
2019-10-12T11:04:05.000Z
|
2021-01-21T16:56:29.000Z
|
tasks/hard-300/shooting-kers/service/generate_html.py
|
C4T-BuT-S4D/nordctf-2019-finals
|
b63d00efe9912de3e165badfa29be1483d731ecf
|
[
"WTFPL"
] | null | null | null |
tasks/hard-300/shooting-kers/service/generate_html.py
|
C4T-BuT-S4D/nordctf-2019-finals
|
b63d00efe9912de3e165badfa29be1483d731ecf
|
[
"WTFPL"
] | 1
|
2021-01-21T16:56:33.000Z
|
2021-01-21T16:56:33.000Z
|
#!/usr/bin/env python3
import re
from html import escape
def get_offsets(line):
parts = re.findall(r'\S+\s+', line)
offsets = [0]
for part in parts:
offsets.append(offsets[-1] + len(part))
return offsets
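# Worked example for get_offsets (values are illustrative): given the header line
#     "line     #*  op"
# re.findall(r'\S+\s+', line) returns ['line     ', '#*  '] (the final token has no
# trailing whitespace and is dropped), so offsets becomes [0, 9, 13] - the starting
# column of each whitespace-padded field, which read_table() below uses to slice
# the fixed-width rows.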
def read_table():
length_header = [input() for i in range(4)][2]
    length = int(re.search(r'\d+', length_header).group(0))
header = input().replace('line #*', 'line #* ')
offsets = get_offsets(header)
header = header.split()
input()
table = [[] for i in range(len(offsets))]
for i, title in enumerate(header):
table[i].append(title)
for i in range(length):
line = input()
for k, (x, y) in enumerate(zip(offsets, offsets[1:] + [len(line)*2])):
table[k].append(line[x:y].strip())
return table
def build_code(table):
code = []
styles = ''' <style type="text/css">
body {
background-color: #fff;
color: #222;
font-family: sans-serif;
font-size: 12pt;
}
table {
margin:0 auto;
text-align: left;
border-collapse: collapse;
border: 0;
width: 934px;
box-shadow: 1px 2px 3px #ccc;
}
td, th {
border: 1px solid #666;
vertical-align: baseline;
padding: 4px 5px;
}
thead tr {
background-color: #99c;
font-weight: bold;
font-size: 14pt;
}
tbody tr {
background-color: #ddd;
max-width: 300px;
overflow-x: auto;
word-wrap: break-word;
}
</style>'''
code.extend([
'<!DOCTYPE html>',
'<html>',
' <head>',
' <title>Shooting Kers</title>',
styles,
' </head>',
' <body>',
' <table>',
' <thead>',
' <tr>'
])
for i in range(len(table)):
code.append(' <td>%s</td>' % table[i][0].upper())
code.extend([
' </tr>',
' </thead>',
' <tbody>',
])
table_iter = zip(*table)
next(table_iter)
for line in table_iter:
code.append(' <tr>')
for element in line:
code.append(' <td><pre>%s</pre></td>' % escape(element))
code.append(' </tr>')
code.extend([
' </tbody>',
' </table>',
' </body>',
        '</html>'
])
return '\n'.join(code)
def main():
table = read_table()
html = build_code(table)
print(html)
if __name__ == '__main__':
main()
| 26.445455
| 87
| 0.422482
|
21baf75d7c37f497ff7f11dc41cd95ccb0e3f835
| 5,570
|
py
|
Python
|
networkx/generators/nonisomorphic_trees.py
|
jdrudolph/networkx
|
d49733b61fb7dedfbbbdd9c04464adf6beb5db07
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/generators/nonisomorphic_trees.py
|
jdrudolph/networkx
|
d49733b61fb7dedfbbbdd9c04464adf6beb5db07
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/generators/nonisomorphic_trees.py
|
jdrudolph/networkx
|
d49733b61fb7dedfbbbdd9c04464adf6beb5db07
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Implementation of the Wright, Richmond, Odlyzko and McKay (WROM)
algorithm for the enumeration of all non-isomorphic free trees of a
given order. Rooted trees are represented by level sequences, i.e.,
lists in which the i-th element specifies the distance of vertex i to
the root.
"""
# Copyright (C) 2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["Aric Hagberg (hagberg@lanl.gov)",
"Mridul Seth (seth.mridul@gmail.com)"])
__all__ = ['nonisomorphic_trees',
'number_of_nonisomorphic_trees']
import networkx as nx
def nonisomorphic_trees(order, create="graph"):
"""Returns a list of nonisomporphic trees
Parameters
----------
order : int
order of the desired tree(s)
    create : graph or matrix (default="graph")
        If graph is selected a list of trees will be returned,
        if matrix is selected a list of adjacency matrices will
        be returned
Returns
-------
G : List of NetworkX Graphs
M : List of Adjacency matrices
Reference
---------
"""
if order < 2:
raise ValueError
# start at the path graph rooted at its center
layout = list(range(order // 2 + 1)) + list(range(1, (order + 1) // 2))
while layout is not None:
layout = _next_tree(layout)
if layout is not None:
if create == "graph":
yield _layout_to_graph(layout)
elif create == "matrix":
yield _layout_to_matrix(layout)
layout = _next_rooted_tree(layout)
def number_of_nonisomorphic_trees(order):
"""Returns the number of nonisomorphic trees
Parameters
----------
order : int
order of the desired tree(s)
Returns
-------
length : Number of nonisomorphic graphs for the given order
Reference
---------
"""
length = sum(1 for _ in nonisomorphic_trees(order))
return length
def _next_rooted_tree(predecessor, p=None):
"""One iteration of the Beyer-Hedetniemi algorithm."""
if p is None:
p = len(predecessor) - 1
while predecessor[p] == 1:
p -= 1
if p == 0:
return None
q = p - 1
while predecessor[q] != predecessor[p] - 1:
q -= 1
result = list(predecessor)
for i in range(p, len(result)):
result[i] = result[i - p + q]
return result
def _next_tree(candidate):
"""One iteration of the Wright, Richmond, Odlyzko and McKay
algorithm."""
# valid representation of a free tree if:
# there are at least two vertices at layer 1
# (this is always the case because we start at the path graph)
left, rest = _split_tree(candidate)
# and the left subtree of the root
# is less high than the tree with the left subtree removed
left_height = max(left)
rest_height = max(rest)
valid = rest_height >= left_height
if valid and rest_height == left_height:
# and, if left and rest are of the same height,
# if left does not encompass more vertices
if len(left) > len(rest):
valid = False
        # and, if they have the same number of vertices,
# if left does not come after rest lexicographically
elif len(left) == len(rest) and left > rest:
valid = False
if valid:
return candidate
else:
# jump to the next valid free tree
p = len(left)
new_candidate = _next_rooted_tree(candidate, p)
if candidate[p] > 2:
new_left, new_rest = _split_tree(new_candidate)
new_left_height = max(new_left)
suffix = range(1, new_left_height + 2)
new_candidate[-len(suffix):] = suffix
return new_candidate
def _split_tree(layout):
"""Return a tuple of two layouts, one containing the left
subtree of the root vertex, and one containing the original tree
with the left subtree removed."""
one_found = False
m = None
for i in range(len(layout)):
if layout[i] == 1:
if one_found:
m = i
break
else:
one_found = True
if m is None:
m = len(layout)
left = [layout[i] - 1 for i in range(1, m)]
rest = [0] + [layout[i] for i in range(m, len(layout))]
return (left, rest)
def _layout_to_matrix(layout):
"""Create the adjacency matrix for the tree specified by the
given layout (level sequence)."""
result = [[0] * len(layout) for i in range(len(layout))]
stack = []
for i in range(len(layout)):
i_level = layout[i]
if stack:
j = stack[-1]
j_level = layout[j]
while j_level >= i_level:
stack.pop()
j = stack[-1]
j_level = layout[j]
result[i][j] = result[j][i] = 1
stack.append(i)
return result
def _layout_to_graph(layout):
"""Create a NetworkX Graph for the tree specified by the
given layout(level sequence)"""
result = [[0] * len(layout) for i in range(len(layout))]
G = nx.Graph()
stack = []
for i in range(len(layout)):
i_level = layout[i]
if stack:
j = stack[-1]
j_level = layout[j]
while j_level >= i_level:
stack.pop()
j = stack[-1]
j_level = layout[j]
G.add_edge(i, j)
stack.append(i)
return G
| 27.574257
| 75
| 0.585996
|
153486489979c55ad088f513f5b7d27d4f0ab901
| 501
|
py
|
Python
|
Sound Analysis WeWalk/Comparer.py
|
3arii/UpSpectrum
|
bccab66b6f5d36038a34c1a893a65a7eafae15c3
|
[
"MIT"
] | null | null | null |
Sound Analysis WeWalk/Comparer.py
|
3arii/UpSpectrum
|
bccab66b6f5d36038a34c1a893a65a7eafae15c3
|
[
"MIT"
] | null | null | null |
Sound Analysis WeWalk/Comparer.py
|
3arii/UpSpectrum
|
bccab66b6f5d36038a34c1a893a65a7eafae15c3
|
[
"MIT"
] | null | null | null |
from PitchDetector import pitch_detector
from PyaudioRecorder import record_audio
src = "C:/Users/deniz/OneDrive/Documents/Deniz the Laps/Wewalk/Sound Analysis WeWalk/output.wav"
src2 = "C:/Users/deniz/OneDrive/Documents/Deniz the Laps/Wewalk/Sound Analysis WeWalk/output2.wav"
record_audio(src, 4)
record_audio(src2, 4)
pitch = pitch_detector(src)
pitch2 = pitch_detector(src2)
if pitch > pitch2:
    print("First sound is higher in pitch.")
else:
    print("Second sound is higher in pitch.")
| 27.833333
| 99
| 0.744511
|
cd1d757725c82f8b31048bec3a1c4f5efd7a586e
| 1,663
|
py
|
Python
|
tasksplataform/googleapi.py
|
exodojaffar/EADGetTasks
|
809e2577b6268c4e4f569810c46daf6db9250ff3
|
[
"MIT"
] | 4
|
2021-02-08T00:29:19.000Z
|
2021-05-18T03:48:46.000Z
|
tasksplataform/googleapi.py
|
exodojaffar/EADGetTasks
|
809e2577b6268c4e4f569810c46daf6db9250ff3
|
[
"MIT"
] | 1
|
2020-10-14T15:17:37.000Z
|
2020-10-14T15:17:37.000Z
|
tasksplataform/googleapi.py
|
exodojaffar/EADGetTasks
|
809e2577b6268c4e4f569810c46daf6db9250ff3
|
[
"MIT"
] | null | null | null |
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from google.auth.transport.requests import Request
import pickle
import os.path
class GoogleApi:
def __init__(self, scopes, service, version, credentials_file="credentials.json"):
self.google_service = None
self.__scopes = [scopes]
self.__service = service
self.__version = version
self.__credentials_file = credentials_file
self.__auth()
self.__create__google_service()
pass
def __auth(self):
if os.path.exists('token.pickle'):
self.__load_token_file()
self.__validate_auth()
else:
self.__get_auth()
# Save the credentials for the next run
self.__save_token_file()
pass
def __set_creds(self, creds):
self.__creds = creds
pass
def __save_token_file(self):
with open('token.pickle', 'wb') as token:
pickle.dump(self.__creds, token)
pass
def __load_token_file(self):
with open('token.pickle', 'rb') as token:
self.__set_creds(pickle.load(token))
pass
    def __validate_auth(self):
        if not self.__creds or not self.__creds.valid:
            if self.__creds and self.__creds.expired and self.__creds.refresh_token:
                self.__creds.refresh(Request())
            else:
                # Stored credentials are stale and cannot be refreshed; run the auth flow again.
                self.__get_auth()
        pass
def __get_auth(self):
flow = InstalledAppFlow.from_client_secrets_file(self.__credentials_file, self.__scopes)
self.__set_creds(flow.run_local_server(port=0))
def __create__google_service(self):
self.google_service = build(self.__service, self.__version, credentials=self.__creds)
pass
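# Illustrative sketch (not part of the original module): once authenticated, the
# wrapped googleapiclient resource is available as `google_service`. The call
# below follows the Google Tasks API v1 quickstart; it is an assumed example for
# demonstration and is not exercised elsewhere in this file.
def example_list_tasklists():
    api = GoogleApi("https://www.googleapis.com/auth/tasks", 'tasks', 'v1')
    results = api.google_service.tasklists().list(maxResults=10).execute()
    for item in results.get('items', []):
        print(item['title'])
    pass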
def test_with_tasks():
api = GoogleApi("https://www.googleapis.com/auth/tasks", 'tasks', 'v1')
pass
if __name__ == '__main__':
test_with_tasks()
| 24.455882
| 90
| 0.754059
|
c2fd1375b182e01a0074b92e04875d1c2a672c09
| 12,314
|
py
|
Python
|
arelle/plugin/validate/CIPC/__init__.py
|
hamscher/Arelle
|
64c1beddcc7163e571011faf07a03d8ffe18bb78
|
[
"Apache-2.0"
] | 292
|
2015-01-27T03:31:51.000Z
|
2022-03-26T07:00:05.000Z
|
arelle/plugin/validate/CIPC/__init__.py
|
hamscher/Arelle
|
64c1beddcc7163e571011faf07a03d8ffe18bb78
|
[
"Apache-2.0"
] | 94
|
2015-04-18T23:03:00.000Z
|
2022-03-28T17:24:55.000Z
|
arelle/plugin/validate/CIPC/__init__.py
|
hamscher/Arelle
|
64c1beddcc7163e571011faf07a03d8ffe18bb78
|
[
"Apache-2.0"
] | 200
|
2015-01-13T03:55:47.000Z
|
2022-03-29T12:38:56.000Z
|
'''
Created on Dec 21, 2017
Filer Guidelines: http://www.cipc.co.za/files/8615/1333/0514/25082017_Guidelines_for_Filing__AFSs_in_XBRL_by_Client_Companies_Technical_Aspects_v1-7_HVMZ.pdf
Taxonomy Architecture: http://www.cipc.co.za/files/1715/1325/5802/CIPC_XBRL_Taxonomy_Framework_Architecture_-_2017-12-15.pdf
Taxonomy package expected to be installed: http://xbrl.cipc.co.za/cipc_2017-12-15.zip
@author: Mark V Systems Limited
(c) Copyright 2017 Mark V Systems Limited, All rights reserved.
'''
import os, re
from lxml.etree import _ElementTree, _Comment, _ProcessingInstruction
from arelle import ModelDocument, XbrlConst
from arelle.ModelDtsObject import ModelResource
from arelle.ModelInstanceObject import ModelFact, ModelInlineFact, ModelInlineFootnote
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname
from arelle.XbrlConst import ixbrlAll, xhtml
from .Const import cpicModules # , mandatoryElements
cipcBlockedInlineHtmlElements = {
'object', 'script'}
namePattern = re.compile(r"^(.*) - ((18|19|20)\d{2}-[0-9]+-(06|07|08|09|10|12|20|21|22|23|24|25|26|30|31)) - (20[1-9]\d)$")
reportingModulePattern = re.compile(r"http://xbrl.cipc.co.za/taxonomy/.*/\w*(ca_fas|full_ifrs|ifrs_for_smes)\w*[_-]20[12][0-9]-[0-9]{2}-[0-9]{2}.xsd")
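# Illustrative note: namePattern encodes the "Co. name - regYr-regNbr-coCode - finYr"
# convention referenced in the fileNameMalformed message below; an invented name such
# as "Example Ltd - 2001-123456-07 - 2017" would match.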
def disclosureSystemTypes(disclosureSystem, *args, **kwargs):
# return ((disclosure system name, variable name), ...)
return (("CIPC", "CIPCplugin"),)
def disclosureSystemConfigURL(disclosureSystem, *args, **kwargs):
return os.path.join(os.path.dirname(__file__), "config.xml")
def validateXbrlStart(val, parameters=None, *args, **kwargs):
val.validateCIPCplugin = val.validateDisclosureSystem and getattr(val.disclosureSystem, "CIPCplugin", False)
if not (val.validateCIPCplugin):
return
def validateXbrlFinally(val, *args, **kwargs):
if not (val.validateCIPCplugin):
return
_xhtmlNs = "{{{}}}".format(xhtml)
_xhtmlNsLen = len(_xhtmlNs)
modelXbrl = val.modelXbrl
modelDocument = modelXbrl.modelDocument
if not modelDocument:
return # never loaded properly
_statusMsg = _("validating {0} filing rules").format(val.disclosureSystem.name)
modelXbrl.profileActivity()
modelXbrl.modelManager.showStatus(_statusMsg)
if modelDocument.type == ModelDocument.Type.INSTANCE:
modelXbrl.error("cipc:instanceMustBeInlineXBRL",
_("CIPC expects inline XBRL instances."),
modelObject=modelXbrl)
if modelDocument.type in (ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INSTANCE):
footnotesRelationshipSet = modelXbrl.relationshipSet("XBRL-footnotes")
orphanedFootnotes = set()
nonEnglishFootnotes = set()
        footnoteRoleErrors = set()
transformRegistryErrors = set()
def checkFootnote(elt, text):
            if text: # non-empty footnote must be linked to a fact
if not any(isinstance(rel.fromModelObject, ModelFact)
for rel in footnotesRelationshipSet.toModelObject(elt)):
orphanedFootnotes.add(elt)
if not elt.xmlLang.startswith("en"):
nonEnglishFootnotes.add(elt)
if elt.role != XbrlConst.footnote or not all(
rel.arcrole == XbrlConst.factFootnote and rel.linkrole == XbrlConst.defaultLinkRole
for rel in footnotesRelationshipSet.toModelObject(elt)):
footnoteRoleErrors.add(elt)
if modelDocument.type == ModelDocument.Type.INLINEXBRL:
_baseName, _baseExt = os.path.splitext(modelDocument.basename)
if _baseExt not in (".xhtml",) or not namePattern.match(_baseName):
modelXbrl.warning("cipc:fileNameMalformed",
_("FileName should have the pattern \"Co. name - regYr-regNbr-coCode - finYr.xhtml\": %(fileName)s"),
modelObject=modelXbrl, fileName=modelDocument.basename)
rootElt = modelDocument.xmlRootElement
if rootElt.tag not in ("{http://www.w3.org/1999/xhtml}html", "{http://www.w3.org/1999/xhtml}xhtml"):
modelXbrl.error("cipc:htmlRootElement",
_("InlineXBRL root element <%(element)s> MUST be html and have the xhtml namespace."),
modelObject=rootElt, element=rootElt.tag)
for elt in rootElt.iter():
eltTag = elt.tag
if isinstance(elt, ModelObject) and elt.namespaceURI == xhtml:
eltTag = elt.localName
elif isinstance(elt, (_ElementTree, _Comment, _ProcessingInstruction)):
continue # comment or other non-parsed element
else:
eltTag = elt.tag
if eltTag.startswith(_xhtmlNs):
eltTag = eltTag[_xhtmlNsLen:]
if eltTag in cipcBlockedInlineHtmlElements:
modelXbrl.error("cipc:disallowedHtmlElement",
_("Html element is disallowed: %(element)s"),
modelObject=elt, element=eltTag)
if eltTag == "title" and not namePattern.match(elt.text):
modelXbrl.error("cipc:titleElementMalformed",
_("Title element required to have the pattern \"Co. name - regYr-regNbr-coCode - finYr\": %(title)s"),
modelObject=elt, title=elt.text)
for attrTag, attrValue in elt.items():
if ((attrTag == "href" and eltTag == "a") or
(attrTag == "src" and eltTag == "img")):
if "javascript:" in attrValue:
modelXbrl.error("cipc:disallowedScript",
_("Element %(element)s has javascript in '%(attribute)s'"),
modelObject=elt, attribute=attrTag, element=eltTag)
if isinstance(elt, ModelInlineFootnote):
checkFootnote(elt, elt.value)
elif isinstance(elt, ModelResource) and elt.qname == XbrlConst.qnLinkFootnote:
checkFootnote(elt, elt.value)
elif isinstance(elt, ModelInlineFact):
if elt.format is not None and elt.format.namespaceURI != 'http://www.xbrl.org/inlineXBRL/transformation/2015-02-26':
transformRegistryErrors.add(elt)
elif modelDocument.type == ModelDocument.Type.INSTANCE:
for elt in modelDocument.xmlRootElement.iter():
if elt.qname == XbrlConst.qnLinkFootnote: # for now assume no private elements extend link:footnote
checkFootnote(elt, elt.stringValue)
    # identify type of filer (FAS, Full IFRS, IFRS for SMEs)
reportingModules = [reportingModulePattern.match(referencedDoc.uri).group(1)
for referencedDoc in modelDocument.referencesDocument.keys()
if referencedDoc.type == ModelDocument.Type.SCHEMA
if reportingModulePattern.match(referencedDoc.uri)]
if len(reportingModules) != 1 or reportingModules[0] not in cpicModules:
modelXbrl.error("cipc:reportingModuleAmbiguous",
_("Reporting module must specify namespace for FAS, IFRS-FULL or IFRS-SMES"),
modelObject=elt)
reportingModule = None
else:
reportingModule = cpicModules[reportingModules[0]]
# build namespace maps
nsMap = {}
for ns in modelXbrl.namespaceDocs.keys():
if ns.endswith("/ca"):
nsMap["cipc-ca"] = ns
elif ns.endswith("/ca/enum"):
nsMap["cipc-ca-enum"] = ns
elif ns.endswith("/ifrs-full"):
nsMap["ifrs-full"] = ns
elif ns.endswith("/ifrs-smes"):
nsMap["ifrs-smes"] = ns
''' checked by CIPC formula
# build mandatory and footnoteIfNil tables by ns qname in use
mandatory = set()
for prefixedName in mandatoryElements[reportingModule]["mandatory"]:
prefix, _sep, name = prefixedName.rpartition(":")
mandatory.add(qname(nsMap.get(prefix),name))
footnoteIfNil = set()
for prefixedName in mandatoryElements[reportingModule]["footnoteIfNil"]:
prefix, _sep, name = prefixedName.rpartition(":")
footnoteIfNil.add(qname(nsMap.get(prefix),name))
reportedMandatory = set()
reportedFootnoteIfNil = set()
factsMandatoryNilWithoutFootnote = set()
footnotesRelationshipSet = modelXbrl.relationshipSet(XbrlConst.factFootnote, XbrlConst.defaultLinkRole)
for qn, facts in modelXbrl.factsByQname.items():
if qn in mandatory:
reportedMandatory.add(qn)
elif qn in footnoteIfNil:
for fact in facts:
reportedFootnoteIfNil.add(qn)
if fact.isNil and not any(footnote.role == XbrlConst.footnote and
footnote.xmlLang.startswith("en") and
footnote.stringValue.strip()
for footnoteRel in footnotesRelationshipSet.fromModelObject(fact)
for footnote in (footnoteRel.toModelObject,)):
factsMandatoryNilWithoutFootnote.add(fact)
missingElements = (mandatory - reportedMandatory) # | (reportedFootnoteIfNil - reportedFootnoteIfNil)
if missingElements:
modelXbrl.error("cpic:missingRequiredElements",
_("Required elements missing from document: %(elements)s."),
modelObject=modelXbrl, elements=", ".join(sorted(str(qn) for qn in missingElements)))
if factsMandatoryNilWithoutFootnote:
modelXbrl.error("cpic:missingExplanatoryFootnote",
_("Required nil facts missing explanatory footnote: %(elements)s."),
modelObject=factsMandatoryNilWithoutFootnote,
elements=", ".join(sorted(str(fact.qname) for fact in factsMandatoryNilWithoutFootnote)))
'''
if transformRegistryErrors:
modelXbrl.warning("cpic:transformRegistry",
_("Transformation Registry 3 should be for facts: %(elements)s."),
modelObject=transformRegistryErrors,
elements=", ".join(sorted(str(fact.qname) for fact in transformRegistryErrors)))
if orphanedFootnotes:
modelXbrl.error("cipc:orphanedFootnote",
_("Non-empty footnotes must be connected to fact(s)."),
modelObject=orphanedFootnotes)
if nonEnglishFootnotes:
modelXbrl.error("cipc:nonEnglishFootnote",
_("Footnotes must use English language."),
modelObject=nonEnglishFootnotes)
        if footnoteRoleErrors:
            modelXbrl.error("cipc:footnoteRoleErrors",
                _("Footnotes must use the default link, resource and arc roles."),
                modelObject=footnoteRoleErrors)
modelXbrl.profileActivity(_statusMsg, minTimeToShow=0.0)
modelXbrl.modelManager.showStatus(None)
__pluginInfo__ = {
    # Do not use _( ) in pluginInfo itself (it is applied later, after loading)
'name': 'Validate CIPC',
'version': '1.0',
'description': '''CIPC (South Africa) Validation.''',
'license': 'Apache-2',
'author': 'Mark V Systems',
'copyright': '(c) Copyright 2017 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
    'DisclosureSystem.Types': disclosureSystemTypes,
'DisclosureSystem.ConfigURL': disclosureSystemConfigURL,
'Validate.XBRL.Start': validateXbrlStart,
'Validate.XBRL.Finally': validateXbrlFinally,
}
| 52.177966
| 157
| 0.606464
|
4011cdcb6bd3089a0dd7b9fbad8d461d361ec726
| 7,961
|
py
|
Python
|
nodes/ImageServer.py
|
anqixu/sightedturtlesim
|
35712504679a477d0021a8cec3d22cb087ebc545
|
[
"BSD-2-Clause"
] | 1
|
2017-06-16T22:31:24.000Z
|
2017-06-16T22:31:24.000Z
|
nodes/ImageServer.py
|
anqixu/sightedturtlesim
|
35712504679a477d0021a8cec3d22cb087ebc545
|
[
"BSD-2-Clause"
] | null | null | null |
nodes/ImageServer.py
|
anqixu/sightedturtlesim
|
35712504679a477d0021a8cec3d22cb087ebc545
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Python port of AbstractImageServer and SingleImageServer in sightedturtlesim
import numpy as np
import cv2
class SingleImageServer:
# If the desired camera dimensions (with respect to the canvas dimensions)
# exceeds DOWNSIZE_SCALE_RATIO times the dimensions of the desired image,
# then the bounding box should be downsized prior to rotation, to ensure
# that the downsized camera dimensions will be exactly DOWNSIZE_SCALE_RATIO
# times the size of the desired image
DOWNSIZE_SCALE_RATIO = 1.5
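  # Worked example (illustrative): for a 320x240 output, a 1000x800 px camera
  # footprint exceeds 1.5x the output in both dimensions, so it is scaled by
  # max(480/1000, 360/800) = 0.48 down to 480x384 before rotation.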
def __init__(self, imageFilename, ppm=1.0):
self._pixelsPerMeter = ppm
self._imageFilename = imageFilename
self._canvas = cv2.imread(imageFilename, flags=1) # flags=1 means return 3-channel color image
self._height, self._width = self._canvas.shape[:2]
"""
- headingDeg: 0=+x, 90=+y
- output_shape: rows, columns
"""
def getImage(self, eastingM, northingM, altitudeM, headingDeg, hfovDeg, aspectRatio, output_shape):
# Convert inputs into pixel space
xPx, yPx, zPx = eastingM*self._pixelsPerMeter, self._height-northingM*self._pixelsPerMeter, altitudeM*self._pixelsPerMeter
thetaRad = headingDeg/180.0*np.pi
camW = zPx*2. * np.tan(hfovDeg / 2.0 / 180.0 * np.pi)
camH = camW / aspectRatio
outputH, outputW = output_shape
# Compute the bounding box width and height of the (rotated) camera frame
# - order of corners: top-left (-x & -y), bottom-left, bottom-right, top-right
camTransform = np.array(
[[np.cos(thetaRad), np.sin(thetaRad)],
[-np.sin(thetaRad), np.cos(thetaRad)]])
camCorners = np.array(
[[-camW/2, -camW/2, +camW/2, +camW/2],
[-camH/2, +camH/2, +camH/2, -camH/2]])
camTransformedCorners = np.dot(camTransform, camCorners)
camTXMax, camTXMin = np.max(camTransformedCorners[0,:])+xPx, np.min(camTransformedCorners[0,:])+xPx
camTYMax, camTYMin = np.max(camTransformedCorners[1,:])+yPx, np.min(camTransformedCorners[1,:])+yPx
# Decide to slightly over-sample the bounding box if rotation angle is not exact
headingMod90 = (headingDeg % 90.)
if headingMod90 > 45.: headingMod90 -= 90.
if abs(headingMod90) > 5.0: # If headingDeg is not within +/- 5' away from 0', 90', 180', or 270'
camTXMax += 1
camTXMin -= 1
camTYMax += 1
camTYMin -= 1
# Extract the sub-window corresponding to the bounding box
camTXMax, camTXMin = int(round(camTXMax)), int(round(camTXMin))
camTYMax, camTYMin = int(round(camTYMax)), int(round(camTYMin))
if camTXMin >= 0 and camTXMax < self._width and camTYMin >= 0 and camTYMax < self._height:
bbImage = self._canvas[camTYMin:camTYMax+1, camTXMin:camTXMax+1, :]
else:
bbImage = np.zeros(shape=(camTYMax-camTYMin+1, camTXMax-camTXMin+1, self._canvas.shape[2]), dtype=self._canvas.dtype)
currCamTY = camTYMin
bbY = 0
while currCamTY <= camTYMax:
currCamTYMod = currCamTY % self._height
patchHeight = min(camTYMax-currCamTY+1, self._height-currCamTYMod)
currCamTX = camTXMin
bbX = 0
while currCamTX <= camTXMax:
currCamTXMod = currCamTX % self._width
patchWidth = min(camTXMax-currCamTX+1, self._width - currCamTXMod)
bbPatch = bbImage[bbY:bbY+patchHeight, bbX:bbX+patchWidth]
np.copyto(bbPatch, self._canvas[currCamTYMod:currCamTYMod+patchHeight, currCamTXMod:currCamTXMod+patchWidth])
currCamTX += patchWidth
bbX += patchWidth
currCamTY += patchHeight
bbY += patchHeight
# Decide to downsize image if necessary
if camW > SingleImageServer.DOWNSIZE_SCALE_RATIO*outputW and camH > SingleImageServer.DOWNSIZE_SCALE_RATIO*outputH:
downsizeFactor = max(SingleImageServer.DOWNSIZE_SCALE_RATIO*outputW/camW,
SingleImageServer.DOWNSIZE_SCALE_RATIO*outputH/camH)
bbImage = cv2.resize(bbImage, dsize=None, fx=downsizeFactor, fy=downsizeFactor, interpolation=cv2.INTER_AREA)
camW *= downsizeFactor
camH *= downsizeFactor
# Compute the width and height of the rotated bounding box
# and adjust the centers of the transformation matrix
bbImage_rows, bbImage_cols = bbImage.shape[:2]
bbTransform = cv2.getRotationMatrix2D((bbImage_cols/2., bbImage_rows/2.), -headingDeg, 1.0)
bbCorners = np.array(
[[0, 0, bbImage_cols, bbImage_cols],
[0, bbImage_rows, bbImage_rows, 0 ],
[1, 1, 1, 1 ]])
bbTransformedCorners = np.dot(bbTransform, bbCorners)
bbTWidth = int(round(np.max(bbTransformedCorners[0,:]) - np.min(bbTransformedCorners[0,:])))
bbTHeight = int(round(np.max(bbTransformedCorners[1,:]) - np.min(bbTransformedCorners[1,:])))
bbTransform[0,2] += bbTWidth/2.0 - bbImage_cols/2.0
bbTransform[1,2] += bbTHeight/2.0 - bbImage_rows/2.0
bbRotatedImage = cv2.warpAffine(bbImage, bbTransform, (bbTWidth, bbTHeight), flags=cv2.INTER_NEAREST)
bbRTopLeftX = max(int(bbRotatedImage.shape[1]/2. - camW/2.), 0)
bbRTopLeftY = max(int(bbRotatedImage.shape[0]/2. - camH/2.), 0)
bbRBottomRightX = bbRTopLeftX + max(int(camW), 1)
if bbRBottomRightX > bbRotatedImage.shape[1]:
      bbRBottomRightX = bbRotatedImage.shape[1]
bbRBottomRightY = bbRTopLeftY + max(int(camH), 1)
if bbRBottomRightY > bbRotatedImage.shape[0]:
bbRBottomRightY = bbRotatedImage.shape[0]
camImage = bbRotatedImage[bbRTopLeftY:bbRBottomRightY, bbRTopLeftX:bbRBottomRightX]
if camImage.shape[:2] != output_shape:
buffer = cv2.resize(camImage, dsize=(outputW, outputH), interpolation=cv2.INTER_LINEAR)
else:
buffer = camImage.copy() # In rare case camImage is a slice from underlying map
# Visualize results
if False:
from matplotlib import pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
XYs = camTransformedCorners.transpose()
XYs[:,0] += xPx
XYs[:,1] += yPx
path = Path(XYs, [Path.MOVETO]+[Path.LINETO]*3)
patch = patches.PathPatch(path, lw=2)
plt.figure()
ax = plt.subplot(221)
ax.add_patch(patch)
plt.xlabel('Eastings (m)')
plt.ylabel('Northings (m)')
plt.axis('equal')
plt.axis([0, self._width, 0, self._height])
ax.invert_yaxis()
plt.title('patch')
plt.subplot(222)
plt.imshow(bbImage)
plt.title('bbImage')
plt.subplot(223)
plt.imshow(bbRotatedImage)
plt.title('bbRotatedImage')
plt.subplot(224)
plt.imshow(camImage)
plt.title('camImage')
plt.show()
plt.figure()
plt.imshow(camImage)
plt.title('camImage')
plt.show()
return buffer
"""
Returns cornersXY = [topLeftX, topLeftY, topRightX, topRightY,
bottomRightX, bottomRightY, bottomLeftX, bottomLeftY]
"""
  def toCornersXY(self, eastingM, northingM, altitudeM, headingDeg, hfovDeg, aspectRatio):
# Convert inputs into pixel space
xPx, yPx, zPx = eastingM*self._pixelsPerMeter, self._height-northingM*self._pixelsPerMeter, altitudeM*self._pixelsPerMeter
thetaRad = headingDeg/180.0*np.pi
camW = zPx*2. * np.tan(hfovDeg / 2.0 / 180.0 * np.pi)
camH = camW / aspectRatio
# Compute the bounding box width and height of the (rotated) camera frame
# - order of corners: top-left (-x & -y), bottom-left, bottom-right, top-right
camTransform = np.array(
[[np.cos(thetaRad), np.sin(thetaRad)],
[-np.sin(thetaRad), np.cos(thetaRad)]])
camCorners = np.array(
[[-camW/2, -camW/2, +camW/2, +camW/2],
[-camH/2, +camH/2, +camH/2, -camH/2]])
camTransformedCorners = np.dot(camTransform, camCorners)
camTransformedCorners[0,:] += xPx
camTransformedCorners[1,:] += yPx
cornersXY = np.ndarray.flatten(camTransformedCorners.transpose())
return cornersXY
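if __name__ == "__main__":
  # Minimal usage sketch (not part of the original node). "map.png" and the pose
  # values are placeholders for demonstration; any canvas image readable by
  # cv2.imread would work.
  server = SingleImageServer("map.png", ppm=10.0)
  frame = server.getImage(eastingM=50.0, northingM=50.0, altitudeM=20.0,
                          headingDeg=90.0, hfovDeg=60.0, aspectRatio=4.0/3.0,
                          output_shape=(240, 320))
  print("returned frame shape:", frame.shape)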
| 41.680628
| 126
| 0.671649
|
1dc6d4978fcfae9b6671a61ee5ad33190687d731
| 3,211
|
py
|
Python
|
bin/sa_haveibeenpwned/aob_py3/future/types/newdict.py
|
hRun/SA-haveibeenpwned
|
2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb
|
[
"Apache-2.0"
] | 2
|
2020-08-17T07:52:48.000Z
|
2020-12-18T16:39:32.000Z
|
bin/sa_haveibeenpwned/aob_py3/future/types/newdict.py
|
hRun/SA-haveibeenpwned
|
2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb
|
[
"Apache-2.0"
] | 5
|
2020-12-15T23:40:14.000Z
|
2022-02-23T15:43:18.000Z
|
bin/sa_haveibeenpwned/aob_py2/future/types/newdict.py
|
hRun/SA-haveibeenpwned
|
2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb
|
[
"Apache-2.0"
] | 4
|
2019-05-16T09:57:33.000Z
|
2021-07-14T12:31:21.000Z
|
"""
A dict subclass for Python 2 that behaves like Python 3's dict
Example use:
>>> from builtins import dict
>>> d1 = dict() # instead of {} for an empty dict
>>> d2 = dict(key1='value1', key2='value2')
The keys, values and items methods now return iterators on Python 2.x
(with set-like behaviour on Python 2.7).
>>> for d in (d1, d2):
... assert not isinstance(d.keys(), list)
... assert not isinstance(d.values(), list)
... assert not isinstance(d.items(), list)
"""
import sys
from future.utils import with_metaclass
from future.types.newobject import newobject
_builtin_dict = dict
ver = sys.version_info[:2]
class BaseNewDict(type):
def __instancecheck__(cls, instance):
if cls == newdict:
return isinstance(instance, _builtin_dict)
else:
return issubclass(instance.__class__, cls)
class newdict(with_metaclass(BaseNewDict, _builtin_dict)):
"""
A backport of the Python 3 dict object to Py2
"""
def items(self):
"""
On Python 2.7+:
D.items() -> a set-like object providing a view on D's items
On Python 2.6:
D.items() -> an iterator over D's items
"""
if ver == (2, 7):
return self.viewitems()
elif ver == (2, 6):
return self.iteritems()
elif ver >= (3, 0):
return self.items()
def keys(self):
"""
On Python 2.7+:
D.keys() -> a set-like object providing a view on D's keys
On Python 2.6:
D.keys() -> an iterator over D's keys
"""
if ver == (2, 7):
return self.viewkeys()
elif ver == (2, 6):
return self.iterkeys()
elif ver >= (3, 0):
return self.keys()
def values(self):
"""
On Python 2.7+:
D.values() -> a set-like object providing a view on D's values
On Python 2.6:
D.values() -> an iterator over D's values
"""
if ver == (2, 7):
return self.viewvalues()
elif ver == (2, 6):
return self.itervalues()
elif ver >= (3, 0):
return self.values()
def __new__(cls, *args, **kwargs):
"""
dict() -> new empty dictionary
dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
dict(iterable) -> new dictionary initialized as if via:
d = {}
for k, v in iterable:
d[k] = v
dict(**kwargs) -> new dictionary initialized with the name=value pairs
in the keyword argument list. For example: dict(one=1, two=2)
"""
if len(args) == 0:
return super(newdict, cls).__new__(cls)
elif type(args[0]) == newdict:
value = args[0]
else:
value = args[0]
return super(newdict, cls).__new__(cls, value)
def __native__(self):
"""
Hook for the future.utils.native() function
"""
return dict(self)
__all__ = ['newdict']
| 28.669643
| 79
| 0.523513
|
46f4176d0e8e88a06a719e1b187269a97adba18b
| 12,171
|
py
|
Python
|
tests/test_integration.py
|
najamansari/django-elasticsearch-dsl
|
7b6dab35313d0512ed86497ba407752ae82294c0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_integration.py
|
najamansari/django-elasticsearch-dsl
|
7b6dab35313d0512ed86497ba407752ae82294c0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_integration.py
|
najamansari/django-elasticsearch-dsl
|
7b6dab35313d0512ed86497ba407752ae82294c0
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import os
import unittest
from django.core.management import call_command
from django.test import TestCase
from django.utils.six import StringIO
from django.utils.translation import ugettext_lazy as _
from django_elasticsearch_dsl.test import ESTestCase
from .documents import (
ad_index,
AdDocument,
car_index,
CarDocument,
PaginatedAdDocument
)
from .models import Car, Manufacturer, Ad, Category, COUNTRIES
@unittest.skipUnless(
os.environ.get('ELASTICSEARCH_URL', False),
"--elasticsearch not set"
)
class IntegrationTestCase(ESTestCase, TestCase):
def setUp(self):
super(IntegrationTestCase, self).setUp()
self.manufacturer = Manufacturer(
name="Peugeot", created=datetime(1900, 10, 9, 0, 0),
country_code="FR", logo='logo.jpg'
)
self.manufacturer.save()
self.car1 = Car(
name="508", launched=datetime(2010, 9, 9, 0, 0),
manufacturer=self.manufacturer
)
self.car1.save()
self.car2 = Car(
name="208", launched=datetime(2010, 10, 9, 0, 0),
manufacturer=self.manufacturer
)
self.car2.save()
self.category1 = Category(
title="Category 1", slug="category-1", icon="icon.jpeg"
)
self.category1.save()
self.car2.categories.add(self.category1)
self.car2.save()
self.car3 = Car(name="308", launched=datetime(2010, 11, 9, 0, 0))
self.car3.save()
self.category2 = Category(title="Category 2", slug="category-2")
self.category2.save()
self.car3.categories.add(self.category1, self.category2)
self.car3.save()
self.ad1 = Ad(
title=_("Ad number 1"), url="www.ad1.com",
description="My super ad description 1",
car=self.car1
)
self.ad1.save()
self.ad2 = Ad(
title="Ad number 2", url="www.ad2.com",
description="My super ad descriptio 2",
car=self.car1
)
self.ad2.save()
self.car1.save()
def test_get_doc_with_relationships(self):
s = CarDocument.search().query("match", name=self.car2.name)
result = s.execute()
self.assertEqual(len(result), 1)
car2_doc = result[0]
self.assertEqual(car2_doc.ads, [])
self.assertEqual(car2_doc.name, self.car2.name)
self.assertEqual(int(car2_doc.meta.id), self.car2.pk)
self.assertEqual(car2_doc.launched, self.car2.launched)
self.assertEqual(car2_doc.manufacturer.name,
self.car2.manufacturer.name)
self.assertEqual(car2_doc.manufacturer.country,
COUNTRIES[self.manufacturer.country_code])
s = CarDocument.search().query("match", name=self.car3.name)
result = s.execute()
car3_doc = result[0]
self.assertEqual(car3_doc.manufacturer, {})
self.assertEqual(car3_doc.name, self.car3.name)
self.assertEqual(int(car3_doc.meta.id), self.car3.pk)
def test_get_doc_with_reverse_relationships(self):
s = CarDocument.search().query("match", name=self.car1.name)
result = s.execute()
self.assertEqual(len(result), 1)
car1_doc = result[0]
self.assertEqual(car1_doc.ads, [
{
'title': self.ad1.title,
'description': self.ad1.description,
'pk': self.ad1.pk,
},
{
'title': self.ad2.title,
'description': self.ad2.description,
'pk': self.ad2.pk,
},
])
self.assertEqual(car1_doc.name, self.car1.name)
self.assertEqual(int(car1_doc.meta.id), self.car1.pk)
def test_get_doc_with_many_to_many_relationships(self):
s = CarDocument.search().query("match", name=self.car3.name)
result = s.execute()
self.assertEqual(len(result), 1)
car1_doc = result[0]
self.assertEqual(car1_doc.categories, [
{
'title': self.category1.title,
'slug': self.category1.slug,
'icon': self.category1.icon,
},
{
'title': self.category2.title,
'slug': self.category2.slug,
'icon': '',
}
])
def test_doc_to_dict(self):
s = CarDocument.search().query("match", name=self.car2.name)
result = s.execute()
self.assertEqual(len(result), 1)
car2_doc = result[0]
self.assertEqual(car2_doc.to_dict(), {
'type': self.car2.type,
'launched': self.car2.launched,
'name': self.car2.name,
'manufacturer': {
'name': self.manufacturer.name,
'country': COUNTRIES[self.manufacturer.country_code],
},
'categories': [{
'title': self.category1.title,
'slug': self.category1.slug,
'icon': self.category1.icon,
}]
})
s = CarDocument.search().query("match", name=self.car3.name)
result = s.execute()
self.assertEqual(len(result), 1)
car3_doc = result[0]
self.assertEqual(car3_doc.to_dict(), {
'type': self.car3.type,
'launched': self.car3.launched,
'name': self.car3.name,
'categories': [
{
'title': self.category1.title,
'slug': self.category1.slug,
'icon': self.category1.icon,
},
{
'title': self.category2.title,
'slug': self.category2.slug,
'icon': '',
}
]
})
def test_index_to_dict(self):
index_dict = car_index.to_dict()
self.assertEqual(index_dict['settings'], {
'number_of_shards': 1,
'number_of_replicas': 0,
'analysis': {
'analyzer': {
'html_strip': {
'tokenizer': 'standard',
'filter': ['standard', 'lowercase',
'stop', 'snowball'],
'type': 'custom',
'char_filter': ['html_strip']
}
}
}
})
self.assertEqual(index_dict['mappings'], {
'manufacturer_document': {
'properties': {
'created': {'type': 'date'},
'name': {'type': 'string'},
'country': {'type': 'string'},
'country_code': {'type': 'string'},
'logo': {'type': 'string'},
}
},
'car_document': {
'properties': {
'ads': {
'type': 'nested',
'properties': {
'description': {
'type': 'string', 'analyzer':
'html_strip'
},
'pk': {'type': 'integer'},
'title': {'type': 'string'},
},
},
'categories': {
'type': 'nested',
'properties': {
'title': {'type': 'string'},
'slug': {'type': 'string'},
'icon': {'type': 'string'},
},
},
'manufacturer': {
'type': 'object',
'properties': {
'country': {'type': 'string'},
'name': {'type': 'string'},
},
},
'name': {'type': 'string'},
'launched': {'type': 'date'},
'type': {'type': 'string'},
}
}
})
def test_related_docs_are_updated(self):
# test foreignkey relation
self.manufacturer.name = 'Citroen'
self.manufacturer.save()
s = CarDocument.search().query("match", name=self.car2.name)
car2_doc = s.execute()[0]
self.assertEqual(car2_doc.manufacturer.name, 'Citroen')
self.assertEqual(len(car2_doc.ads), 0)
ad3 = Ad.objects.create(
title=_("Ad number 3"), url="www.ad3.com",
description="My super ad description 3",
car=self.car2
)
s = CarDocument.search().query("match", name=self.car2.name)
car2_doc = s.execute()[0]
self.assertEqual(len(car2_doc.ads), 1)
ad3.delete()
s = CarDocument.search().query("match", name=self.car2.name)
car2_doc = s.execute()[0]
self.assertEqual(len(car2_doc.ads), 0)
self.manufacturer.delete()
s = CarDocument.search().query("match", name=self.car2.name)
car2_doc = s.execute()[0]
self.assertEqual(car2_doc.manufacturer, {})
def test_m2m_related_docs_are_updated(self):
# test m2m add
category = Category(
title="Category", slug="category", icon="icon.jpeg"
)
category.save()
self.car2.categories.add(category)
s = CarDocument.search().query("match", name=self.car2.name)
car2_doc = s.execute()[0]
self.assertEqual(len(car2_doc.categories), 2)
# test m2m deletion
self.car2.categories.remove(category)
s = CarDocument.search().query("match", name=self.car2.name)
car2_doc = s.execute()[0]
self.assertEqual(len(car2_doc.categories), 1)
self.category1.car_set.clear()
s = CarDocument.search().query("match", name=self.car2.name)
car2_doc = s.execute()[0]
self.assertEqual(len(car2_doc.categories), 0)
def test_delete_create_populate_commands(self):
out = StringIO()
self.assertTrue(ad_index.exists())
self.assertTrue(car_index.exists())
call_command('search_index', action='delete',
force=True, stdout=out, models=['tests.ad'])
self.assertFalse(ad_index.exists())
self.assertTrue(car_index.exists())
call_command('search_index', action='create',
models=['tests.ad'], stdout=out)
self.assertTrue(ad_index.exists())
result = AdDocument().search().execute()
self.assertEqual(len(result), 0)
call_command('search_index', action='populate',
models=['tests.ad'], stdout=out)
result = AdDocument().search().execute()
self.assertEqual(len(result), 2)
def test_rebuild_command(self):
out = StringIO()
result = AdDocument().search().execute()
self.assertEqual(len(result), 2)
Ad(title="Ad title 3").save()
call_command('search_index', action='populate',
force=True, stdout=out, models=['tests.ad'])
result = AdDocument().search().execute()
self.assertEqual(len(result), 3)
def test_to_queryset(self):
Ad(title="Nothing that match", car=self.car1).save()
qs = AdDocument().search().query(
'match', title="Ad number 2").to_queryset()
self.assertEqual(qs.count(), 2)
self.assertEqual(list(qs), [self.ad2, self.ad1])
def test_queryset_pagination(self):
ad3 = Ad(title="Ad 3", car=self.car1)
ad3.save()
with self.assertNumQueries(1):
AdDocument().update(Ad.objects.all())
doc = PaginatedAdDocument()
with self.assertNumQueries(3):
doc.update(Ad.objects.all().order_by('-id'))
self.assertEqual(
set(int(instance.meta.id) for instance in
doc.search().query('match', title="Ad")),
set([ad3.pk, self.ad1.pk, self.ad2.pk])
)
| 35.797059
| 73
| 0.513516
|
16be308adebf34b3a62305e50d8bd715f5e92f0f
| 251
|
py
|
Python
|
Practica3_Matias.py
|
mbermejo1/PracticasSF2
|
0dd57b24c658cb75f0783776d9178f13180563fe
|
[
"MIT"
] | null | null | null |
Practica3_Matias.py
|
mbermejo1/PracticasSF2
|
0dd57b24c658cb75f0783776d9178f13180563fe
|
[
"MIT"
] | null | null | null |
Practica3_Matias.py
|
mbermejo1/PracticasSF2
|
0dd57b24c658cb75f0783776d9178f13180563fe
|
[
"MIT"
] | null | null | null |
print("\n\t\t-----> Welcome to Matias list changer <-----\t\t\n")
listn = [1,2,3,4,5,6,7,8,9,10]
print(f"\nThis is the list without changes ===> {listn}\n")
listn[4] *= 2
listn[7] *= 2
listn[9] *= 2
print(f"\nThis is the modified list ===> {listn}\n")
| 35.857143
| 65
| 0.589641
|
f716de749187532c276040a0b1e00777b44337ce
| 592
|
py
|
Python
|
api_logic_server_cli/project_prototype/util.py
|
valhuber/ApiLogicServer
|
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
|
[
"BSD-3-Clause"
] | 71
|
2021-01-23T17:34:33.000Z
|
2022-03-29T13:11:29.000Z
|
api_logic_server_cli/project_prototype/util.py
|
valhuber/ApiLogicServer
|
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
|
[
"BSD-3-Clause"
] | 38
|
2021-01-24T21:56:30.000Z
|
2022-03-08T18:49:00.000Z
|
api_logic_server_cli/project_prototype/util.py
|
valhuber/ApiLogicServer
|
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
|
[
"BSD-3-Clause"
] | 14
|
2021-01-23T16:20:44.000Z
|
2022-03-24T10:48:28.000Z
|
import sqlite3
from os import path
import sys
import logging
from typing import Any
app_logger = logging.getLogger("api_logic_server_app")
def log(msg: Any) -> None:
app_logger.info(msg)
# print("TIL==> " + msg)
def connection() -> sqlite3.Connection:
ROOT: str = path.dirname(path.realpath(__file__))
log(ROOT)
_connection = sqlite3.connect(path.join(ROOT, "sqlitedata.db"))
return _connection
def dbpath(dbname: str) -> str:
ROOT: str = path.dirname(path.realpath(__file__))
log('ROOT: '+ROOT)
PATH: str = path.join(ROOT, dbname)
log('DBPATH: '+PATH)
return PATH
| 22.769231
| 67
| 0.675676
|
77502af18bf5c8e08f138efacb1134a2917a190e
| 4,056
|
py
|
Python
|
app/tests/v1/test_meets.py
|
exdus/Questioner-api
|
22be46ef17b25c73949b105dca43dd9ea2b930ca
|
[
"MIT"
] | null | null | null |
app/tests/v1/test_meets.py
|
exdus/Questioner-api
|
22be46ef17b25c73949b105dca43dd9ea2b930ca
|
[
"MIT"
] | null | null | null |
app/tests/v1/test_meets.py
|
exdus/Questioner-api
|
22be46ef17b25c73949b105dca43dd9ea2b930ca
|
[
"MIT"
] | null | null | null |
import unittest
from app import create_app
import os
import json
import pytest
import datetime
from app.api.v1.models.models import MEETUP_LIST
KEY = os.getenv("SECRET")
class BaseTest(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.client = self.app.test_client()
target_time = datetime.datetime.now() + datetime.timedelta(days=7)
target_time = target_time.replace(microsecond=0)
today_now = datetime.datetime.now()
self.meetup1 = {
"topic": "My first meetup",
"images": ["/home/zonecc/pictures/img1.png", "/home/zonecc/picturesimg2/png"],
"location": "Home",
"happenOn": target_time.strftime("%D %H:%M %p"),
"tags": ["#At home", "#coding", "#enjoy"]
}
self.meetup11 = {
"topic": "",
"images": ["/home/zonecc/pictures/img1.png", "/home/zonecc/picturesimg2/png"],
"location": "Home",
"happenOn": target_time.strftime("%D %H:%M %p"),
"tags": ["#At home", "#coding", "#enjoy"]
}
self.meetup1created = {
"id": 1,
"createOn": today_now.strftime("%d %h %Y"),
"topic": "My first meetup",
"images": ["/home/zonecc/pictures/img1.png", "/home/zonecc/picturesimg2/png"],
"location": "Home",
"happenOn": target_time.strftime("%D %H:%M %p"),
"tags": ["#At home", "#coding", "#enjoy"]
}
self.meetup2 = {
"images": ["/home/zonecc/pictures/img1.png", "/home/zonecc/picturesimg2/png"],
"location": "Home",
"happenOn": target_time.strftime("%D %H:%M %p"),
"tags": ["#At home", "#coding", "#enjoy"]
}
self.nodata = {}
def tearDown(self):
pass
class TestMeetup(BaseTest):
def test_created_meetup_success(self):
response = self.client.post(
'/api/v1/meetups', data=json.dumps(self.meetup1), content_type="application/json")
#meet_resp = json.loads(response.data.decode('utf-8', KEY))
#self.assertEqual(response.status_code, 201)
#self.assertEqual(meet_resp["data"], self.meetup1created)
def test_create_meetup_fail_no_data(self):
response = self.client.post(
'/api/v1/meetups', data=json.dumps(self.nodata), content_type="application/json")
#self.assertEqual(response.status_code, 404)
def test_get_all_meetups_success(self):
MEETUP_LIST.append(self.meetup1created)
response = self.client.get(
'/api/v1/meetups', data=json.dumps(self.meetup1), content_type="application/json")
#self.assertEqual(response.status_code, 200)
def test_get_all_upcoming_success(self):
MEETUP_LIST.append(self.meetup1created)
response = self.client.get(
'/api/v1/meetups/upcoming', data=json.dumps(self.meetup1), content_type="application/json")
#self.assertEqual(response.status_code, 200)
def test_get_single_meetup_success(self):
MEETUP_LIST.append(self.meetup1created)
response = self.client.get(
'/api/v1/meetups/1', data=json.dumps(self.meetup1), content_type="application/json")
#self.assertEqual(response.status_code, 200)
def test_get_single_meetup_fail(self):
MEETUP_LIST.append(self.meetup1created)
response = self.client.get(
'/api/v1/meetups/1000', data=json.dumps(self.meetup1), content_type="application/json")
#self.assertEqual(response.status_code, 404)
def test_delete_meetup_fail(self):
response = self.client.delete(
'/api/v1/meetups/1000', data=json.dumps(self.meetup1), content_type="application/json")
#self.assertEqual(response.status_code, 404)
def test_delete_meetup_success(self):
response = self.client.delete(
'/api/v1/meetups/1', data=json.dumps(self.meetup1), content_type="application/json")
#self.assertEqual(response.status_code, 200)
| 38.264151
| 103
| 0.618343
|
49bd82a86d63d362216011c9e2162e0037e7c6bd
| 3,036
|
py
|
Python
|
tests/sources/test_textures.py
|
awesome-archive/webots
|
8e74fb8393d1e3a6540749afc492635c43f1b30f
|
[
"Apache-2.0"
] | 2
|
2019-07-12T13:47:44.000Z
|
2019-08-17T02:53:54.000Z
|
tests/sources/test_textures.py
|
golbh/webots
|
8e74fb8393d1e3a6540749afc492635c43f1b30f
|
[
"Apache-2.0"
] | null | null | null |
tests/sources/test_textures.py
|
golbh/webots
|
8e74fb8393d1e3a6540749afc492635c43f1b30f
|
[
"Apache-2.0"
] | 1
|
2019-07-13T17:58:04.000Z
|
2019-07-13T17:58:04.000Z
|
# Copyright 1996-2018 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test textures."""
import unittest
import os
import fnmatch
from PIL import Image
class TestTextures(unittest.TestCase):
"""Unit test of the textures."""
def setUp(self):
"""Get all the textures to be tested."""
# 1. Get all the images from projects and resources
images = []
for directory in ['projects', 'resources']:
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME'] + os.sep + directory):
for fileName in fnmatch.filter(fileNames, '*.png'):
image = os.path.join(rootPath, fileName)
images.append(image)
for fileName in fnmatch.filter(fileNames, '*.jpg'):
image = os.path.join(rootPath, fileName)
images.append(image)
# 2. filter-out the images which are not textures
self.textures = []
for image in images:
if not (
'controllers' in image or
'icons' in image or
'libraries' in image or
'plugins' in image or
'simulator-sdk' in image or
'resources' + os.sep + 'images' in image or
'resources' + os.sep + 'web' in image or
'resources' + os.sep + 'wren' in image
):
self.textures.append(image)
def test_textures_dimensions_are_power_of_two(self):
"""Test that the released textures dimensions are power of two."""
def is_perfect_power_of_two(a):
assert isinstance(a, int)
while a % 2 == 0:
a = a / 2
if a == 1:
return True
return False
for texture in self.textures:
im = Image.open(texture)
self.assertTrue(
is_perfect_power_of_two(im.size[0]) and is_perfect_power_of_two(im.size[1]),
msg='texture "%s": dimension is not a power of two: (%d, %d)' % (texture, im.size[0], im.size[1])
)
def test_textures_profile(self):
"""Test that the released textures don't contain an ICC profile."""
for texture in self.textures:
im = Image.open(texture)
self.assertTrue(
im.info.get("icc_profile") is None,
msg='texture "%s" contains an ICC profile' % (texture)
)
if __name__ == '__main__':
unittest.main()
| 37.481481
| 113
| 0.584321
|
5cd5c7416f796784e724f794ec1830134bc41fd3
| 28,094
|
py
|
Python
|
graphicsDisplay.py
|
chaobiubiu/contest_pacman
|
f8381af6e6be09a894dc301c94803ca33af5271b
|
[
"BSD-4-Clause-UC"
] | 1
|
2021-11-30T06:44:08.000Z
|
2021-11-30T06:44:08.000Z
|
graphicsDisplay.py
|
chaobiubiu/contest_pacman
|
f8381af6e6be09a894dc301c94803ca33af5271b
|
[
"BSD-4-Clause-UC"
] | 4
|
2021-09-30T21:21:02.000Z
|
2021-10-11T22:02:26.000Z
|
graphicsDisplay.py
|
chaobiubiu/contest_pacman
|
f8381af6e6be09a894dc301c94803ca33af5271b
|
[
"BSD-4-Clause-UC"
] | null | null | null |
# graphicsDisplay.py
# ------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from graphicsUtils import *
import math, time
from game import Directions
###########################
# GRAPHICS DISPLAY CODE #
###########################
# Most code by Dan Klein and John Denero written or rewritten for cs188, UC Berkeley.
# Some code from a Pacman implementation by LiveWires, and used / modified with permission.
DEFAULT_GRID_SIZE = 30.0
INFO_PANE_HEIGHT = 35
BACKGROUND_COLOR = formatColor(0,0,0)
WALL_COLOR = formatColor(0.0/255.0, 51.0/255.0, 255.0/255.0)
INFO_PANE_COLOR = formatColor(.4,.4,0)
SCORE_COLOR = formatColor(.9, .9, .9)
PACMAN_OUTLINE_WIDTH = 2
PACMAN_CAPTURE_OUTLINE_WIDTH = 4
GHOST_COLORS = []
GHOST_COLORS.append(formatColor(.9,0,0)) # Red
GHOST_COLORS.append(formatColor(0,.3,.9)) # Blue
GHOST_COLORS.append(formatColor(.98,.41,.07)) # Orange
GHOST_COLORS.append(formatColor(.1,.75,.7)) # Green
GHOST_COLORS.append(formatColor(1.0,0.6,0.0)) # Yellow
GHOST_COLORS.append(formatColor(.4,0.13,0.91)) # Purple
TEAM_COLORS = GHOST_COLORS[:2]
GHOST_SHAPE = [
( 0, 0.3 ),
( 0.25, 0.75 ),
( 0.5, 0.3 ),
( 0.75, 0.75 ),
( 0.75, -0.5 ),
( 0.5, -0.75 ),
(-0.5, -0.75 ),
(-0.75, -0.5 ),
(-0.75, 0.75 ),
(-0.5, 0.3 ),
(-0.25, 0.75 )
]
GHOST_SIZE = 0.65
SCARED_COLOR = formatColor(1,1,1)
#GHOST_VEC_COLORS = map(colorToVector, GHOST_COLORS)
GHOST_VEC_COLORS = [colorToVector(c) for c in GHOST_COLORS]
PACMAN_COLOR = formatColor(255.0/255.0,255.0/255.0,61.0/255)
PACMAN_SCALE = 0.5
#pacman_speed = 0.25
# Food
FOOD_COLOR = formatColor(1,1,1)
FOOD_SIZE = 0.1
# Laser
LASER_COLOR = formatColor(1,0,0)
LASER_SIZE = 0.02
# Capsule graphics
CAPSULE_COLOR = formatColor(1,1,1)
CAPSULE_SIZE = 0.25
# Drawing walls
WALL_RADIUS = 0.15
class InfoPane:
def __init__(self, layout, gridSize):
self.gridSize = gridSize
self.width = (layout.width) * gridSize
self.base = (layout.height + 1) * gridSize
self.height = INFO_PANE_HEIGHT
self.fontSize = 24
self.textColor = PACMAN_COLOR
self.drawPane()
def toScreen(self, pos, y = None):
"""
        Translates a point relative to the bottom left of the info pane.
"""
if y == None:
x,y = pos
else:
x = pos
x = self.gridSize + x # Margin
y = self.base + y
return x,y
def drawPane(self):
self.scoreText = text( self.toScreen(0, 0 ), self.textColor, "SCORE: 0", "Times", self.fontSize, "bold")
def initializeGhostDistances(self, distances):
self.ghostDistanceText = []
size = 20
if self.width < 240:
size = 12
if self.width < 160:
size = 10
for i, d in enumerate(distances):
t = text( self.toScreen(self.width/2 + self.width/8 * i, 0), GHOST_COLORS[i+1], d, "Times", size, "bold")
self.ghostDistanceText.append(t)
def updateScore(self, score):
changeText(self.scoreText, "SCORE: % 4d" % score)
def setTeam(self, isBlue):
text = "RED TEAM"
if isBlue: text = "BLUE TEAM"
self.teamText = text( self.toScreen(300, 0 ), self.textColor, text, "Times", self.fontSize, "bold")
def updateGhostDistances(self, distances):
if len(distances) == 0: return
if 'ghostDistanceText' not in dir(self): self.initializeGhostDistances(distances)
else:
for i, d in enumerate(distances):
changeText(self.ghostDistanceText[i], d)
def drawGhost(self):
pass
def drawPacman(self):
pass
def drawWarning(self):
pass
def clearIcon(self):
pass
def updateMessage(self, message):
pass
def clearMessage(self):
pass
class PacmanGraphics:
def __init__(self, zoom=1.0, frameTime=0.0, capture=False):
self.have_window = 0
self.currentGhostImages = {}
self.pacmanImage = None
self.zoom = zoom
self.gridSize = DEFAULT_GRID_SIZE * zoom
self.capture = capture
self.frameTime = frameTime
def checkNullDisplay(self):
return False
def initialize(self, state, isBlue = False):
self.isBlue = isBlue
self.startGraphics(state)
# self.drawDistributions(state)
self.distributionImages = None # Initialized lazily
self.drawStaticObjects(state)
self.drawAgentObjects(state)
# Information
self.previousState = state
def startGraphics(self, state):
self.layout = state.layout
layout = self.layout
self.width = layout.width
self.height = layout.height
self.make_window(self.width, self.height)
self.infoPane = InfoPane(layout, self.gridSize)
self.currentState = layout
def drawDistributions(self, state):
walls = state.layout.walls
dist = []
for x in range(walls.width):
distx = []
dist.append(distx)
for y in range(walls.height):
( screen_x, screen_y ) = self.to_screen( (x, y) )
block = square( (screen_x, screen_y),
0.5 * self.gridSize,
color = BACKGROUND_COLOR,
filled = 1, behind=2)
distx.append(block)
self.distributionImages = dist
def drawStaticObjects(self, state):
layout = self.layout
self.drawWalls(layout.walls)
self.food = self.drawFood(layout.food)
self.capsules = self.drawCapsules(layout.capsules)
refresh()
def drawAgentObjects(self, state):
self.agentImages = [] # (agentState, image)
for index, agent in enumerate(state.agentStates):
if agent.isPacman:
image = self.drawPacman(agent, index)
self.agentImages.append( (agent, image) )
else:
image = self.drawGhost(agent, index)
self.agentImages.append( (agent, image) )
refresh()
def swapImages(self, agentIndex, newState):
"""
Changes an image from a ghost to a pacman or vis versa (for capture)
"""
prevState, prevImage = self.agentImages[agentIndex]
for item in prevImage: remove_from_screen(item)
if newState.isPacman:
image = self.drawPacman(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
else:
image = self.drawGhost(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
refresh()
def update(self, newState):
agentIndex = newState._agentMoved
agentState = newState.agentStates[agentIndex]
if self.agentImages[agentIndex][0].isPacman != agentState.isPacman: self.swapImages(agentIndex, agentState)
prevState, prevImage = self.agentImages[agentIndex]
if agentState.isPacman:
self.animatePacman(agentState, prevState, prevImage)
else:
self.moveGhost(agentState, agentIndex, prevState, prevImage)
self.agentImages[agentIndex] = (agentState, prevImage)
if newState._foodEaten != None:
self.removeFood(newState._foodEaten, self.food)
if newState._capsuleEaten != None:
self.removeCapsule(newState._capsuleEaten, self.capsules)
self.infoPane.updateScore(newState.score)
if 'ghostDistances' in dir(newState):
self.infoPane.updateGhostDistances(newState.ghostDistances)
def make_window(self, width, height):
grid_width = (width-1) * self.gridSize
grid_height = (height-1) * self.gridSize
screen_width = 2*self.gridSize + grid_width
screen_height = 2*self.gridSize + grid_height + INFO_PANE_HEIGHT
begin_graphics(screen_width,
screen_height,
BACKGROUND_COLOR,
"CS188 Pacman")
def drawPacman(self, pacman, index):
position = self.getPosition(pacman)
screen_point = self.to_screen(position)
endpoints = self.getEndpoints(self.getDirection(pacman))
width = PACMAN_OUTLINE_WIDTH
outlineColor = PACMAN_COLOR
fillColor = PACMAN_COLOR
if self.capture:
outlineColor = TEAM_COLORS[index % 2]
fillColor = GHOST_COLORS[index]
width = PACMAN_CAPTURE_OUTLINE_WIDTH
return [circle(screen_point, PACMAN_SCALE * self.gridSize,
fillColor = fillColor, outlineColor = outlineColor,
endpoints = endpoints,
width = width)]
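    # getEndpoints returns the start/end angles (in degrees) of the arc used to
    # draw Pacman's body; the gap between them is the mouth, and its width
    # oscillates with the fractional grid position so Pacman appears to chomp
    # while moving.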
def getEndpoints(self, direction, position=(0,0)):
x, y = position
pos = x - int(x) + y - int(y)
width = 30 + 80 * math.sin(math.pi* pos)
delta = width / 2
if (direction == 'West'):
endpoints = (180+delta, 180-delta)
elif (direction == 'North'):
endpoints = (90+delta, 90-delta)
elif (direction == 'South'):
endpoints = (270+delta, 270-delta)
else:
endpoints = (0+delta, 0-delta)
return endpoints
def movePacman(self, position, direction, image):
screenPosition = self.to_screen(position)
endpoints = self.getEndpoints( direction, position )
r = PACMAN_SCALE * self.gridSize
moveCircle(image[0], screenPosition, r, endpoints)
refresh()
def animatePacman(self, pacman, prevPacman, image):
if self.frameTime < 0:
print('Press any key to step forward, "q" to play')
keys = wait_for_keys()
if 'q' in keys:
self.frameTime = 0.1
if self.frameTime > 0.01 or self.frameTime < 0:
start = time.time()
fx, fy = self.getPosition(prevPacman)
px, py = self.getPosition(pacman)
frames = 4.0
for i in range(1,int(frames) + 1):
pos = px*i/frames + fx*(frames-i)/frames, py*i/frames + fy*(frames-i)/frames
self.movePacman(pos, self.getDirection(pacman), image)
refresh()
sleep(abs(self.frameTime) / frames)
else:
self.movePacman(self.getPosition(pacman), self.getDirection(pacman), image)
refresh()
def getGhostColor(self, ghost, ghostIndex):
if ghost.scaredTimer > 0:
return SCARED_COLOR
else:
return GHOST_COLORS[ghostIndex]
def drawGhost(self, ghost, agentIndex):
pos = self.getPosition(ghost)
dir = self.getDirection(ghost)
(screen_x, screen_y) = (self.to_screen(pos) )
coords = []
for (x, y) in GHOST_SHAPE:
coords.append((x*self.gridSize*GHOST_SIZE + screen_x, y*self.gridSize*GHOST_SIZE + screen_y))
colour = self.getGhostColor(ghost, agentIndex)
body = polygon(coords, colour, filled = 1)
WHITE = formatColor(1.0, 1.0, 1.0)
BLACK = formatColor(0.0, 0.0, 0.0)
dx = 0
dy = 0
if dir == 'North':
dy = -0.2
if dir == 'South':
dy = 0.2
if dir == 'East':
dx = 0.2
if dir == 'West':
dx = -0.2
leftEye = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
rightEye = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
leftPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
rightPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
ghostImageParts = []
ghostImageParts.append(body)
ghostImageParts.append(leftEye)
ghostImageParts.append(rightEye)
ghostImageParts.append(leftPupil)
ghostImageParts.append(rightPupil)
return ghostImageParts
def moveEyes(self, pos, dir, eyes):
(screen_x, screen_y) = (self.to_screen(pos) )
dx = 0
dy = 0
if dir == 'North':
dy = -0.2
if dir == 'South':
dy = 0.2
if dir == 'East':
dx = 0.2
if dir == 'West':
dx = -0.2
moveCircle(eyes[0],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
moveCircle(eyes[1],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
moveCircle(eyes[2],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
moveCircle(eyes[3],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
def moveGhost(self, ghost, ghostIndex, prevGhost, ghostImageParts):
old_x, old_y = self.to_screen(self.getPosition(prevGhost))
new_x, new_y = self.to_screen(self.getPosition(ghost))
delta = new_x - old_x, new_y - old_y
for ghostImagePart in ghostImageParts:
move_by(ghostImagePart, delta)
refresh()
if ghost.scaredTimer > 0:
color = SCARED_COLOR
else:
color = GHOST_COLORS[ghostIndex]
edit(ghostImageParts[0], ('fill', color), ('outline', color))
self.moveEyes(self.getPosition(ghost), self.getDirection(ghost), ghostImageParts[-4:])
refresh()
def getPosition(self, agentState):
if agentState.configuration == None: return (-1000, -1000)
return agentState.getPosition()
def getDirection(self, agentState):
if agentState.configuration == None: return Directions.STOP
return agentState.configuration.getDirection()
def finish(self):
end_graphics()
def to_screen(self, point):
( x, y ) = point
#y = self.height - y
x = (x + 1)*self.gridSize
y = (self.height - y)*self.gridSize
return ( x, y )
# Fixes some TK issue with off-center circles
def to_screen2(self, point):
( x, y ) = point
#y = self.height - y
x = (x + 1)*self.gridSize
y = (self.height - y)*self.gridSize
return ( x, y )
def drawWalls(self, wallMatrix):
wallColor = WALL_COLOR
for xNum, x in enumerate(wallMatrix):
if self.capture and (xNum * 2) < wallMatrix.width: wallColor = TEAM_COLORS[0]
if self.capture and (xNum * 2) >= wallMatrix.width: wallColor = TEAM_COLORS[1]
for yNum, cell in enumerate(x):
if cell: # There's a wall here
pos = (xNum, yNum)
screen = self.to_screen(pos)
screen2 = self.to_screen2(pos)
# draw each quadrant of the square based on adjacent walls
wIsWall = self.isWall(xNum-1, yNum, wallMatrix)
eIsWall = self.isWall(xNum+1, yNum, wallMatrix)
nIsWall = self.isWall(xNum, yNum+1, wallMatrix)
sIsWall = self.isWall(xNum, yNum-1, wallMatrix)
nwIsWall = self.isWall(xNum-1, yNum+1, wallMatrix)
swIsWall = self.isWall(xNum-1, yNum-1, wallMatrix)
neIsWall = self.isWall(xNum+1, yNum+1, wallMatrix)
seIsWall = self.isWall(xNum+1, yNum-1, wallMatrix)
# NE quadrant
if (not nIsWall) and (not eIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (0,91), 'arc')
if (nIsWall) and (not eIsWall):
# vertical line
line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
if (not nIsWall) and (eIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
if (nIsWall) and (eIsWall) and (not neIsWall):
# outer circle
circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (180,271), 'arc')
line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
# NW quadrant
if (not nIsWall) and (not wIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (90,181), 'arc')
if (nIsWall) and (not wIsWall):
# vertical line
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
if (not nIsWall) and (wIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
if (nIsWall) and (wIsWall) and (not nwIsWall):
# outer circle
circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (270,361), 'arc')
line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(-1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
# SE quadrant
if (not sIsWall) and (not eIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (270,361), 'arc')
if (sIsWall) and (not eIsWall):
# vertical line
line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
if (not sIsWall) and (eIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
if (sIsWall) and (eIsWall) and (not seIsWall):
# outer circle
circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (90,181), 'arc')
line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5, self.gridSize*(1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
# SW quadrant
if (not sIsWall) and (not wIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (180,271), 'arc')
if (sIsWall) and (not wIsWall):
# vertical line
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
if (not sIsWall) and (wIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
if (sIsWall) and (wIsWall) and (not swIsWall):
# outer circle
circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (0,91), 'arc')
line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
def isWall(self, x, y, walls):
if x < 0 or y < 0:
return False
if x >= walls.width or y >= walls.height:
return False
return walls[x][y]
def drawFood(self, foodMatrix ):
foodImages = []
color = FOOD_COLOR
for xNum, x in enumerate(foodMatrix):
if self.capture and (xNum * 2) < foodMatrix.width: color = TEAM_COLORS[0]
if self.capture and (xNum * 2) >= foodMatrix.width: color = TEAM_COLORS[1]
imageRow = []
foodImages.append(imageRow)
for yNum, cell in enumerate(x):
if cell: # There's food here
screen = self.to_screen((xNum, yNum ))
dot = circle( screen,
FOOD_SIZE * self.gridSize,
outlineColor = color, fillColor = color,
width = 1)
imageRow.append(dot)
else:
imageRow.append(None)
return foodImages
def drawCapsules(self, capsules ):
capsuleImages = {}
for capsule in capsules:
( screen_x, screen_y ) = self.to_screen(capsule)
dot = circle( (screen_x, screen_y),
CAPSULE_SIZE * self.gridSize,
outlineColor = CAPSULE_COLOR,
fillColor = CAPSULE_COLOR,
width = 1)
capsuleImages[capsule] = dot
return capsuleImages
def removeFood(self, cell, foodImages ):
x, y = cell
remove_from_screen(foodImages[x][y])
def removeCapsule(self, cell, capsuleImages ):
x, y = cell
remove_from_screen(capsuleImages[(x, y)])
def drawExpandedCells(self, cells):
"""
Draws an overlay of expanded grid positions for search agents
"""
n = float(len(cells))
baseColor = [1.0, 0.0, 0.0]
self.clearExpandedCells()
self.expandedCells = []
for k, cell in enumerate(cells):
screenPos = self.to_screen( cell)
cellColor = formatColor(*[(n-k) * c * .5 / n + .25 for c in baseColor])
block = square(screenPos,
0.5 * self.gridSize,
color = cellColor,
filled = 1, behind=2)
self.expandedCells.append(block)
if self.frameTime < 0:
refresh()
def clearExpandedCells(self):
if 'expandedCells' in dir(self) and len(self.expandedCells) > 0:
for cell in self.expandedCells:
remove_from_screen(cell)
def updateDistributions(self, distributions):
"Draws an agent's belief distributions"
# copy all distributions so we don't change their state
distributions = map(lambda x: x.copy(), distributions)
if self.distributionImages == None:
self.drawDistributions(self.previousState)
for x in range(len(self.distributionImages)):
for y in range(len(self.distributionImages[0])):
image = self.distributionImages[x][y]
weights = [dist[ (x,y) ] for dist in distributions]
if sum(weights) != 0:
pass
# Fog of war
color = [0.0,0.0,0.0]
colors = GHOST_VEC_COLORS[1:] # With Pacman
if self.capture: colors = GHOST_VEC_COLORS
for weight, gcolor in zip(weights, colors):
color = [min(1.0, c + 0.95 * g * weight ** .3) for c,g in zip(color, gcolor)]
changeColor(image, formatColor(*color))
refresh()
class FirstPersonPacmanGraphics(PacmanGraphics):
def __init__(self, zoom = 1.0, showGhosts = True, capture = False, frameTime=0):
PacmanGraphics.__init__(self, zoom, frameTime=frameTime)
self.showGhosts = showGhosts
self.capture = capture
def initialize(self, state, isBlue = False):
self.isBlue = isBlue
PacmanGraphics.startGraphics(self, state)
# Initialize distribution images
walls = state.layout.walls
dist = []
self.layout = state.layout
# Draw the rest
self.distributionImages = None # initialize lazily
self.drawStaticObjects(state)
self.drawAgentObjects(state)
# Information
self.previousState = state
def lookAhead(self, config, state):
if config.getDirection() == 'Stop':
return
else:
pass
# Draw relevant ghosts
allGhosts = state.getGhostStates()
visibleGhosts = state.getVisibleGhosts()
for i, ghost in enumerate(allGhosts):
if ghost in visibleGhosts:
self.drawGhost(ghost, i)
else:
self.currentGhostImages[i] = None
def getGhostColor(self, ghost, ghostIndex):
return GHOST_COLORS[ghostIndex]
def getPosition(self, ghostState):
if not self.showGhosts and not ghostState.isPacman and ghostState.getPosition()[1] > 1:
return (-1000, -1000)
else:
return PacmanGraphics.getPosition(self, ghostState)
def add(x, y):
return (x[0] + y[0], x[1] + y[1])
# Saving graphical output
# -----------------------
# Note: to make an animated gif from this postscript output, try the command:
# convert -delay 7 -loop 1 -compress lzw -layers optimize frame* out.gif
# convert is part of imagemagick (freeware)
SAVE_POSTSCRIPT = False
POSTSCRIPT_OUTPUT_DIR = 'frames'
FRAME_NUMBER = 0
import os
def saveFrame():
"Saves the current graphical output as a postscript file"
global SAVE_POSTSCRIPT, FRAME_NUMBER, POSTSCRIPT_OUTPUT_DIR
if not SAVE_POSTSCRIPT: return
if not os.path.exists(POSTSCRIPT_OUTPUT_DIR): os.mkdir(POSTSCRIPT_OUTPUT_DIR)
name = os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_%08d.ps' % FRAME_NUMBER)
FRAME_NUMBER += 1
writePostscript(name) # writes the current canvas
| 41.254038
| 189
| 0.58347
|
042c120e4deaf8aeae06a9e2136f39d6c224c161
| 6,511
|
py
|
Python
|
rpython/rlib/rjitlog/test/test_jitlog.py
|
m4sterchain/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
rpython/rlib/rjitlog/test/test_jitlog.py
|
m4sterchain/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
rpython/rlib/rjitlog/test/test_jitlog.py
|
m4sterchain/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
[
"Apache-2.0",
"OpenSSL"
] | 30
|
2018-08-20T03:16:34.000Z
|
2022-01-12T17:39:22.000Z
|
import py
import sys
from rpython.jit.tool.oparser import pure_parse
from rpython.jit.metainterp.optimizeopt.util import equaloplists
from rpython.jit.metainterp.resoperation import ResOperation, rop
from rpython.jit.backend.model import AbstractCPU
from rpython.jit.metainterp.history import ConstInt, ConstPtr
from rpython.rlib.rjitlog import rjitlog as jl
from rpython.jit.metainterp.history import AbstractDescr
from rpython.rlib.objectmodel import compute_unique_id
class FakeCallAssemblerLoopToken(AbstractDescr):
def __init__(self, target):
self._ll_function_addr = target
def repr_of_descr(self):
return 'looptoken'
class FakeLog(object):
def __init__(self):
self.values = []
def _write_marked(self, id, text):
self.values.append(id + text)
def _get_location(greenkey_list):
assert len(greenkey_list) == 0
return '/home/pypy/jit.py', 0, 'enclosed', 99, 'DEL'
class TestLogger(object):
def make_metainterp_sd(self):
class FakeJitDriver(object):
class warmstate(object):
get_location_types = [jl.MP_FILENAME,jl.MP_INT,jl.MP_SCOPE, jl.MP_INT, jl.MP_OPCODE]
@staticmethod
def get_location(greenkey_list):
return [jl.wrap(jl.MP_FILENAME[0],'s','/home/pypy/jit.py'),
jl.wrap(jl.MP_INT[0], 'i', 0),
jl.wrap(jl.MP_SCOPE[0], 's', 'enclosed'),
jl.wrap(jl.MP_INT[0], 'i', 99),
jl.wrap(jl.MP_OPCODE[0], 's', 'DEL')
]
class FakeMetaInterpSd:
cpu = AbstractCPU()
cpu.ts = None
jitdrivers_sd = [FakeJitDriver()]
def get_name_from_address(self, addr):
return 'Name'
return FakeMetaInterpSd()
def test_debug_merge_point(self, tmpdir):
logger = jl.JitLogger()
file = tmpdir.join('binary_file')
file.ensure()
fd = file.open('wb')
jl.jitlog_init(fd.fileno())
logger.start_new_trace(self.make_metainterp_sd(), jd_name='jdname')
log_trace = logger.log_trace(jl.MARK_TRACE, None, None)
op = ResOperation(rop.DEBUG_MERGE_POINT, [ConstInt(0), ConstInt(0), ConstInt(0)])
log_trace.write([], [op])
        # closing 'fd' here takes the place of calling logger.finish()
fd.close()
binary = file.read()
is_32bit = chr(sys.maxint == 2**31-1)
assert binary == (jl.MARK_START_TRACE) + jl.encode_le_addr(1) + \
jl.encode_str('loop') + jl.encode_le_addr(0) + \
jl.encode_str('jdname') + \
(jl.MARK_TRACE) + jl.encode_le_addr(1) + \
(jl.MARK_INPUT_ARGS) + jl.encode_str('') + \
(jl.MARK_INIT_MERGE_POINT) + b'\x05\x00\x01s\x00i\x08s\x00i\x10s' + \
(jl.MARK_MERGE_POINT) + \
b'\xff' + jl.encode_str('/home/pypy/jit.py') + \
b'\x00' + jl.encode_le_64bit(0) + \
b'\xff' + jl.encode_str('enclosed') + \
b'\x00' + jl.encode_le_64bit(99) + \
b'\xff' + jl.encode_str('DEL')
def test_common_prefix(self):
fakelog = FakeLog()
compressor = jl.PrefixCompressor(1)
# nothing to compress yet!
result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','hello')])
assert result == b"\xff\x05\x00\x00\x00hello"
assert fakelog.values == []
#
result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','hello')])
assert result == b"\xef"
assert fakelog.values == [(jl.MARK_COMMON_PREFIX) + "\x00\x05\x00\x00\x00hello"]
#
fakelog.values = []
result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','heiter')])
assert result == b"\x00\x04\x00\x00\x00iter"
assert fakelog.values == [(jl.MARK_COMMON_PREFIX) + "\x00\x02\x00\x00\x00he"]
#
fakelog.values = []
result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','heute')])
assert result == b"\x00\x03\x00\x00\x00ute"
assert fakelog.values == []
#
fakelog.values = []
result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','welt')])
assert result == b"\xff\x04\x00\x00\x00welt"
assert fakelog.values == []
#
fakelog.values = []
result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','welle')])
assert result == b"\x00\x02\x00\x00\x00le"
assert fakelog.values == [(jl.MARK_COMMON_PREFIX) + "\x00\x03\x00\x00\x00wel"]
def test_common_prefix_func(self):
assert jl.commonprefix("","") == ""
assert jl.commonprefix("/hello/world","/path/to") == "/"
assert jl.commonprefix("pyramid","python") == "py"
assert jl.commonprefix("0"*100,"0"*100) == "0"*100
with py.test.raises(AssertionError):
jl.commonprefix(None,None)
def test_redirect_assembler(self, tmpdir):
looptoken = FakeCallAssemblerLoopToken(0x0)
newlooptoken = FakeCallAssemblerLoopToken(0x1234)
#
logger = jl.JitLogger()
file = tmpdir.join('binary_file')
file.ensure()
fd = file.open('wb')
jl.jitlog_init(fd.fileno())
logger.start_new_trace(self.make_metainterp_sd(), jd_name='jdname')
log_trace = logger.log_trace(jl.MARK_TRACE, None, None)
op = ResOperation(rop.CALL_ASSEMBLER_I, [], descr=looptoken)
log_trace.write([], [op])
jl.redirect_assembler(looptoken, newlooptoken, 0x1234)
        # closing 'fd' here takes the place of calling logger.finish()
fd.close()
binary = file.read()
opnum = jl.encode_le_16bit(rop.CALL_ASSEMBLER_I)
id_looptoken = compute_unique_id(looptoken)
new_id_looptoken = compute_unique_id(newlooptoken)
end = jl.MARK_RESOP_DESCR + opnum + jl.encode_str('i0,looptoken') + \
jl.encode_le_addr(id_looptoken) + jl.encode_str('') + \
jl.MARK_REDIRECT_ASSEMBLER + \
jl.encode_le_addr(id_looptoken) + \
jl.encode_le_addr(new_id_looptoken) + \
jl.encode_le_addr(newlooptoken._ll_function_addr)
assert binary.endswith(end)
| 43.697987
| 100
| 0.594532
|
2a2be8327ddd572395d975e3251b3db59ecdbbdf
| 191
|
py
|
Python
|
server/src/shared_helpers/env.py
|
JackDanger/go-links
|
7ca62bdeafc59c6523e36518fd64c293e1add280
|
[
"Apache-2.0"
] | null | null | null |
server/src/shared_helpers/env.py
|
JackDanger/go-links
|
7ca62bdeafc59c6523e36518fd64c293e1add280
|
[
"Apache-2.0"
] | null | null | null |
server/src/shared_helpers/env.py
|
JackDanger/go-links
|
7ca62bdeafc59c6523e36518fd64c293e1add280
|
[
"Apache-2.0"
] | null | null | null |
import os
def current_env_is_production():
return os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/')
def current_env_is_local():
return not current_env_is_production()
| 19.1
| 74
| 0.769634
|
6708d010ed7591822346319fec7248897003f403
| 247
|
py
|
Python
|
data_types_and_variables/exercise/01_integer_operations.py
|
Galchov/python-fundamentals
|
4939bdd1c66a7b458fd9ffd0a01d714de26724b5
|
[
"MIT"
] | null | null | null |
data_types_and_variables/exercise/01_integer_operations.py
|
Galchov/python-fundamentals
|
4939bdd1c66a7b458fd9ffd0a01d714de26724b5
|
[
"MIT"
] | null | null | null |
data_types_and_variables/exercise/01_integer_operations.py
|
Galchov/python-fundamentals
|
4939bdd1c66a7b458fd9ffd0a01d714de26724b5
|
[
"MIT"
] | null | null | null |
number_one = int(input())
number_two = int(input())
number_three = int(input())
number_four = int(input())
sum_one_two = number_one + number_two
integer_division = sum_one_two // number_three
result = integer_division * number_four
print(result)
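# Worked example (editor's illustration, not part of the original exercise):
# for inputs 3, 5, 2 and 10 the script computes (3 + 5) // 2 = 4 and then
# 4 * 10 = 40, so it prints 40.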
| 24.7
| 46
| 0.765182
|
b90bad385f11b0bb6e930a1e35e23694a1812340
| 1,240
|
py
|
Python
|
legacy/steps/deployer/__init__.py
|
ParikhKadam/zenml
|
867e4d4c982a50447bd182b30af37f2141dac5a4
|
[
"Apache-2.0"
] | 1,275
|
2020-11-19T14:18:25.000Z
|
2021-08-13T07:31:39.000Z
|
legacy/steps/deployer/__init__.py
|
ParikhKadam/zenml
|
867e4d4c982a50447bd182b30af37f2141dac5a4
|
[
"Apache-2.0"
] | 62
|
2020-11-30T16:06:14.000Z
|
2021-08-10T08:34:52.000Z
|
legacy/steps/deployer/__init__.py
|
ParikhKadam/zenml
|
867e4d4c982a50447bd182b30af37f2141dac5a4
|
[
"Apache-2.0"
] | 75
|
2020-12-22T19:15:08.000Z
|
2021-08-13T03:07:50.000Z
|
# Copyright (c) ZenML GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from zenml.steps.deployer.base_deployer import BaseDeployerStep
from zenml.steps.deployer.gcaip_deployer import GCAIPDeployer
from zenml.logger import get_logger
from zenml.utils.requirement_utils import check_integration, \
CORTEX_INTEGRATION
logger = get_logger(__name__)
try:
check_integration(CORTEX_INTEGRATION)
from zenml.steps.deployer.cortex_deployer import CortexDeployer
except ModuleNotFoundError as e:
logger.debug(f"There were failed imports due to missing integrations. "
f"HuggingFaceTokenizerStep was not imported. "
f"More information:")
logger.debug(e)
| 40
| 75
| 0.760484
|
821813b4286ce228612b9f828ed5f148b4a9f635
| 7,981
|
py
|
Python
|
community_selection.py
|
anguoyuan/Hypergraph-Propagation-and-Community-Selection-for-Objects-Retrieval
|
5595abca7eb231cd6edf8db293ad5d1d2d4b7121
|
[
"MIT"
] | 13
|
2021-11-08T08:18:05.000Z
|
2022-01-07T05:56:30.000Z
|
community_selection.py
|
anguoyuan/Hypergraph-Propagation-and-Community-Selection-for-Objects-Retrieval
|
5595abca7eb231cd6edf8db293ad5d1d2d4b7121
|
[
"MIT"
] | 1
|
2021-11-12T20:21:53.000Z
|
2021-12-21T14:57:25.000Z
|
community_selection.py
|
anguoyuan/Hypergraph-Propagation-and-Community-Selection-for-Objects-Retrieval
|
5595abca7eb231cd6edf8db293ad5d1d2d4b7121
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 5 16:02:42 2021
@author: Guoyuan An
"""
#prepare the uncertainty calculation
from scipy.stats import entropy
import numpy as np
import pickle
def prepare_community_selection(dataset,COMMUNITY_SELECTION):
graph_directaries={'roxford':'graph/delg/roxford/0301/',
'rparis': 'graph/delg/rparis/',
'R1Moxford': 'graph/delg/R1Moxford/',
'R1Mparis':'graph/delg/R1Mparis/'}
global _graph_dir, Neighbors,_dataset_meta, _dataset
_dataset=dataset
try:
graph_dir=graph_directaries[_dataset]
except:
print('only allow rparis, roxford, R1Moxford, and R1Mparis')
#load Neighbors
Neighbors=np.load(graph_dir+'Neighbors.npy',allow_pickle=True).item() #dict. key is image index, value is list of neighbor image indexes
#load the ground truth file
if _dataset=='roxford':
with open('data/roxford/gnd_roxford5k.pkl','rb') as f:
_dataset_meta=pickle.load(f)
elif _dataset=='rparis':
with open('data/rparis/gnd_rparis6k.pkl','rb') as f:
_dataset_meta=pickle.load(f)
if COMMUNITY_SELECTION==2:
#prepare the delg match
global _geom,_features,_qgeom,_qfeatures
def _read_delg_index():
#return the list of index features and locations; list of array
geom,features=[],[]
for img in _dataset_meta['imlist']:
geom_path='features/'+_dataset+'_np_delg_features/'+img+'_local_locations.npy'
features_path='features/'+_dataset+'_np_delg_features/'+img+'_local_descriptors.npy'
geom.append(np.load(geom_path))
features.append(np.load(features_path))
return geom, features
_geom,_features=_read_delg_index()
def _read_delg_query():
        #return the list of query features and locations; list of arrays
geom,features=[],[]
for img in _dataset_meta['qimlist']:
geom_path='features/'+_dataset+'_np_delg_features/'+img+'_local_locations.npy'
features_path='features/'+_dataset+'_np_delg_features/'+img+'_local_descriptors.npy'
geom.append(np.load(geom_path))
features.append(np.load(features_path))
return geom, features
_qgeom,_qfeatures=_read_delg_query()
missing=[]
def extract_sub_graph(first_search,bound=100):
def add_to_subgraph(img):
potential=[]
# If added to existing sub_graph, return True
for i,s in enumerate(sub_graph):
if len( set(Neighbors[img]).intersection(s) )!=0:
potential.append(s)
sub_graph.remove(s)
if len(potential)==1:
s=potential[0]
s.add(img)
sub_graph.append(s)
return True
elif len(potential)>1:
s=set([img])
for x in potential:
s.update(x)
sub_graph.append(s)
return True
else:
return False
    sub_graph=list() # list of sets, made up of the connected components
for i,img in enumerate(first_search[:bound]):
#try to add to existing sub_graph
        tmp=add_to_subgraph(img) # tmp is True if the image was successfully added
#otherwise, try to connect with other remaining nodes
if tmp==False:
s=set(Neighbors[img]).intersection(first_search[i:bound])
s.add(img) # in case Neighbors[img] doesn't contain img
sub_graph.append(s)
return sub_graph
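# Illustrative sketch (editor's addition; the Neighbors dict here is hypothetical,
# the real one is loaded from Neighbors.npy in prepare_community_selection):
# with Neighbors = {1: [2], 2: [1], 3: []} and first_search = [1, 2, 3],
# extract_sub_graph(first_search) groups the ranked images into the
# connected components [{1, 2}, {3}].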
def calculate_entropy(sub_graph):
#calculate entropy
length=sum([len(s) for s in sub_graph])
numbers=[len(community)/length for community in sub_graph]
e=entropy(numbers)
return e
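# Worked example (editor's addition): two communities with 3 and 1 members give
# probabilities [0.75, 0.25], so calculate_entropy([{'a', 'b', 'c'}, {'d'}])
# equals entropy([0.75, 0.25]), which is roughly 0.562 (natural logarithm).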
def _inlier_Region(inlier_locs):
    '''
    Calculate the area of the axis-aligned bounding box of the inlier locations.
    Parameters
    ----------
    inlier_locs : numpy array of shape (n, 2)
        Row/column coordinates of the inlier keypoints.
    Returns
    -------
    size : float
        Area of the bounding box enclosing all inliers.
    '''
down,up=np.max(inlier_locs[:,0]),np.min(inlier_locs[:,0])
right,left=np.max(inlier_locs[:,1]),np.min(inlier_locs[:,1])
size=(right-left)*(down-up)
return size
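# Small numeric check of the bounding box above (editor's illustration): for
# inlier locations [[0, 0], [2, 3], [1, 1]] the box spans rows 0..2 and
# columns 0..3, so _inlier_Region(np.array([[0, 0], [2, 3], [1, 1]])) == 6.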
N_RANSAC=0
from utils.image_reranking import MatchFeatures
def match_one_pair_delg(query, index):
global N_RANSAC
N_RANSAC+=1
query_locations=_qgeom[query]
query_descriptors=_qfeatures[query]
index_locations=_geom[index]
index_descriptors=_features[index]
# inlier_locations: list [array([400., 176.]),...]
inlier_number,match_viz_io,query_inlier_locations,index_inlier_locations,q2i,i2q=MatchFeatures(
query_locations,query_descriptors,
index_locations, index_descriptors,
ransac_seed=None, descriptor_matching_threshold=0.9,
ransac_residual_threshold=10.0,use_ratio_test=False)
if inlier_number != 0:
query_inlier_locations=np.array(query_inlier_locations)
        index_inlier_locations=np.array(index_inlier_locations) # np array, e.g. shape (6, 2)
index_size=_inlier_Region(index_inlier_locations)
query_size=_inlier_Region(query_inlier_locations)
return inlier_number,index_size
else:
return 0,999999999
################################################################################
#to find the new dominant image
import cv2
def find_dominant(Gs,first_search,q):
#use sift for spatial matching
dominant_image=first_search[0]
for i in first_search[:100]:
global N_RANSAC
N_RANSAC+=1
img1 = cv2.imread('data/oxford5k/jpg/'+_dataset_meta['qimlist'][q]+'.jpg',cv2.IMREAD_COLOR)
a,b,c,d=_dataset_meta['gnd'][q]['bbx']
left, upper, right, lower=int(a),int(b),int(c),int(d) #(left, upper, right, lower)
img1=img1[upper:lower,left:right] #[y1:y2, x1:x2]
gray1 = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
img2 = cv2.imread('data/oxford5k/jpg/'+_dataset_meta['imlist'][i]+'.jpg',cv2.IMREAD_COLOR)
gray2 = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
sift = cv2.SIFT_create()
        # Use SIFT to find the key points and descriptors
kp1,des1 = sift.detectAndCompute(gray1, None)
kp2,des2 = sift.detectAndCompute(gray2, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# store all the good matches as per Lowe's ratio test.
# we don't allow a feature in index image to be matched by several query features
good = []
matched_features=set()
for m,n in matches:
if m.distance < 0.7*n.distance and (m.trainIdx not in matched_features):
good.append(m)
matched_features.add(m.trainIdx)
if len(good)<4:
continue
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()
if sum(matchesMask)>20:
dominant_image=i
break
return dominant_image
| 33.961702
| 141
| 0.587019
|
83e473452747651e72e59ffe90441d7572e844b5
| 9,624
|
py
|
Python
|
examples/vpn-plugin-radius-auth/vpn_plugin_radius_auth.py
|
coalfire/auth-script-openvpn
|
b999bfaf77b35dd7a42eb3c53e026f700e5b2bcc
|
[
"Apache-2.0"
] | null | null | null |
examples/vpn-plugin-radius-auth/vpn_plugin_radius_auth.py
|
coalfire/auth-script-openvpn
|
b999bfaf77b35dd7a42eb3c53e026f700e5b2bcc
|
[
"Apache-2.0"
] | null | null | null |
examples/vpn-plugin-radius-auth/vpn_plugin_radius_auth.py
|
coalfire/auth-script-openvpn
|
b999bfaf77b35dd7a42eb3c53e026f700e5b2bcc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
vpn-plugin-radius-auth
authenticate against radius with username and password.
Use with auth-script-openvpn
"""
import logging
from logging.handlers import RotatingFileHandler
import os
import sys
import configargparse
from pyrad.client import Client
from pyrad.dictionary import Dictionary
import pyrad.packet
def get_logger(location, level=logging.INFO):
"""
    Accept location (path to the log file),
optional level (logging level, defaulting to logging.INFO).
Return a logger object.
"""
logger = logging.getLogger("vpn_plugin_radius_auth")
logger.setLevel(level)
channel = logging.StreamHandler()
channel_fmt = logging.Formatter("%(levelname)s - %(message)s")
channel.setFormatter(channel_fmt)
logger.addHandler(channel)
filehandle = RotatingFileHandler(location, maxBytes=10240, backupCount=10)
filehandle_fmt = logging.Formatter(
"%(asctime)s - %(levelname)s - %(message)s", "%Y-%m-%dT%H:%M:%S"
)
filehandle.setFormatter(filehandle_fmt)
logger.addHandler(filehandle)
return logger
def write_dictionary(location: str) -> str:
    """
    Accept location (path to the dictionary file).
    Write a default radius dictionary to the location if it does not exist.
    Return the location (a truthy value), so callers can treat it as success.
    """
rad_dictionary = """
ATTRIBUTE User-Name 1 string
ATTRIBUTE User-Password 2 string encrypt=1
ATTRIBUTE NAS-IP-Address 3 ipaddr
"""
# Dedent
dict_fmt = "\n".join(line.strip() for line in rad_dictionary.split("\n"))
if not os.path.exists(location):
with open(location, "w", encoding="utf-8") as my_dictionary:
my_dictionary.write(dict_fmt)
return location
code_meaning = {
1: "AccessRequest",
2: "AccessAccept",
3: "AccessReject",
4: "AccountingRequest",
5: "AccountingResponse",
11: "AccessChallenge",
12: "StatusServer",
13: "StatusClient",
40: "DisconnectRequest",
41: "DisconnectACK",
42: "DisconnectNAK",
43: "CoARequest",
44: "CoAACK",
45: "CoANAK",
}
def _is_authorized_p(server: str, credentials: dict, logger):
"""
Accept server (str), credentials (dict), logger.
return True if server authorizes credentials,
    False if the server denies authorization,
None on error.
"""
port = credentials["port"]
dictionary = credentials["dictionary"]
username = credentials["username"]
nas_ip = credentials["nas_ip"]
encoded_secret = bytes(credentials["shared_secret"], encoding="utf-8")
logger.info(
"Creating radius client of %s:%d using dictionary %s for config %s",
server,
port,
dictionary,
credentials["config"],
)
client = Client(
server=server,
secret=encoded_secret,
dict=Dictionary(dictionary),
authport=port,
timeout=10,
)
logger.info(
"Creating AuthPacket for user %s from %s",
username,
nas_ip,
)
request = client.CreateAuthPacket(
code=pyrad.packet.AccessRequest,
User_Name=username,
NAS_IP_Address=nas_ip,
)
logger.info(
"Encrypting password for %s",
username,
)
# For reasons I do not understand,
# we receive non-Latin characters ASCII-encoded with surrogate escapes.
# To feed this to pyrad.packet, we want unicode.
# So convert it to bytes, and back out to UTF-8.
recoded = (
credentials["password"]
.encode("ASCII", "surrogateescape")
.decode("UTF-8")
)
request["User-Password"] = request.PwCrypt(recoded)
logger.info(
"sending AccessRequest for user %s to %s:%d",
username,
server,
port,
)
if credentials["dry_run"] is True:
reply_code = pyrad.packet.AccessAccept
else:
try:
reply_code = client.SendPacket(request).code
except Exception as err:
logger.error(
"error sending AccessRequest for user %s to %s:%d: %s: %s",
username,
server,
port,
type(err),
err,
)
return None
logger.info(
"got reply code %d: %s for user %s from %s:%d",
reply_code,
code_meaning.get(reply_code, "unknown"),
username,
server,
port,
)
return reply_code == pyrad.packet.AccessAccept
def any_and_not_false(iterable: list) -> bool:
"""
Accept iter.
A reimplementation of any,
    with the difference that the first False short-circuits.
The purpose of this is to short circuit
the moment we get an auth or a denial from a radius server,
while letting non-responses fail over to the next server.
>>> any_and_not_false([False, True, None])
False
>>> any_and_not_false([False, None, True])
False
>>> any_and_not_false([True, False, None])
True
>>> any_and_not_false([True, None, False])
True
>>> any_and_not_false([None, True, False])
True
>>> any_and_not_false([None, False, False])
False
>>> any_and_not_false([None, None])
False
"""
for item in iterable:
if item:
return True
if item is False:
return False
return False
def is_authorized_p(credentials: dict, logger) -> bool:
"""
Accept credentials (dict).
return True if credentials are authorized.
else False.
"""
c = credentials
if not write_dictionary(c["dictionary"]):
return False
return any_and_not_false(
_is_authorized_p(server, c, logger) for server in c["servers"]
)
def write_auth_file(authorized: bool, auth_file: str) -> None:
    """
    Accept authorized (bool),
    auth_file (path to the auth control file).
Write 1 to auth_file if authorized is True;
otherwise write 0 to auth_file.
FIXME what happens to openvpn if we can't write the file?
"""
auth_message = str(int(authorized)) + "\n"
with open(auth_file, "w", encoding="utf-8") as output:
output.write(auth_message)
def main():
"""
Parse args,
get environment,
set umask,
pass off to is_authorized_p and write_auth_file.
"""
parser = configargparse.ArgumentParser()
parser.add_argument(
"-c",
"--config",
help="""Use config file CONFIG.
Defaults to /etc/openvpn/radius/auth.conf""",
is_config_file=True,
default="/etc/openvpn/radius/auth.conf",
)
parser.add_argument(
"-S",
"--servers",
help="""Servers to send packets to, in order of preference.
Config file syntax is 'servers = [server_1, server_2]'""",
required=True,
metavar="SERVER",
nargs="+",
)
parser.add_argument(
"-p",
"--port",
help="""Port to send radius packets to.
Default is 1812.""",
default=1812,
type=int,
)
parser.add_argument(
"-s",
"--shared-secret",
help="Radius shared secret",
required=True,
)
parser.add_argument(
"-i",
"--nas-ip",
help="""IP to report to the radius server where this packet comes from.
Defaults to 127.0.0.1""",
default="127.0.0.1",
)
parser.add_argument(
"-d",
"--dictionary-location",
help="""filesystem location of the radius dictionary to use.
Defaults to /etc/openvpn/radius-vpn.dictionary.
This file will be written with default settings if it does not exist.""",
default="/etc/openvpn/radius-vpn.dictionary",
dest="dictionary",
)
parser.add_argument(
"-L",
"--log-location",
help="""Where to log to.
Defaults to /var/log/openvpn/vpn-plugin-radius-auth.log
""",
default="/var/log/openvpn/vpn-plugin-radius-auth.log",
)
parser.add_argument(
"-D",
"--dry-run",
help="""Assemble and print packet;
do not send it.
Treat auth as successful.
""",
action="store_true",
)
args = parser.parse_args()
logger = get_logger(location=args.log_location)
cred_items = [
"servers",
"port",
"shared_secret",
"nas_ip",
"dictionary",
"dry_run",
]
credentials = {k: v for k, v in vars(args).items() if k in cred_items}
credentials["username"] = os.environ.get("username", "fake_user")
credentials["password"] = os.environ.get("password", "fake_password")
credentials["config"] = os.environ.get("config", "fake_config")
auth_control_file = os.environ.get(
"auth_control_file", "fake_auth_control_file"
)
# We will be writing logs, auth_file, and possibly a dictionary.
# all should be 644 permissions.
os.umask(0o133)
# Any exception means we should not authorize...
try:
authorized = is_authorized_p(credentials, logger)
except Exception as err:
logger.error(
"exception authorizing %s: %s: %s",
credentials["username"],
type(err),
err,
)
authorized = False
logger.info("user %s authorized: %s", credentials["username"], authorized)
try:
write_auth_file(authorized, auth_control_file)
except Exception as err:
logger.error(err)
# ... and we always want to exit successfully
sys.exit(0)
if __name__ == "__main__":
main()
| 27.575931
| 84
| 0.601205
|
19929d099cf2d46fc7c83060fb911192e1a4ba8d
| 8,994
|
py
|
Python
|
core/generate_hact_graphs.py
|
histocartography/hact-net
|
4b66b41a2f00b768029b6aaa1152c746faf2b9c6
|
[
"MIT"
] | 16
|
2021-06-11T05:23:43.000Z
|
2022-03-23T20:30:42.000Z
|
core/generate_hact_graphs.py
|
yujing1997/hact-net
|
4b66b41a2f00b768029b6aaa1152c746faf2b9c6
|
[
"MIT"
] | 4
|
2021-06-02T02:58:10.000Z
|
2022-03-22T12:31:33.000Z
|
core/generate_hact_graphs.py
|
yujing1997/hact-net
|
4b66b41a2f00b768029b6aaa1152c746faf2b9c6
|
[
"MIT"
] | 6
|
2021-05-30T23:36:57.000Z
|
2022-03-22T12:31:58.000Z
|
"""
Extract HACT graphs for all the sample in the BRACS dataset.
"""
import os
from glob import glob
import argparse
from PIL import Image
import numpy as np
from tqdm import tqdm
import torch
from dgl.data.utils import save_graphs
import h5py
from histocartography.preprocessing import (
VahadaneStainNormalizer, # stain normalizer
NucleiExtractor, # nuclei detector
DeepFeatureExtractor, # feature extractor
KNNGraphBuilder, # kNN graph builder
ColorMergedSuperpixelExtractor, # tissue detector
DeepFeatureExtractor, # feature extractor
RAGGraphBuilder, # build graph
AssignmnentMatrixBuilder # assignment matrix
)
# BRACS subtype to 7-class label
TUMOR_TYPE_TO_LABEL = {
'N': 0,
'PB': 1,
'UDH': 2,
'ADH': 3,
'FEA': 4,
'DCIS': 5,
'IC': 6
}
MIN_NR_PIXELS = 50000
MAX_NR_PIXELS = 50000000
STAIN_NORM_TARGET_IMAGE = '../data/target.png' # define stain normalization target image.
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_path',
type=str,
help='path to the BRACS data.',
default='',
required=False
)
parser.add_argument(
'--save_path',
type=str,
help='path to save the cell graphs.',
default='../data/',
required=False
)
return parser.parse_args()
class HACTBuilding:
def __init__(self):
# 1. define stain normalizer
self.normalizer = VahadaneStainNormalizer(target_path=STAIN_NORM_TARGET_IMAGE)
# 2. define CG builders
self._build_cg_builders()
# 3. define TG builders
self._build_tg_builders()
# 4. define assignment matrix builder
self.assignment_matrix_builder = AssignmnentMatrixBuilder()
# 5. define var to store image IDs that failed (for whatever reason)
self.image_ids_failing = []
def _build_cg_builders(self):
# a define nuclei extractor
self.nuclei_detector = NucleiExtractor()
# b define feature extractor: Extract patches of 72x72 pixels around each
# nucleus centroid, then resize to 224 to match ResNet input size.
self.nuclei_feature_extractor = DeepFeatureExtractor(
architecture='resnet34',
patch_size=72,
resize_size=224
)
# c define k-NN graph builder with k=5 and thresholding edges longer
# than 50 pixels. Add image size-normalized centroids to the node features.
# For e.g., resulting node features are 512 features from ResNet34 + 2
# normalized centroid features.
self.knn_graph_builder = KNNGraphBuilder(k=5, thresh=50, add_loc_feats=True)
def _build_tg_builders(self):
        # a define tissue detector (superpixel extractor)
self.tissue_detector = ColorMergedSuperpixelExtractor(
superpixel_size=500,
compactness=20,
blur_kernel_size=1,
threshold=0.05,
downsampling_factor=4
)
# b define feature extractor: Extract patches of 144x144 pixels all over
# the tissue regions. Each patch is resized to 224 to match ResNet input size.
self.tissue_feature_extractor = DeepFeatureExtractor(
architecture='resnet34',
patch_size=144,
resize_size=224
)
# c define RAG builder. Append normalized centroid to the node features.
self.rag_graph_builder = RAGGraphBuilder(add_loc_feats=True)
def _build_cg(self, image):
nuclei_map, nuclei_centroids = self.nuclei_detector.process(image)
features = self.nuclei_feature_extractor.process(image, nuclei_map)
graph = self.knn_graph_builder.process(nuclei_map, features)
return graph, nuclei_centroids
def _build_tg(self, image):
superpixels, _ = self.tissue_detector.process(image)
features = self.tissue_feature_extractor.process(image, superpixels)
graph = self.rag_graph_builder.process(superpixels, features)
return graph, superpixels
def process(self, image_path, save_path, split):
# 1. get image path
subdirs = os.listdir(image_path)
image_fnames = []
for subdir in (subdirs + ['']): # look for all the subdirs AND the image path
image_fnames += glob(os.path.join(image_path, subdir, '*.png'))
print('*** Start analysing {} images ***'.format(len(image_fnames)))
for image_path in tqdm(image_fnames):
# a. load image & check if already there
_, image_name = os.path.split(image_path)
image = np.array(Image.open(image_path))
nr_pixels = image.shape[0] * image.shape[1]
image_label = TUMOR_TYPE_TO_LABEL[image_name.split('_')[2]]
cg_out = os.path.join(save_path, 'cell_graphs', split, image_name.replace('.png', '.bin'))
tg_out = os.path.join(save_path, 'tissue_graphs', split, image_name.replace('.png', '.bin'))
assign_out = os.path.join(save_path, 'assignment_matrices', split, image_name.replace('.png', '.h5'))
# if file was not already created + not too big + not too small, then process
if not self._exists(cg_out, tg_out, assign_out) and self._valid_image(nr_pixels):
# b. stain norm the image
try:
image = self.normalizer.process(image)
except:
print('Warning: {} failed during stain normalization.'.format(image_path))
self.image_ids_failing.append(image_path)
pass
try:
cell_graph, nuclei_centroid = self._build_cg(image)
save_graphs(
filename=cg_out,
g_list=[cell_graph],
labels={"label": torch.tensor([image_label])}
)
except:
print('Warning: {} failed during cell graph generation.'.format(image_path))
self.image_ids_failing.append(image_path)
pass
try:
tissue_graph, tissue_map = self._build_tg(image)
save_graphs(
filename=tg_out,
g_list=[tissue_graph],
labels={"label": torch.tensor([image_label])}
)
except:
print('Warning: {} failed during tissue graph generation.'.format(image_path))
self.image_ids_failing.append(image_path)
pass
try:
assignment_matrix = self.assignment_matrix_builder.process(nuclei_centroid, tissue_map)
with h5py.File(assign_out, "w") as output_file:
output_file.create_dataset(
"assignment_matrix",
data=assignment_matrix,
compression="gzip",
compression_opts=9,
)
except:
print('Warning: {} failed during assignment matrix generation.'.format(image_path))
self.image_ids_failing.append(image_path)
pass
else:
print('Image:', image_path, ' was already processed or is too large/small.')
print('Out of {} images, {} successful HACT graph generations.'.format(
len(image_fnames),
len(image_fnames) - len(self.image_ids_failing)
))
print('Failing IDs are:', self.image_ids_failing)
def _valid_image(self, nr_pixels):
if nr_pixels > MIN_NR_PIXELS and nr_pixels < MAX_NR_PIXELS:
return True
return False
def _exists(self, cg_out, tg_out, assign_out):
if os.path.isfile(cg_out) and os.path.isfile(tg_out) and os.path.isfile(assign_out):
return True
return False
if __name__ == "__main__":
# 1. handle i/o
args = parse_arguments()
if not os.path.isdir(args.image_path) or not os.listdir(args.image_path):
raise ValueError("Data directory is either empty or does not exist.")
split = ''
if 'train' in args.image_path:
split = 'train'
elif 'val' in args.image_path:
split = 'val'
else:
split = 'test'
os.makedirs(os.path.join(args.save_path, 'cell_graphs', split), exist_ok=True)
os.makedirs(os.path.join(args.save_path, 'tissue_graphs', split), exist_ok=True)
os.makedirs(os.path.join(args.save_path, 'assignment_matrices', split), exist_ok=True)
# 2. generate HACT graphs one-by-one, will automatically
# run on GPU if available.
hact_builder = HACTBuilding()
hact_builder.process(args.image_path, args.save_path, split)
| 37.012346
| 113
| 0.603736
|
6c11a9bfc1a262152ea0a9ed3ebcc257fa29cd86
| 705
|
py
|
Python
|
Handout/pygame.i/referencia/config.py
|
pedroivoal/Dessoft
|
da4da1b48112db8da6a6b2ac5beef396c6d624d7
|
[
"MIT"
] | null | null | null |
Handout/pygame.i/referencia/config.py
|
pedroivoal/Dessoft
|
da4da1b48112db8da6a6b2ac5beef396c6d624d7
|
[
"MIT"
] | null | null | null |
Handout/pygame.i/referencia/config.py
|
pedroivoal/Dessoft
|
da4da1b48112db8da6a6b2ac5beef396c6d624d7
|
[
"MIT"
] | null | null | null |
from os import path
# Set the folder that contains the images and sounds.
IMG_DIR = path.join(path.dirname(__file__), 'assets', 'img')
SND_DIR = path.join(path.dirname(__file__), 'assets', 'snd')
FNT_DIR = path.join(path.dirname(__file__), 'assets', 'font')
# General game settings.
WIDTH = 480 # Screen width
HEIGHT = 600 # Screen height
FPS = 60 # Frames per second
# Define sizes
METEOR_WIDTH = 50
METEOR_HEIGHT = 38
SHIP_WIDTH = 50
SHIP_HEIGHT = 38
# Define some variables with the basic colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
# States for controlling the application flow
INIT = 0
GAME = 1
QUIT = 2
| 22.741935
| 61
| 0.689362
|
cd214604cbbaaddc0fa132092702d79c4c6c77aa
| 7,789
|
py
|
Python
|
flexget/logger.py
|
RSully/flexget-flexget
|
ab36590e569511a43c1e35b1dfae9b7fb8db1535
|
[
"MIT"
] | null | null | null |
flexget/logger.py
|
RSully/flexget-flexget
|
ab36590e569511a43c1e35b1dfae9b7fb8db1535
|
[
"MIT"
] | null | null | null |
flexget/logger.py
|
RSully/flexget-flexget
|
ab36590e569511a43c1e35b1dfae9b7fb8db1535
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, unicode_literals, print_function
import collections
import contextlib
import logging
import logging.handlers
import sys
import threading
import uuid
import warnings
from flexget import __version__
from flexget.utils.tools import io_encoding
# A level more detailed than DEBUG
TRACE = 5
# A level more detailed than INFO
VERBOSE = 15
# Stores `task`, logging `session_id`, and redirected `output` stream in a thread local context
local_context = threading.local()
def get_level_no(level):
if not isinstance(level, int):
# Python logging api is horrible. This is getting the level number, which is required on python 2.6.
level = logging.getLevelName(level.upper())
return level
@contextlib.contextmanager
def task_logging(task):
"""Context manager which adds task information to log messages."""
old_task = getattr(local_context, 'task', '')
local_context.task = task
try:
yield
finally:
local_context.task = old_task
class SessionFilter(logging.Filter):
def __init__(self, session_id):
self.session_id = session_id
def filter(self, record):
return getattr(record, 'session_id', None) == self.session_id
@contextlib.contextmanager
def capture_output(stream, loglevel=None):
"""Context manager which captures all log and console output to given `stream` while in scope."""
root_logger = logging.getLogger()
old_level = root_logger.getEffectiveLevel()
old_id = getattr(local_context, 'session_id', None)
# Keep using current, or create one if none already set
local_context.session_id = old_id or uuid.uuid4()
old_output = getattr(local_context, 'output', None)
old_loglevel = getattr(local_context, 'loglevel', None)
streamhandler = logging.StreamHandler(stream)
streamhandler.setFormatter(FlexGetFormatter())
streamhandler.addFilter(SessionFilter(local_context.session_id))
if loglevel is not None:
loglevel = get_level_no(loglevel)
streamhandler.setLevel(loglevel)
# If requested loglevel is lower than the root logger is filtering for, we need to turn it down.
# All existing handlers should have their desired level set and not be affected.
if not root_logger.isEnabledFor(loglevel):
root_logger.setLevel(loglevel)
local_context.output = stream
local_context.loglevel = loglevel
root_logger.addHandler(streamhandler)
try:
yield
finally:
root_logger.removeHandler(streamhandler)
root_logger.setLevel(old_level)
local_context.session_id = old_id
local_context.output = old_output
local_context.loglevel = old_loglevel
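def _capture_output_example():
    """Hedged usage sketch added for illustration; not part of the original module.

    Uses the module's own RollingBuffer (defined below) as the capture stream,
    since it is file-like enough for the StreamHandler created by capture_output.
    The logger name used here is arbitrary.
    """
    buffer = RollingBuffer(maxlen=100)
    example_log = logging.getLogger('flexget.capture_example')
    with capture_output(buffer, loglevel='INFO'):
        example_log.info('this line is redirected into the buffer')
    return list(buffer)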
def get_capture_stream():
"""If output is currently being redirected to a stream, returns that stream."""
return getattr(local_context, 'output', None)
def get_capture_loglevel():
"""If output is currently being redirected to a stream, returns declared loglevel for that stream."""
return getattr(local_context, 'loglevel', None)
def console(text):
"""
    Print to console safely. Output can be captured by different streams in different contexts.
Any plugin wishing to output to the user's console should use this function instead of print so that
output can be redirected when FlexGet is invoked from another process.
"""
if not isinstance(text, str):
text = unicode(text).encode(io_encoding, 'replace')
output = getattr(local_context, 'output', sys.stdout)
print(text, file=output)
class RollingBuffer(collections.deque):
"""File-like that keeps a certain number of lines of text in memory."""
def write(self, line):
self.append(line)
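# Editor's illustration (not from the original file): a RollingBuffer keeps only
# the newest `maxlen` lines, e.g.
#   tail = RollingBuffer(maxlen=2)
#   for line in ('first\n', 'second\n', 'third\n'):
#       tail.write(line)
#   assert list(tail) == ['second\n', 'third\n']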
class FlexGetLogger(logging.Logger):
"""Custom logger that adds trace and verbose logging methods, and contextual information to log records."""
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
extra = extra or {}
extra.update(
task=getattr(local_context, 'task', ''),
session_id=getattr(local_context, 'session_id', ''))
# Replace newlines in log messages with \n
if isinstance(msg, basestring):
msg = msg.replace('\n', '\\n')
return logging.Logger.makeRecord(self, name, level, fn, lno, msg, args, exc_info, func, extra)
def trace(self, msg, *args, **kwargs):
"""Log at TRACE level (more detailed than DEBUG)."""
self.log(TRACE, msg, *args, **kwargs)
def verbose(self, msg, *args, **kwargs):
"""Log at VERBOSE level (displayed when FlexGet is run interactively.)"""
self.log(VERBOSE, msg, *args, **kwargs)
class FlexGetFormatter(logging.Formatter):
"""Custom formatter that can handle both regular log records and those created by FlexGetLogger"""
flexget_fmt = '%(asctime)-15s %(levelname)-8s %(name)-13s %(task)-15s %(message)s'
def __init__(self):
logging.Formatter.__init__(self, self.flexget_fmt, '%Y-%m-%d %H:%M')
def format(self, record):
if not hasattr(record, 'task'):
record.task = ''
return logging.Formatter.format(self, record)
_logging_configured = False
_buff_handler = None
_logging_started = False
# Stores the last 50 debug messages
debug_buffer = RollingBuffer(maxlen=50)
def initialize(unit_test=False):
"""Prepare logging.
"""
global _logging_configured, _logging_started, _buff_handler
if _logging_configured:
return
if 'dev' in __version__:
warnings.filterwarnings('always', category=DeprecationWarning, module='flexget.*')
warnings.simplefilter('once', append=True)
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(VERBOSE, 'VERBOSE')
_logging_configured = True
# with unit test we want a bit simpler setup
if unit_test:
logging.basicConfig()
_logging_started = True
return
    # Store any log messages in a buffer until the `start` function is run
logger = logging.getLogger()
_buff_handler = logging.handlers.BufferingHandler(1000 * 1000)
logger.addHandler(_buff_handler)
logger.setLevel(logging.NOTSET)
    # Add a handler that stores the last 50 debug lines to `debug_buffer` for use in crash reports
crash_handler = logging.StreamHandler(debug_buffer)
crash_handler.setLevel(logging.DEBUG)
crash_handler.setFormatter(FlexGetFormatter())
logger.addHandler(crash_handler)
def start(filename=None, level=logging.INFO, to_console=True, to_file=True):
"""After initialization, start file logging.
"""
global _logging_started
assert _logging_configured
if _logging_started:
return
# root logger
logger = logging.getLogger()
level = get_level_no(level)
logger.setLevel(level)
formatter = FlexGetFormatter()
if to_file:
file_handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1000 * 1024, backupCount=9)
file_handler.setFormatter(formatter)
file_handler.setLevel(level)
logger.addHandler(file_handler)
# without --cron we log to console
if to_console:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
console_handler.setLevel(level)
logger.addHandler(console_handler)
# flush what we have stored from the plugin initialization
logger.removeHandler(_buff_handler)
if _buff_handler:
for record in _buff_handler.buffer:
if logger.isEnabledFor(record.levelno):
logger.handle(record)
_buff_handler.flush()
_logging_started = True
# Set our custom logger class as default
logging.setLoggerClass(FlexGetLogger)
| 34.312775
| 111
| 0.706381
|
eab9eaccd128cad9a8c678e07d6e9420bdb41087
| 95
|
py
|
Python
|
examples/test_simple.py
|
VStoilovskyi/agent-python-pytest
|
325f56938009ae89ae7739410adcccc0d94f2cde
|
[
"Apache-2.0"
] | 85
|
2017-05-03T10:44:32.000Z
|
2022-03-30T06:57:10.000Z
|
examples/test_simple.py
|
VStoilovskyi/agent-python-pytest
|
325f56938009ae89ae7739410adcccc0d94f2cde
|
[
"Apache-2.0"
] | 212
|
2017-04-26T12:18:07.000Z
|
2022-03-28T11:49:34.000Z
|
examples/test_simple.py
|
VStoilovskyi/agent-python-pytest
|
325f56938009ae89ae7739410adcccc0d94f2cde
|
[
"Apache-2.0"
] | 103
|
2017-04-12T15:34:58.000Z
|
2022-01-24T17:11:29.000Z
|
"""Simple example test."""
def test_simple():
"""Simple example test."""
assert True
| 13.571429
| 30
| 0.610526
|
5d39b5031a17de51e4976e417a97bd5433ae39a0
| 649
|
py
|
Python
|
office__excel__openpyxl__xlwt/xlsx__openpyxl__hello_world.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 117
|
2015-12-18T07:18:27.000Z
|
2022-03-28T00:25:54.000Z
|
office__excel__openpyxl__xlwt/xlsx__openpyxl__hello_world.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 8
|
2018-10-03T09:38:46.000Z
|
2021-12-13T19:51:09.000Z
|
office__excel__openpyxl__xlwt/xlsx__openpyxl__hello_world.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 28
|
2016-08-02T17:43:47.000Z
|
2022-03-21T08:31:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install openpyxl
import openpyxl
columns = ['Name', 'Age', 'Course']
rows = [
['Vasya', '16', 1],
['Anya', '17', 2],
['Inna', '16', 1],
]
wb = openpyxl.Workbook()
# Remove default sheet
for sheet_name in wb.sheetnames:
sheet = wb.get_sheet_by_name(sheet_name)
wb.remove_sheet(sheet)
ws = wb.create_sheet('Students')
for i, value in enumerate(columns, 1):
ws.cell(row=1, column=i).value = value
for i, row in enumerate(rows, 2):
for j, value in enumerate(row, 1):
ws.cell(row=i, column=j).value = value
wb.save('excel.xlsx')
| 18.027778
| 46
| 0.625578
|
b00ea7a6aa448145f48c581ebcf013143ee92bc4
| 29,856
|
py
|
Python
|
lingvo/core/spectrum_augmenter.py
|
zh794390558/lingvo
|
ecdf678179018ca07f4f52d065b9bf3fe2dc7c5a
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/spectrum_augmenter.py
|
zh794390558/lingvo
|
ecdf678179018ca07f4f52d065b9bf3fe2dc7c5a
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/spectrum_augmenter.py
|
zh794390558/lingvo
|
ecdf678179018ca07f4f52d065b9bf3fe2dc7c5a
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lingvo layers that used for spectrum augmentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
_SPECAUGMENT_ARGS = (
'freq_mask_max_bins',
'freq_mask_count',
'use_dynamic_time_mask_max_frames',
'time_mask_max_frames',
'time_mask_count',
'time_mask_max_ratio',
'time_masks_per_frame',
'time_warp_bound',
'time_warp_max_frames',
'time_warp_max_ratio',
)
def _hat(x):
"""Hat function.
The hat function is a piecewise linear function defined such that
1) x < -1: _hat(x) = 0
2) -1 <= x < 0: _hat(x) = x + 1
3) 0 <= x < 1: _hat(x) = -x + 1
4) x > 1 : _hat(x) = 0
Args:
x: A tensor.
Returns:
Tensor obtained by element-wise application of the hat function.
"""
return tf.nn.relu(x + 1) - 2 * tf.nn.relu(x) + tf.nn.relu(x - 1)
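# A quick NumPy cross-check of the piecewise definition above (editor's
# illustration, not part of the original layer):
#   import numpy as np
#   relu = lambda v: np.maximum(v, 0.0)
#   hat_np = lambda x: relu(x + 1) - 2 * relu(x) + relu(x - 1)
#   hat_np(np.array([-2.0, -0.5, 0.0, 0.5, 2.0]))  # -> [0., 0.5, 1., 0.5, 0.]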
class SpectrumAugmenter(base_layer.BaseLayer):
"""Performs data augmentation as according to the SpecAug paper.
https://arxiv.org/pdf/1904.08779.pdf
"""
@classmethod
def Params(cls):
p = super(SpectrumAugmenter, cls).Params()
p.Define('freq_mask_max_bins', 15,
'Maximum number of frequency bins of frequency masking.')
p.Define('freq_mask_count', 1,
'Number of times we apply masking on the frequency axis.')
# TODO(danielspark): Deprecate 'use_dynamic_time_mask_max_frames' and
# introduce enum parameter to replace it.
p.Define(
'use_dynamic_time_mask_max_frames', False,
'If true, time_mask_max_frames is determined by '
'time_mask_max_ratio * utterance_length.')
p.Define(
'time_mask_max_frames', 50, 'Maximum number of frames of time masking. '
'Overridden when use_dynamic_time_mask_max_frames = True.')
p.Define(
'time_mask_count', 1,
'Number of times we apply masking on the time axis. '
'Acts as upper-bound when time_masks_per_frame > 0.')
p.Define('time_mask_max_ratio', 1.0,
'Maximum portion allowed for time masking.')
p.Define(
'time_masks_per_frame', 0.0,
'Ratio of number of time masks to be applied against the number '
'of frames. If > 0, multiplicity of the time mask is determined by '
'min(time_masks_per_frame * utterance_length, time_mask_count).')
p.Define(
'time_warp_bound', 'static',
'To be set to either `dynamic` or `static`. '
'If `dynamic`, time warp bound is determined by '
'time_warp_max_ratio * utterance_length. '
'If `static`, time warp bound is determined by '
'min(time_warp_max_frames, time_warp_max_ratio * utterance_length).')
p.Define('time_warp_max_frames', 0,
'Maximum number of frames for shifting in time warping.')
p.Define('time_warp_max_ratio', 0.0,
'Maximum portion of frames for shifting in time warping.')
p.Define('use_noise', False, 'Whether to noisify the time masked region.')
p.Define('gaussian_noise', False, 'Use Gaussian distribution for noise.')
p.Define('unstack', False,
'Whether to unstack features before applying SpecAugment.')
p.Define('stack_height', 3,
'Number of frames stacked on top of each other.')
p.Define(
'domain_ids', [0],
'If domain ids was given, this parameters describe which domain '
'will be augmented, e.g. '
'p.domain_ids = [2, 7, 1] '
'p.time_mask_count = [1, 2, 0] '
'implies domain 2 will have 1, 7 has 2 and 1 has 0 time masks. '
        'Any other domains will not be augmented.')
return p
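  # Editor's note (illustrative arithmetic, not from the original file): with
  # time_masks_per_frame=0.04 and time_mask_count=10, a 100-frame utterance gets
  # min(0.04 * 100, 10) = 4 time masks; with time_masks_per_frame=0.0 it always
  # gets time_mask_count masks.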
@base_layer.initializer
def __init__(self, params):
super(SpectrumAugmenter, self).__init__(params)
p = self.params
num_domains = len(p.domain_ids)
for field in _SPECAUGMENT_ARGS:
v = getattr(p, field)
if isinstance(v, (list, tuple)):
assert len(v) == num_domains
else:
setattr(p, field, [v] * num_domains)
assert p.freq_mask_max_bins[0] > -1
assert p.time_mask_max_frames[0] > -1
assert p.time_warp_max_frames[0] > -1
def _GetMask(self,
batch_size,
choose_range,
mask_size,
max_length=None,
masks_per_frame=0.0,
multiplicity=1,
dtype=tf.float32,
max_ratio=1.0):
"""Returns fixed size multi-masks starting from random positions.
A multi-mask is a mask obtained by applying multiple masks.
    When max_length is given, this function:
    1) Samples random mask lengths less than max_length with shape
       (batch_size, multiplicity).
    2) Truncates lengths to a maximum of (choose_range * max_ratio),
       so that each mask is fully contained within the corresponding sequence.
    3) Randomly samples start points of shape (batch_size, multiplicity)
       within (choose_range - lengths).
    4) For each batch, multiple masks (whose number is given by the
       multiplicity) are constructed.
    5) Returns a mask of shape (batch_size, mask_size) where masks are
       obtained by composing the masks constructed in step 4).
       If masks_per_frame > 0, the number of masks composed is given by
       min(masks_per_frame * choose_range, multiplicity).
       If not, all the masks are composed. The masked regions are set to zero.
    When max_length is not given, this function:
    1) Samples random mask lengths less than (choose_range * max_ratio)
       with shape (batch_size, multiplicity).
    2) Proceeds with steps 3), 4) and 5) above.
Args:
batch_size: Batch size. Integer number.
choose_range: Range within which the masked entries must lie. Tensor of
shape (batch_size,).
mask_size: Size of the mask. Integer number.
max_length: Maximum number of allowed consecutive masked entries. Integer
number or None.
masks_per_frame: Number of masks per frame. Float number. If > 0, the
multiplicity of the mask is set to be masks_per_frame * choose_range.
multiplicity: Maximum number of total masks. Integer number.
dtype: Data type.
max_ratio: Maximum portion of the entire range allowed to be masked. Float
number.
Returns:
mask: a fixed size multi-mask starting from a random position with shape
(batch_size, mask_size).
"""
p = self.params
# Non-empty random seed values are only used for testing
# seed_1 and seed_2 are set separately to avoid correlation of
# mask size and mask position.
if p.random_seed:
seed_1 = p.random_seed + 1
seed_2 = 2 * p.random_seed
else:
seed_1 = p.random_seed
seed_2 = p.random_seed
# Sample lengths for multiple masks.
if max_length and max_length > 0:
max_length = tf.broadcast_to(tf.cast(max_length, dtype), (batch_size,))
else:
max_length = tf.cast(choose_range, dtype=dtype) * max_ratio
masked_portion = tf.random.uniform((batch_size, multiplicity),
minval=0.0,
maxval=1.0,
dtype=dtype,
seed=seed_1)
masked_frame_size = tf.einsum('b,bm->bm', max_length, masked_portion)
masked_frame_size = tf.cast(masked_frame_size, dtype=tf.int32)
    # Make sure the sampled length is smaller than max_ratio * length_bound.
    # Note that sampling in this way is biased
    # (shorter sequences may be over-masked).
choose_range = tf.expand_dims(choose_range, -1)
choose_range = tf.tile(choose_range, [1, multiplicity])
length_bound = tf.cast(choose_range, dtype=dtype)
length_bound = tf.cast(max_ratio * length_bound, dtype=tf.int32)
length = tf.minimum(masked_frame_size, tf.maximum(length_bound, 1))
# Choose starting point.
random_start = tf.random.uniform((batch_size, multiplicity),
maxval=1.0,
seed=seed_2)
start_with_in_valid_range = random_start * tf.cast(
(choose_range - length + 1), dtype=dtype)
start = tf.cast(start_with_in_valid_range, tf.int32)
end = start + length - 1
# Shift starting and end point by small value.
delta = tf.constant(0.1)
start = tf.expand_dims(tf.cast(start, dtype) - delta, -1)
start = tf.tile(start, [1, 1, mask_size])
end = tf.expand_dims(tf.cast(end, dtype) + delta, -1)
end = tf.tile(end, [1, 1, mask_size])
# Construct pre-mask of shape (batch_size, multiplicity, mask_size).
diagonal = tf.expand_dims(
tf.expand_dims(tf.cast(tf.range(mask_size), dtype=dtype), 0), 0)
diagonal = tf.tile(diagonal, [batch_size, multiplicity, 1])
pre_mask = tf.cast(
tf.logical_and(diagonal < end, diagonal > start), dtype=dtype)
# Sum masks with appropriate multiplicity.
if masks_per_frame > 0:
multiplicity_weights = tf.tile(
tf.expand_dims(tf.range(multiplicity, dtype=dtype), 0),
[batch_size, 1])
multiplicity_tensor = masks_per_frame * tf.cast(choose_range, dtype=dtype)
multiplicity_weights = tf.cast(
multiplicity_weights < multiplicity_tensor, dtype=dtype)
pre_mask = tf.einsum('bmt,bm->bt', pre_mask, multiplicity_weights)
else:
pre_mask = tf.reduce_sum(pre_mask, 1)
mask = tf.cast(1.0 - tf.cast(pre_mask > 0, dtype=dtype), dtype=dtype)
if p.fprop_dtype is not None and p.fprop_dtype != p.dtype:
mask = tf.cast(mask, p.fprop_dtype)
return mask
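  # A worked sketch of the composition above (single sequence, mask_size=10,
  # choose_range=8, multiplicity=2): if the sampled (start, length) pairs are
  # (1, 3) and (5, 2), the two indicator masks cover indices 1-3 and 5-6, so
  # the returned multi-mask is
  #   [1, 0, 0, 0, 1, 0, 0, 1, 1, 1]
  # i.e. masked positions are zeroed and everything else stays 1.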
def _GetWarpMatrix(self,
batch_size,
choose_range,
matrix_size,
max_warp_frames=None,
dtype=tf.float32,
max_ratio=1.0):
"""Returns warp matrices starting from random positions.
    When max_warp_frames is not None, this function:
    1) Samples random warp displacements from the interval
       [-max_warp_frames, max_warp_frames] to yield a shift tensor
       with shape (batch_size,).
    2) Truncates shifts to a maximum magnitude of (choose_range * max_ratio),
       so that each shift is fully contained within the
       corresponding sequence.
    3) Randomly samples origin anchor points of shape (batch_size,)
       within [shift, choose_range - shift).
    4) Returns a batch of 1-D linear maps that fix the boundary points and
       shift the origin point by the shift.
    When max_warp_frames is None, this function:
    1) Samples random warp displacements with magnitudes less than
       (choose_range * max_ratio) to yield a shift tensor with
       shape (batch_size,).
    2) Proceeds with steps 3) and 4) above.
Args:
batch_size: Batch size. Integer number.
choose_range: Range within which the warp reference points must lie.
Tensor of shape (batch_size,).
matrix_size: Dimension of vector space warp matrix is applied to. Integer
number.
max_warp_frames: Upper-bound on the warp distance. Integer or None.
dtype: Data type.
max_ratio: Maximum ratio between the shift distance and choose_range.
Float number.
Returns:
warp_matrix: An array of fixed size warp matrices with shape
(batch_size, matrix_size, matrix_size).
"""
p = self.params
# Non-empty random seed values are only used for testing
# seed_1 and seed_2 are set separately to avoid correlation of
# warp magnitude and origin position.
if p.random_seed:
seed_1 = p.random_seed - 1
seed_2 = 2 * p.random_seed + 1
else:
seed_1 = p.random_seed
seed_2 = p.random_seed
choose_range_dtype = tf.cast(choose_range, dtype=dtype)
length_upper_bound = tf.cast(max_ratio * choose_range_dtype, dtype=tf.int32)
# Set shift length.
if max_warp_frames and max_warp_frames > 0:
shift = tf.random.uniform((batch_size,),
minval=-1 * max_warp_frames,
maxval=max_warp_frames + 1,
dtype=tf.int32,
seed=seed_1)
else:
random_ratio = tf.random.uniform((batch_size,),
minval=-1.0,
maxval=1.0,
seed=seed_1,
dtype=dtype)
shift = tf.cast(random_ratio * tf.cast(length_upper_bound, dtype=dtype),
tf.int32)
    # Make sure the sampled shift magnitude is bounded by length_upper_bound.
    # Note that sampling in this way is biased
    # (shorter sequences may be over-warped).
final_shift = tf.maximum(-length_upper_bound,
tf.minimum(shift, length_upper_bound))
# Choose origin anchor point.
mid_range = tf.cast(choose_range, dtype=tf.int32)
mid_range = tf.maximum(choose_range - 2, 0)
random_origin = tf.random.uniform((batch_size,), maxval=1.0, seed=seed_2)
origin_with_in_valid_range = random_origin * tf.cast(mid_range, dtype=dtype)
origin = tf.cast(origin_with_in_valid_range, tf.int32) + 1
# Set destination point of the origin anchor point under the warp map.
destination = origin + final_shift
# Cast origin and destination.
origin = tf.cast(origin, dtype=dtype)
destination = tf.cast(destination, dtype=dtype)
return self._ConstructWarpMatrix(
batch_size=batch_size,
matrix_size=matrix_size,
origin=origin,
destination=destination,
choose_range=choose_range_dtype,
dtype=dtype)
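  # A worked sketch: with max_warp_frames=5 and a sequence of length 40, the
  # shift is drawn from {-5, ..., 5}; if shift = +3 and the sampled origin
  # anchor is frame 10, the warp fixes frames 0 and 40 and moves frame 10 to
  # frame 13, with everything in between interpolated linearly by
  # _ConstructWarpMatrix below.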
def _ConstructWarpMatrix(self, batch_size, matrix_size, origin, destination,
choose_range, dtype):
"""Returns warp matrices according to origin, destination and choose_range.
This function constructs a batch of warp matrices which maps the batch
of origin points to the batch of destination points with fixed boundary
coordinates at 0 and choose_range.
The warping function, defined by the origin anchor point `origin`,
the destination of the origin anchor point `destination` and the
length of the domain in the warping axis `choose_range` is a piecewise
linear map that fixes the points 0 and `choose_range` and maps
`origin` to `destination`.
For the warping matrix to be non-singular, destination must lie in the
range 1<= destination <= choose_range - 1, so a destination
out of this range is adjusted to be in this range before the warping
matrix is constructed.
The warping map can be explicitly written by first defining the slopes:
1) slope_0 = origin / destination.
2) slope_1 = (choose_range - origin) / (choose_range - destination).
3) slope_2 = 1.0.
Then the origin point orig_i of the mapped coordinate i is given by:
1) i < destination: orig_i = slope_0 * i.
2) destination <= i < choose_range:
orig_i = slope_1 * i - (slope_1 - slope_0) * destination.
3) i >= choose_range: orig_i = i.
Denoting n_i = ceil(orig_i), the warp matrix element warp[i][j] is given by:
1) j = n_i: 1 - n_i + orig_i.
2) j = n_i - 1: n_i - orig_i.
3) Otherwise: 0.
Applying the warp matrix to an array of pixels, i.e.,
warped_pixel[i] = sum_j warp[i][j] * pixel[j], one would get
warped_pixel[i] = (n_i-orig_i) pixel[n_i-1] + (1-n_i+orig_i) pixel[n_i].
Args:
batch_size: Batch size. Integer number.
matrix_size: Dimension of the vector space the warp matrix is applied to.
Integer number.
origin: Origin anchor point for warping. Tensor of shape (batch_size,) and
data type dtype.
destination: Destination of the origin anchor point upon warping. Tensor
of shape (batch_size,) and data type dtype.
choose_range: Range within which the warp reference points must lie.
Tensor of shape (batch_size,) data type dtype.
dtype: Data type of origin, destination, choose_range and the output warp
matrix.
Returns:
warp_matrix: An array of fixed size warp matrices with shape
(batch_size, matrix_size, matrix_size).
"""
p = self.params
# Entries of destination must be in the range
# 1 <= destination <= choose_range - 1
# for warp matrix to have non-singular values.
destination = tf.minimum(tf.maximum(destination, 1.0), choose_range - 1.0)
# Construct piece-wise linear function fixing boundary points
# specified by zero, choose_range and matrix size and maps
# the origin anchor point to the destination.
destination_bc = tf.broadcast_to(destination, (matrix_size, batch_size))
destination_bc = tf.transpose(destination_bc)
choose_range_bc = tf.broadcast_to(choose_range, (matrix_size, batch_size))
choose_range_bc = tf.transpose(choose_range_bc)
# Slopes of piece-wise linear function.
slope_0 = origin / destination
slope_1 = (choose_range - origin) / (choose_range - destination)
slope_2 = 1.0
# x is a batch of origin matrices.
# The origin matrix is the matrix such that
# origin[i][j] = Origin coordinate of coordinate i for the warp map.
# Denoting the destination of the origin anchor point in the
# warp map as "dest," the origin coordinate of point i is given by:
# 1) i < dest: slope_0 * i.
# 2) dest <= i < choose_range: slope_1 * i - (slope_1 - slope_0) * dest.
# 3) i >= choose_range: i.
x = tf.broadcast_to(
tf.cast(tf.range(matrix_size), dtype=dtype), (batch_size, matrix_size))
x = (
tf.einsum('b,bi->bi', slope_0, x) + tf.einsum(
'b,bi->bi', slope_1 - slope_0, tf.nn.relu(x - destination_bc)) +
tf.einsum('b,bi->bi', slope_2 - slope_1,
tf.nn.relu(x - choose_range_bc)))
x = tf.broadcast_to(x, (matrix_size, batch_size, matrix_size))
x = tf.transpose(x, perm=[1, 2, 0])
# y is a batch of coordinate matrices.
# A coordinate matrix is a matrix such that
# coordinate[i][j] = j.
y = tf.broadcast_to(
tf.cast(tf.range(matrix_size), dtype=dtype),
(batch_size, matrix_size, matrix_size))
# Warp matrix is obtained by applying hat function element-wise to (x-y).
# Denoting the origin point of i under the warp map as orig_i,
# and n_i = ceil(orig_i), the warp matrix element warp[i][j] is given by:
# 1) j = n_i: 1 - n_i + orig_i.
# 2) j = n_i - 1: n_i - orig_i.
# 3) Otherwise: 0.
# Applying the warp matrix to pixels, i.e.,
# warped_pixel[i] = sum_j warp[i][j] * original_pixel[j], one would get
# warped_pixel[i] = (n_i - orig_i) * original_pixel[n_i-1]
# + (1 - n_i + orig_i) * original_pixel[n_i].
warp_matrix = x - y
warp_matrix = _hat(warp_matrix)
if p.fprop_dtype is not None and p.fprop_dtype != dtype:
warp_matrix = tf.cast(warp_matrix, p.fprop_dtype)
return warp_matrix
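  # A worked example of the construction above (single batch element with
  # choose_range=4, matrix_size=5, origin=2, destination=3): slope_0 = 2/3,
  # slope_1 = 2, so orig_i = [0, 2/3, 4/3, 2, 4] for i = 0..4. Applying the
  # hat function, row i=1 of the warp matrix is [1/3, 2/3, 0, 0, 0], i.e.
  # warped_pixel[1] = (1/3) * pixel[0] + (2/3) * pixel[1].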
def _FrequencyMask(self,
inputs,
dtype=tf.float32,
domain_id_index=0):
"""Applies frequency masking with given degree to inputs.
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
dtype: Data type.
domain_id_index: domain id index.
Returns:
Inputs with random frequency masking applied.
"""
p = self.params
# Mask parameters.
freq_mask_max_bins = p.freq_mask_max_bins[domain_id_index]
multiplicity = p.freq_mask_count[domain_id_index]
# If masking length or count is zero, do nothing.
if freq_mask_max_bins == 0 or multiplicity == 0:
return inputs
# Arguments to pass to mask generator.
batch_size, _, num_freq, _ = py_utils.GetShape(inputs)
choose_range = tf.cast(
tf.broadcast_to(num_freq, (batch_size,)), dtype=tf.int32)
# Create masks in frequency direction and apply.
block_arrays = self._GetMask(
tf.shape(inputs)[0],
choose_range=choose_range,
mask_size=num_freq,
max_length=freq_mask_max_bins,
masks_per_frame=0.0,
multiplicity=multiplicity,
dtype=dtype,
max_ratio=1.0)
outputs = tf.einsum('bxyc,by->bxyc', inputs, block_arrays)
return outputs
def _TimeMask(self,
inputs,
seq_lengths,
noisify=False,
gaussian_noise=False,
dtype=tf.float32,
domain_id_index=0):
"""Applies time masking with given degree to inputs.
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
      seq_lengths: The actual sequence lengths, of shape (batch_size,), from
        which the mask is sampled.
noisify: Whether to noisify the masked out regions.
gaussian_noise: Whether to use gaussian noise when noisifying.
dtype: Data type.
domain_id_index: domain id index.
Returns:
Inputs with random time masking applied.
"""
p = self.params
# Get time masking parameters.
time_mask_max_frames = p.time_mask_max_frames[domain_id_index]
time_masks_per_frame = p.time_masks_per_frame[domain_id_index]
use_dynamic_time_mask_max_frames = \
p.use_dynamic_time_mask_max_frames[domain_id_index]
multiplicity = p.time_mask_count[domain_id_index]
max_ratio = p.time_mask_max_ratio[domain_id_index]
# If maximum mask length is zero, do nothing.
if ((time_mask_max_frames == 0 and not use_dynamic_time_mask_max_frames) or
max_ratio <= 0.0):
return inputs
if multiplicity == 0:
return inputs
seq_lengths = tf.cast(seq_lengths, tf.int32)
batch_size, time_length, _, _ = py_utils.GetShape(inputs)
# When using dynamic time mask size, discard upper-bound on
# maximum allowed frames for time mask.
if use_dynamic_time_mask_max_frames:
time_mask_max_frames = None
# Create masks in time direction and apply.
block_arrays = self._GetMask(
batch_size,
choose_range=seq_lengths,
mask_size=time_length,
max_length=time_mask_max_frames,
masks_per_frame=time_masks_per_frame,
multiplicity=multiplicity,
dtype=dtype,
max_ratio=max_ratio)
outputs = tf.einsum(
'bxyc,bx->bxyc', inputs, block_arrays, name='einsum_formasking')
if noisify:
      # Sample noise with standard deviation equal to factor * 0.1 + 0.0001.
# TODO(ngyuzh): Make sure this won't affect EOS.
if gaussian_noise:
stddev = 1.0
else:
factor = tf.random_uniform((),
minval=1.0,
maxval=2.0,
dtype=dtype,
seed=p.random_seed)
stddev = factor * 0.1 + 0.0001
noise = tf.random.normal(
[tf.shape(inputs)[0],
tf.shape(inputs)[1],
tf.shape(inputs)[2]],
stddev=stddev,
seed=p.random_seed)
if p.fprop_dtype is not None and p.fprop_dtype != p.dtype:
noise = tf.cast(noise, p.fprop_dtype)
outputs_mask = tf.einsum(
'bxy,bx->bxy',
noise,
1.0 - block_arrays,
name='einsum_fornoisymasking')
outputs = outputs + tf.expand_dims(outputs_mask, -1)
return outputs
def _TimeWarp(self, inputs, seq_lengths, dtype=tf.float32, domain_id_index=0):
"""Applies time warping with given degree to inputs.
Args:
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
      seq_lengths: The actual sequence lengths, of shape (batch_size,), from
        which the warp is sampled.
dtype: Data type.
domain_id_index: Domain ID index.
Returns:
Inputs with random time warping applied.
"""
p = self.params
batch_size, time_length, _, _ = py_utils.GetShape(inputs)
# Get parameters for warping.
time_warp_max_frames = p.time_warp_max_frames[domain_id_index]
max_ratio = p.time_warp_max_ratio[domain_id_index]
time_warp_bound = p.time_warp_bound[domain_id_index]
assert time_warp_bound in ('static', 'dynamic')
# If maximum warp length is zero, do nothing.
if ((time_warp_max_frames == 0 and time_warp_bound == 'static') or
max_ratio <= 0.0):
return inputs
seq_lengths = tf.cast(seq_lengths, tf.int32)
# Discard upper-bound on time-warp frames when
# dynamic time warping is used.
if time_warp_bound == 'dynamic':
time_warp_max_frames = None
# Create warping matrix in time direction and apply
warp_matrix = self._GetWarpMatrix(
batch_size,
choose_range=seq_lengths,
matrix_size=time_length,
max_warp_frames=time_warp_max_frames,
dtype=dtype,
max_ratio=max_ratio)
outputs = tf.einsum(
'bxyc,bzx->bzyc', inputs, warp_matrix, name='einsum_forwarping')
return outputs
def UnstackFeatures(self, src_inputs, src_paddings):
"""Unstacks src_input and src_paddings based off stack height."""
sh = self.params.stack_height
bs, old_series_length, _, channels = py_utils.GetShape(src_inputs)
unstacked_series_length = old_series_length * sh
src_inputs = tf.reshape(src_inputs,
[bs, unstacked_series_length, -1, channels])
content = 1 - src_paddings
lengths = tf.cast(sh * tf.reduce_sum(content, axis=1), tf.int32)
mask = tf.sequence_mask(lengths, maxlen=unstacked_series_length)
src_paddings = 1 - tf.cast(mask, tf.int32)
return src_inputs, src_paddings
def _AugmentationNetwork(self,
series_length,
inputs,
paddings,
domain_id_index=0):
"""Returns augmented features.
Args:
series_length: Total length of time series.
inputs: Batch of input features of shape (batch_size, time_length,
num_freq, channels).
paddings: Batch of padding vectors of shape (batch_size, time_length).
domain_id_index: domain id index.
Returns:
Batch of output features of shape (batch_size, time_length, num_freq,
channels) obtained by applying random augmentations to inputs.
"""
p = self.params
dtype = p.dtype
# Unstack the features.
if p.unstack:
inputs, paddings = self.UnstackFeatures(inputs, paddings)
lengths = tf.reduce_sum(1 - paddings, 1)
inputs = self._TimeWarp(
inputs, lengths, dtype=dtype, domain_id_index=domain_id_index)
inputs = self._TimeMask(
inputs,
lengths,
noisify=p.use_noise,
gaussian_noise=p.gaussian_noise,
dtype=dtype,
domain_id_index=domain_id_index)
inputs = self._FrequencyMask(
inputs, dtype=dtype, domain_id_index=domain_id_index)
# Restack the features after applying specaugment.
if p.unstack:
inputs = tf.reshape(
inputs, [tf.shape(inputs)[0], series_length, -1,
tf.shape(inputs)[3]])
return inputs
def FProp(self, theta, inputs, paddings, domain_ids=None):
"""Applies data augmentation by randomly mask spectrum in inputs.
Args:
theta: A NestedMap object containing weights' values of this layer and its
children layers.
inputs: A tensor of shape [batch, time, freq, num_channels].
paddings: A 0/1 tensor of shape [batch, time].
domain_ids: input domain_ids of shape [batch, time].
Returns:
      A pair of tensors:
- augmented_inputs: A tensor of shape [batch, time, freq, num_channels].
- paddings: A 0/1 tensor of shape [batch, time].
"""
p = self.params
batch_size, series_length, _, _ = py_utils.GetShape(inputs)
if len(p.domain_ids) > 1:
augmented_inputs = tf.zeros_like(inputs)
original_inputs = inputs
for i, domain_id in enumerate(p.domain_ids):
augmented_domain = self._AugmentationNetwork(
series_length, inputs, paddings, domain_id_index=i)
target_domain = tf.cast(
tf.expand_dims(tf.tile([domain_id], [batch_size]), -1),
dtype=p.dtype)
# [batch, time].
domain_mask = tf.cast(
tf.equal(domain_ids, target_domain), dtype=p.dtype)
augmented_domain = tf.einsum(
'bxyc,bx->bxyc',
augmented_domain,
domain_mask,
name='einsum_domainmasking')
original_inputs = tf.einsum(
'bxyc,bx->bxyc',
original_inputs,
1.0 - domain_mask,
name='einsum_domainmasking2')
augmented_inputs = augmented_domain + augmented_inputs
augmented_inputs = original_inputs + augmented_inputs
else:
augmented_inputs = self._AugmentationNetwork(
series_length, inputs, paddings, domain_id_index=0)
return augmented_inputs, paddings
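# A usage sketch (hedged: assumes the usual lingvo Params/BaseLayer API and
# hypothetical feature tensors):
#   p = SpectrumAugmenter.Params().Set(
#       name='specaug', freq_mask_count=2, time_mask_count=2)
#   augmenter = p.Instantiate()
#   feats, paddings = augmenter.FPropDefaultTheta(features, paddings)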
| 39.702128
| 80
| 0.647508
|
2933ad96285f7e2883b20ba959b39745931d273c
| 1,474
|
py
|
Python
|
bot.py
|
sanjay-271022/droplink-bot
|
2fcc42bdcbe178839c03471105a997aa56895a09
|
[
"MIT"
] | null | null | null |
bot.py
|
sanjay-271022/droplink-bot
|
2fcc42bdcbe178839c03471105a997aa56895a09
|
[
"MIT"
] | null | null | null |
bot.py
|
sanjay-271022/droplink-bot
|
2fcc42bdcbe178839c03471105a997aa56895a09
|
[
"MIT"
] | null | null | null |
from os import environ
import aiohttp
from pyrogram import Client, filters
API_ID = environ.get('API_ID')
API_HASH = environ.get('API_HASH')
BOT_TOKEN = environ.get('BOT_TOKEN')
API_KEY = environ.get('API_KEY', 'e3eddb3e7c5513eee187120fce788ddc4a1a643b')
bot = Client('droplink bot',
api_id=API_ID,
api_hash=API_HASH,
bot_token=BOT_TOKEN,
workers=50,
sleep_threshold=10)
@bot.on_message(filters.command('start') & filters.private)
async def start(bot, message):
await message.reply(
f"**Hi {message.chat.first_name}!**\n\n"
"I'm a specialised bot for shortening Droplink.co links which can help you earn money by just sharing links. I am made by @tele_united_bots.")
@bot.on_message(filters.regex(r'https?://[^\s]+') & filters.private)
async def link_handler(bot, message):
link = message.matches[0].group(0)
try:
short_link = await get_shortlink(link)
await message.reply(f'Here is your [`{short_link}`]({short_link})', quote=True)
except Exception as e:
await message.reply(f'Error: {e}', quote=True)
async def get_shortlink(link):
url = 'https://droplink.co/api'
params = {'api': API_KEY, 'url': link}
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params, raise_for_status=True) as response:
data = await response.json()
return data["shortenedUrl"]
bot.run()
| 32.043478
| 150
| 0.666893
|
322cecc89245dc68f69986caf088035a3b9638d1
| 2,670
|
py
|
Python
|
aleph/logic/resolver.py
|
aaronarnold2/aleph
|
1728f4db8863554d5b0722546838970e53ec72bd
|
[
"MIT"
] | null | null | null |
aleph/logic/resolver.py
|
aaronarnold2/aleph
|
1728f4db8863554d5b0722546838970e53ec72bd
|
[
"MIT"
] | null | null | null |
aleph/logic/resolver.py
|
aaronarnold2/aleph
|
1728f4db8863554d5b0722546838970e53ec72bd
|
[
"MIT"
] | null | null | null |
# Bulk object resolver.
# The purpose of this module is to quickly load objects of different
# types from the backend. It's typically used by the API serialiser
# to ensure that nested objects are loaded only once.
#
import logging
from normality import stringify
from collections import defaultdict
from aleph.core import cache
from aleph.model import (
Role,
Collection,
Alert,
Entity,
EntitySet,
EntitySetItem,
Export,
)
from aleph.logic.roles import get_role
from aleph.logic.alerts import get_alert
from aleph.logic.entitysets import get_entityset, get_entitysetitem
from aleph.logic.export import get_export
from aleph.index.collections import get_collection
from aleph.index.entities import entities_by_ids
log = logging.getLogger(__name__)
LOADERS = {
Role: get_role,
Collection: get_collection,
Alert: get_alert,
EntitySet: get_entityset,
EntitySetItem: get_entitysetitem,
Export: get_export,
}
def _instrument_stub(stub):
if not hasattr(stub, "_rx_queue"):
stub._rx_queue = set()
if not hasattr(stub, "_rx_cache"):
stub._rx_cache = {}
def queue(stub, clazz, key, schema=None):
"""Notify the resolver associated with `stub` that the given object
needs to be retrieved. Multiple calls with the same object signature
will be merged."""
_instrument_stub(stub)
key = stringify(key)
if key is None:
return
stub._rx_queue.add((clazz, key, schema))
def resolve(stub):
_instrument_stub(stub)
cache_keys = {}
schemata = {}
for clazz, key, schema in stub._rx_queue:
if (clazz, key) in stub._rx_cache:
continue
cid = cache.object_key(clazz, key)
cache_keys[cid] = (clazz, key)
schemata[cid] = schema
keys = list(cache_keys.keys())
queries = defaultdict(list)
for cid, value in cache.get_many_complex(keys):
clazz, key = cache_keys.get(cid)
if value is None:
# log.info("MISS [%s]: %s", clazz.__name__, key)
if clazz == Entity:
queries[schemata.get(cid)].append(key)
loader = LOADERS.get(clazz)
if loader is not None:
value = loader(key)
stub._rx_cache[(clazz, key)] = value
for schema, ids in queries.items():
for entity in entities_by_ids(ids, schemata=schema, cached=True):
stub._rx_cache[(Entity, entity.get("id"))] = entity
def get(stub, clazz, key):
"""Retrieve an object that has been loaded (or None)."""
_instrument_stub(stub)
key = stringify(key)
if key is None:
return
return stub._rx_cache.get((clazz, key))
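# A usage sketch (the `request` object below is hypothetical; any object can
# serve as the stub that accumulates the queue and cache):
#   queue(request, Role, role_id)
#   queue(request, Entity, entity_id, schema='Person')
#   resolve(request)
#   role = get(request, Role, role_id)
#   entity = get(request, Entity, entity_id)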
| 28.709677
| 73
| 0.667416
|
26a467fd481a18961360769b005f30c2c9482ebc
| 753
|
py
|
Python
|
ase_project/urls.py
|
akshayk17/ase_project
|
2838ddd15e755d749653009484848421f5644615
|
[
"MIT"
] | null | null | null |
ase_project/urls.py
|
akshayk17/ase_project
|
2838ddd15e755d749653009484848421f5644615
|
[
"MIT"
] | null | null | null |
ase_project/urls.py
|
akshayk17/ase_project
|
2838ddd15e755d749653009484848421f5644615
|
[
"MIT"
] | null | null | null |
"""ase_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 34.227273
| 77
| 0.710491
|
3bb00845d448d306ce2d2763704049f54428545a
| 5,697
|
py
|
Python
|
sdk/python/pulumi_azure_native/netapp/get_account.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/netapp/get_account.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/netapp/get_account.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
]
@pulumi.output_type
class GetAccountResult:
"""
NetApp account resource
"""
def __init__(__self__, active_directories=None, encryption=None, id=None, location=None, name=None, provisioning_state=None, system_data=None, tags=None, type=None):
if active_directories and not isinstance(active_directories, list):
raise TypeError("Expected argument 'active_directories' to be a list")
pulumi.set(__self__, "active_directories", active_directories)
if encryption and not isinstance(encryption, dict):
raise TypeError("Expected argument 'encryption' to be a dict")
pulumi.set(__self__, "encryption", encryption)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> Optional[Sequence['outputs.ActiveDirectoryResponse']]:
"""
Active Directories
"""
return pulumi.get(self, "active_directories")
@property
@pulumi.getter
def encryption(self) -> Optional['outputs.AccountEncryptionResponse']:
"""
Encryption settings
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system meta data relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetAccountResult(GetAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAccountResult(
active_directories=self.active_directories,
encryption=self.encryption,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_account(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
"""
NetApp account resource
API Version: 2020-12-01.
:param str account_name: The name of the NetApp account
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:netapp:getAccount', __args__, opts=opts, typ=GetAccountResult).value
return AwaitableGetAccountResult(
active_directories=__ret__.active_directories,
encryption=__ret__.encryption,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
system_data=__ret__.system_data,
tags=__ret__.tags,
type=__ret__.type)
| 33.122093
| 169
| 0.638757
|
86de0cb454bb1f7277211f99f55be3929a35beba
| 8,091
|
py
|
Python
|
statsmodels/robust/scale.py
|
haribharadwaj/statsmodels
|
8675b890607fe6f116b1186dcba4c387c5e3778a
|
[
"BSD-3-Clause"
] | 34
|
2018-07-13T11:30:46.000Z
|
2022-01-05T13:48:10.000Z
|
venv/lib/python3.6/site-packages/statsmodels/robust/scale.py
|
HeyWeiPan/vnpy_crypto
|
844381797a475a01c05a4e162592a5a6e3a48032
|
[
"MIT"
] | 6
|
2015-08-28T16:59:03.000Z
|
2019-04-12T22:29:01.000Z
|
venv/lib/python3.6/site-packages/statsmodels/robust/scale.py
|
HeyWeiPan/vnpy_crypto
|
844381797a475a01c05a4e162592a5a6e3a48032
|
[
"MIT"
] | 28
|
2015-04-01T20:02:25.000Z
|
2021-07-03T00:09:28.000Z
|
"""
Support and standalone functions for Robust Linear Models
References
----------
PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York, 1981.
R Venables, B Ripley. 'Modern Applied Statistics in S'
Springer, New York, 2002.
"""
from statsmodels.compat.python import callable, range
import numpy as np
from scipy.stats import norm as Gaussian
from . import norms
from statsmodels.tools import tools
def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median):
# c \approx .6745
"""
The Median Absolute Deviation along given axis of an array
Parameters
----------
a : array-like
Input array.
c : float, optional
The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),
which is approximately .6745.
axis : int, optional
        The default is 0. Can also be None.
center : callable or float
If a callable is provided, such as the default `np.median` then it
is expected to be called center(a). The axis argument will be applied
via np.apply_over_axes. Otherwise, provide a float.
Returns
-------
mad : float
`mad` = median(abs(`a` - center))/`c`
"""
a = np.asarray(a)
if callable(center):
center = np.apply_over_axes(center, a, axis)
return np.median((np.fabs(a-center))/c, axis=axis)
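# A worked example (sketch): for a = [1, 2, 3, 4, 5, 6, 100] the median is 4,
# the absolute deviations are [3, 2, 1, 0, 1, 2, 96] with median 2, so
# mad(a) = 2 / 0.6745 ~= 2.97. The normalization makes the estimate
# consistent with the standard deviation for Gaussian data, while the
# outlier 100 barely affects it.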
class Huber(object):
"""
Huber's proposal 2 for estimating location and scale jointly.
Parameters
----------
c : float, optional
Threshold used in threshold for chi=psi**2. Default value is 1.5.
tol : float, optional
Tolerance for convergence. Default value is 1e-08.
    maxiter : int, optional
Maximum number of iterations. Default value is 30.
norm : statsmodels.robust.norms.RobustNorm, optional
A robust norm used in M estimator of location. If None,
the location estimator defaults to a one-step
fixed point version of the M-estimator using Huber's T.
call
Return joint estimates of Huber's scale and location.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> chem_data = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
... 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7,
... 3.77, 5.28, 28.95])
>>> sm.robust.scale.huber(chem_data)
(array(3.2054980819923693), array(0.67365260010478967))
"""
def __init__(self, c=1.5, tol=1.0e-08, maxiter=30, norm=None):
self.c = c
self.maxiter = maxiter
self.tol = tol
self.norm = norm
tmp = 2 * Gaussian.cdf(c) - 1
self.gamma = tmp + c**2 * (1 - tmp) - 2 * c * Gaussian.pdf(c)
def __call__(self, a, mu=None, initscale=None, axis=0):
"""
Compute Huber's proposal 2 estimate of scale, using an optional
initial value of scale and an optional estimate of mu. If mu
is supplied, it is not reestimated.
Parameters
----------
a : array
1d array
mu : float or None, optional
If the location mu is supplied then it is not reestimated.
Default is None, which means that it is estimated.
initscale : float or None, optional
A first guess on scale. If initscale is None then the standardized
median absolute deviation of a is used.
Notes
-----
`Huber` minimizes the function
sum(psi((a[i]-mu)/scale)**2)
as a function of (mu, scale), where
psi(x) = np.clip(x, -self.c, self.c)
"""
a = np.asarray(a)
if mu is None:
n = a.shape[0] - 1
mu = np.median(a, axis=axis)
est_mu = True
else:
n = a.shape[0]
mu = mu
est_mu = False
if initscale is None:
scale = mad(a, axis=axis)
else:
scale = initscale
scale = tools.unsqueeze(scale, axis, a.shape)
mu = tools.unsqueeze(mu, axis, a.shape)
return self._estimate_both(a, scale, mu, axis, est_mu, n)
def _estimate_both(self, a, scale, mu, axis, est_mu, n):
"""
Estimate scale and location simultaneously with the following
pseudo_loop:
while not_converged:
mu, scale = estimate_location(a, scale, mu), estimate_scale(a, scale, mu)
where estimate_location is an M-estimator and estimate_scale implements
the check used in Section 5.5 of Venables & Ripley
"""
for _ in range(self.maxiter):
# Estimate the mean along a given axis
if est_mu:
if self.norm is None:
# This is a one-step fixed-point estimator
# if self.norm == norms.HuberT
# It should be faster than using norms.HuberT
nmu = np.clip(a, mu-self.c*scale,
mu+self.c*scale).sum(axis) / a.shape[axis]
else:
nmu = norms.estimate_location(a, scale, self.norm, axis, mu,
self.maxiter, self.tol)
else:
# Effectively, do nothing
nmu = mu.squeeze()
nmu = tools.unsqueeze(nmu, axis, a.shape)
subset = np.less_equal(np.fabs((a - mu)/scale), self.c)
card = subset.sum(axis)
nscale = np.sqrt(np.sum(subset * (a - nmu)**2, axis) \
/ (n * self.gamma - (a.shape[axis] - card) * self.c**2))
nscale = tools.unsqueeze(nscale, axis, a.shape)
test1 = np.alltrue(np.less_equal(np.fabs(scale - nscale),
nscale * self.tol))
test2 = np.alltrue(np.less_equal(np.fabs(mu - nmu), nscale*self.tol))
if not (test1 and test2):
mu = nmu; scale = nscale
else:
return nmu.squeeze(), nscale.squeeze()
raise ValueError('joint estimation of location and scale failed to converge in %d iterations' % self.maxiter)
huber = Huber()
class HuberScale(object):
"""
Huber's scaling for fitting robust linear models.
Huber's scale is intended to be used as the scale estimate in the
IRLS algorithm and is slightly different than the `Huber` class.
Parameters
----------
d : float, optional
d is the tuning constant for Huber's scale. Default is 2.5
tol : float, optional
The convergence tolerance
    maxiter : int, optional
The maximum number of iterations. The default is 30.
Methods
-------
call
Return's Huber's scale computed as below
Notes
--------
Huber's scale is the iterative solution to
scale_(i+1)**2 = 1/(n*h)*sum(chi(r/sigma_i)*sigma_i**2
where the Huber function is
chi(x) = (x**2)/2 for \|x\| < d
chi(x) = (d**2)/2 for \|x\| >= d
and the Huber constant h = (n-p)/n*(d**2 + (1-d**2)*\
scipy.stats.norm.cdf(d) - .5 - d*sqrt(2*pi)*exp(-0.5*d**2)
"""
def __init__(self, d=2.5, tol=1e-08, maxiter=30):
self.d = d
self.tol = tol
self.maxiter = maxiter
def __call__(self, df_resid, nobs, resid):
h = (df_resid)/nobs*(self.d**2 + (1-self.d**2)*\
Gaussian.cdf(self.d)-.5 - self.d/(np.sqrt(2*np.pi))*\
np.exp(-.5*self.d**2))
s = mad(resid)
subset = lambda x: np.less(np.fabs(resid/x),self.d)
chi = lambda s: subset(s)*(resid/s)**2/2+(1-subset(s))*(self.d**2/2)
scalehist = [np.inf,s]
niter = 1
while (np.abs(scalehist[niter-1] - scalehist[niter])>self.tol \
and niter < self.maxiter):
nscale = np.sqrt(1/(nobs*h)*np.sum(chi(scalehist[-1]))*\
scalehist[-1]**2)
scalehist.append(nscale)
niter += 1
#if niter == self.maxiter:
# raise ValueError("Huber's scale failed to converge")
return scalehist[-1]
hubers_scale = HuberScale()
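# A usage sketch (hedged; `y`, `X` and `beta_hat` are hypothetical):
#   resid = y - X @ beta_hat
#   scale = hubers_scale(df_resid=len(y) - X.shape[1], nobs=len(y), resid=resid)
# In robust linear model fitting this instance is typically passed as the
# scale estimator so that the IRLS loop re-estimates scale with Huber's chi
# function at each iteration.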
| 34.139241
| 117
| 0.563095
|
5e45fa0e3463adf0460ab3ead7bb0ce0fbca5ce3
| 253
|
py
|
Python
|
vbb_backend/program/tests/test_program.py
|
patrickb42/backend-vbb-portal
|
88362bc5b4d5cab95aa67e12694f98371604b65a
|
[
"MIT"
] | 3
|
2021-04-14T02:59:09.000Z
|
2021-06-08T00:17:27.000Z
|
vbb_backend/program/tests/test_program.py
|
patrickb42/backend-vbb-portal
|
88362bc5b4d5cab95aa67e12694f98371604b65a
|
[
"MIT"
] | 81
|
2020-12-08T00:11:52.000Z
|
2021-08-09T18:13:32.000Z
|
vbb_backend/program/tests/test_program.py
|
patrickb42/backend-vbb-portal
|
88362bc5b4d5cab95aa67e12694f98371604b65a
|
[
"MIT"
] | 5
|
2021-01-12T04:50:26.000Z
|
2021-06-04T02:00:03.000Z
|
import pytest
from vbb_backend.program.models import Program
@pytest.mark.django_db
def test_program_create(program_factory):
newProgram1 = program_factory.create()
newProgram2 = program_factory.create()
assert Program.objects.count() == 2
| 28.111111
| 46
| 0.786561
|
b49a2c9711a21f0f0dc6fc6a82bfb18b12b23b30
| 2,162
|
py
|
Python
|
tests/decorator/test_methods.py
|
rtshilston/flask-cors
|
9fae3e5cf69c634f6144556fb848b7af05cd7bfe
|
[
"MIT"
] | 749
|
2015-02-13T09:30:52.000Z
|
2022-03-30T22:17:14.000Z
|
tests/decorator/test_methods.py
|
rtshilston/flask-cors
|
9fae3e5cf69c634f6144556fb848b7af05cd7bfe
|
[
"MIT"
] | 161
|
2015-02-26T05:59:28.000Z
|
2022-03-31T10:40:13.000Z
|
tests/decorator/test_methods.py
|
rtshilston/flask-cors
|
9fae3e5cf69c634f6144556fb848b7af05cd7bfe
|
[
"MIT"
] | 126
|
2015-02-24T23:26:38.000Z
|
2022-03-21T04:58:49.000Z
|
# -*- coding: utf-8 -*-
"""
test
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2016 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
from ..base_test import FlaskCorsTestCase
from flask import Flask
from flask_cors import *
from flask_cors.core import *
class MethodsCase(FlaskCorsTestCase):
def setUp(self):
self.app = Flask(__name__)
@self.app.route('/defaults')
@cross_origin()
def defaults():
return 'Should only return headers on pre-flight OPTIONS request'
@self.app.route('/test_methods_defined')
@cross_origin(methods=['POST'])
def test_get():
return 'Only allow POST'
def test_defaults(self):
''' Access-Control-Allow-Methods headers should only be returned
if the client makes an OPTIONS request.
'''
self.assertFalse(ACL_METHODS in self.get('/defaults', origin='www.example.com').headers)
self.assertFalse(ACL_METHODS in self.head('/defaults', origin='www.example.com').headers)
res = self.preflight('/defaults', 'POST', origin='www.example.com')
for method in ALL_METHODS:
self.assertTrue(method in res.headers.get(ACL_METHODS))
def test_methods_defined(self):
        ''' If the methods parameter is defined, the methods specified by the
        user should override the default set of allowed methods.
'''
self.assertFalse(ACL_METHODS in self.get('/test_methods_defined').headers)
self.assertFalse(ACL_METHODS in self.head('/test_methods_defined').headers)
res = self.preflight('/test_methods_defined', 'POST', origin='www.example.com')
self.assertTrue('POST' in res.headers.get(ACL_METHODS))
res = self.preflight('/test_methods_defined', 'PUT', origin='www.example.com')
self.assertFalse(ACL_METHODS in res.headers)
res = self.get('/test_methods_defined', origin='www.example.com')
self.assertFalse(ACL_METHODS in res.headers)
if __name__ == "__main__":
unittest.main()
| 34.870968
| 97
| 0.66235
|
81a516f0d78d15d0084b0f0d0a4cf6969b58d153
| 3,438
|
py
|
Python
|
test/test_conversion.py
|
kallyas/PythonAlgorithms
|
e9b4c8dddad101ef0ff4bd4786d506f34f6f4d80
|
[
"MIT"
] | 1
|
2022-02-23T19:22:44.000Z
|
2022-02-23T19:22:44.000Z
|
test/test_conversion.py
|
kallyas/PythonAlgorithms
|
e9b4c8dddad101ef0ff4bd4786d506f34f6f4d80
|
[
"MIT"
] | null | null | null |
test/test_conversion.py
|
kallyas/PythonAlgorithms
|
e9b4c8dddad101ef0ff4bd4786d506f34f6f4d80
|
[
"MIT"
] | null | null | null |
import unittest
from conversion import *
class TestConversions(unittest.TestCase):
def test_decimal_to_binary(self):
self.assertEqual(decimal_to_binary('8'), '1000')
self.assertEqual(decimal_to_binary('10'), '1010')
self.assertEqual(decimal_to_binary('15'), '1111')
def test_decimal_to_binary_invalid(self):
self.assertRaises(ValueError, decimal_to_binary, 'abc')
self.assertRaises(ValueError, decimal_to_binary, '1.5')
self.assertRaises(ValueError, decimal_to_binary, '-1')
def test_decimal_to_binary_empty(self):
self.assertRaises(ValueError, decimal_to_binary, '')
def test_binary_to_decimal(self):
self.assertEqual(binary_to_decimal('1000'), 8)
self.assertEqual(binary_to_decimal('1010'), 10)
self.assertEqual(binary_to_decimal('1111'), 15)
def test_binary_to_decimal_invalid(self):
self.assertRaises(ValueError, binary_to_decimal, 'abc')
self.assertRaises(ValueError, binary_to_decimal, '1.5')
self.assertRaises(ValueError, binary_to_decimal, '-1')
def test_binary_to_decimal_empty(self):
self.assertRaises(ValueError, binary_to_decimal, '')
def test_hexa_decimal_to_decimal(self):
self.assertEqual(hexa_decimal_to_decimal('A'), 10)
self.assertEqual(hexa_decimal_to_decimal('F'), 15)
self.assertEqual(hexa_decimal_to_decimal('1'), 1)
def test_hexa_decimal_to_decimal_invalid(self):
self.assertRaises(ValueError, hexa_decimal_to_decimal, 'abc')
self.assertRaises(ValueError, hexa_decimal_to_decimal, '1.5')
self.assertRaises(ValueError, hexa_decimal_to_decimal, '-1')
def test_hexa_decimal_to_decimal_empty(self):
self.assertRaises(ValueError, hexa_decimal_to_decimal, '')
def test_decimal_to_hexa(self):
self.assertEqual(decimal_to_hexa(10), 'A')
self.assertEqual(decimal_to_hexa(15), 'F')
self.assertEqual(decimal_to_hexa(1), '1')
def test_decimal_to_hexa_invalid(self):
self.assertRaises(ValueError, decimal_to_hexa, 'abc')
self.assertRaises(ValueError, decimal_to_hexa, '1.5')
self.assertRaises(ValueError, decimal_to_hexa, '-1')
def test_decimal_to_hexa_empty(self):
self.assertRaises(ValueError, decimal_to_hexa, '')
def test_octal_to_decimal(self):
self.assertEqual(octal_to_decimal('43'), 35)
self.assertEqual(octal_to_decimal('10'), 8)
self.assertEqual(octal_to_decimal('15'), 13)
def test_octal_to_decimal_invalid(self):
self.assertRaises(ValueError, octal_to_decimal, 'abc')
self.assertRaises(ValueError, octal_to_decimal, '1.5')
self.assertRaises(ValueError, octal_to_decimal, '-1')
def test_octal_to_decimal_empty(self):
self.assertRaises(ValueError, octal_to_decimal, '')
def test_decimal_to_octal(self):
self.assertEqual(decimal_to_octal('8'), '10')
self.assertEqual(decimal_to_octal('10'), '12')
self.assertEqual(decimal_to_octal('175'), '257')
def test_decimal_to_octal_invalid(self):
self.assertRaises(ValueError, decimal_to_octal, 'abc')
self.assertRaises(ValueError, decimal_to_octal, '1.5')
self.assertRaises(ValueError, decimal_to_octal, '-1')
def test_decimal_to_octal_empty(self):
self.assertRaises(ValueError, decimal_to_octal, '')
if __name__ == '__main__':
unittest.main()
| 39.068182
| 69
| 0.710878
|
3f76218043283966396ce8e2b27f8d98de3ee5e0
| 36,227
|
py
|
Python
|
template/provider.py
|
lmr/Template-Toolkit-Python
|
3b1affc79c2f650b057956b0dbf6e0cb51515999
|
[
"Artistic-2.0"
] | 2
|
2021-05-07T08:50:06.000Z
|
2021-06-12T15:48:23.000Z
|
template/provider.py
|
lmr/Template-Toolkit-Python
|
3b1affc79c2f650b057956b0dbf6e0cb51515999
|
[
"Artistic-2.0"
] | 1
|
2018-11-29T08:49:42.000Z
|
2018-11-29T08:49:42.000Z
|
template/provider.py
|
lmr/Template-Toolkit-Python
|
3b1affc79c2f650b057956b0dbf6e0cb51515999
|
[
"Artistic-2.0"
] | null | null | null |
#
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import collections
import os
import re
import time
from template.config import Config
from template.constants import *
from template.document import Document
from template.util import Literal, Struct, TemplateException, modtime
"""
template.provider - Provider module for loading/compiling templates
SYNOPSIS
provider = template.provider.Provider(options)
tmpl = provider.fetch(name)
DESCRIPTION
The template.provider module is used to load, parse, compile and cache
template documents. This object may be sub-classed to provide more
specific facilities for loading, or otherwise providing access to
templates.
The template.context.Context objects maintain a list of Provider
objects which are polled in turn (via fetch()) to return a requested
template. Each may return a compiled template, raise an error, or
decline to serve the request, giving subsequent providers a chance to
do so.
This is the "Chain of Responsiblity" pattern. See 'Design Patterns' for
further information.
The Provider class can also be subclassed to provide templates from a
different source; for example, a database. See SUBCLASSING, below.
This documentation needs work.
PUBLIC METHODS
__init__(options)
Constructor method which initializes a new Provider object. The
optional parameter may be a dictionary containing any of the following
items:
* INCLUDE_PATH
The INCLUDE_PATH is used to specify one or more directories in which
template files are located. When a template is requested that isn't
defined locally as a BLOCK, each of the INCLUDE_PATH directories is
searched in turn to locate the template file. Multiple directories
can be specified as a list or as a single string where each directory
is delimited by ':'.
provider = template.provider.Provider({
'INCLUDE_PATH': '/usr/local/templates',
})
provider = template.provider.Provider({
'INCLUDE_PATH': '/usr/local/templates:/tmp/my/templates',
})
provider = template.provider.Provider({
'INCLUDE_PATH': [ '/usr/local/templates',
'/tmp/my/templates' ],
})
On Win32 systems, a little extra magic is invoked, ignoring delimiters
that have ':' followed by a '/' or '\'. This avoids confusion when using
directory names like 'C:\Blah Blah'.
When specified as a list, the INCLUDE_PATH path can contain elements
which dynamically generate a list of INCLUDE_PATH directories. These
generator elements can be specified as a callable object or an object
which implements a paths() method.
provider = template.provider.Provider({
'INCLUDE_PATH': [ '/usr/local/templates',
lambda: incpath_generator(),
my.incpath.generator.Generator( ... ) ],
})
Each time a template is requested and the INCLUDE_PATH examined, the
callable or object method will be called. A list of directories
should be returned. Generator subroutines and objects should report
errors by raising an exception.
For example:
def incpath_generator():
# ...some code...
if all_is_well:
return [list_of_directories]
else:
raise MyError("cannot generate INCLUDE_PATH...\n")
or:
class MyGenerator:
def paths(self):
# ... some code ...
if all_is_well:
return [list_of_directories]
else:
raise MyError("cannot generate INCLUDE_PATH...\n")
* DELIMITER
Used to provide an alternative delimiter character sequence for
separating paths specified in the INCLUDE_PATH. The default value for
DELIMITER is ':'.
# tolerate Silly Billy's file system conventions
provider = template.provider.Provider({
'DELIMITER': '; ',
'INCLUDE_PATH': 'C:/HERE/NOW; D:/THERE/THEN',
})
# better solution: install Linux! :-)
On Win32 systems, the default delimiter is a little more intelligent,
splitting paths only on ':' characters that aren't followed by a '/'.
This means that the following should work as planned, splitting the
INCLUDE_PATH into 2 separate directories, C:/foo and C:/bar.
# on Win32 only
provider = template.provider.Provider({
'INCLUDE_PATH': 'C:/Foo:C:/Bar'
})
However, if you're using Win32 then it's recommended that you
explicitly set the DELIMITER character to something else (e.g. ';')
rather than rely on this subtle magic.
* ABSOLUTE
The ABSOLUTE flag is used to indicate if templates specified with
absolute filenames (e.g. '/foo/bar') should be processed. It is
disabled by default and any attempt to load a template by such a
name will cause a 'file' exception to be raised.
provider = template.provider.Provider({
'ABSOLUTE': 1,
})
# this is why it's disabled by default
[% INSERT /etc/passwd %]
On Win32 systems, the regular expression for matching absolute
pathnames is tweaked slightly to also detect filenames that start with
a driver letter and colon, such as:
C:/Foo/Bar
* RELATIVE
The RELATIVE flag is used to indicate if templates specified with
filenames relative to the current directory (e.g. './foo/bar' or
'../../some/where/else') should be loaded. It is also disabled by
default, and will raise a 'file' error if such template names are
encountered.
provider = template.provider.Provider({
'RELATIVE': 1,
})
[% INCLUDE ../logs/error.log %]
* DEFAULT
The DEFAULT option can be used to specify a default template which
should be used whenever a specified template can't be found in the
INCLUDE_PATH.
provider = template.provider.Provider({
'DEFAULT': 'notfound.html',
})
If a non-existent template is requested through the template.process()
method, or by an INCLUDE, PROCESS or WRAPPER directive, then the
DEFAULT template will instead be processed, if defined. Note that the
DEFAULT template is not used when templates are specified with
absolute or relative filenames, or as a file object or template literal.
* CACHE_SIZE
The template.provider module caches compiled templates to avoid the
need to re-parse template files or blocks each time they are used.
The CACHE_SIZE option is used to limit the number of compiled
templates that the module should cache.
By default, the CACHE_SIZE is None and all compiled templates are
cached. When set to any positive value, the cache will be limited to
storing no more than that number of compiled templates. When a new
template is loaded and compiled and the cache is full (i.e. the number
of entries == CACHE_SIZE), the least recently used compiled template
is discarded to make room for the new one.
The CACHE_SIZE can be set to 0 to disable caching altogether.
provider = template.provider.Provider({
'CACHE_SIZE': 64, # only cache 64 compiled templates
})
provider = template.provider.Provider({
'CACHE_SIZE': 0, # don't cache any compiled templates
})
As well as caching templates as they are found, the Provider also
implements negative caching to keep track of templates that are not
found. This allows the provider to quickly decline a request for a
template that it has previously failed to locate, saving the effort of
going to look for it again. This is useful when an INCLUDE_PATH
includes multiple providers, ensuring that the request is passed down
through the providers as quickly as possible.
* STAT_TTL
This value can be set to control how long the Provider will keep a
template cached in memory before checking to see if the source
template has changed.
provider = template.provider.Provider({
'STAT_TTL': 60, # one minute
})
The default value is 1 (second). You'll probably want to set this to a
higher value if you're running the Template Toolkit inside a
persistent web server application. For example, set it to 60 and the
provider will only look for changes to templates once a minute at
most. However, during development (or any time you're making frequent
changes to templates) you'll probably want to keep it set to a low
value so that you don't have to wait for the provider to notice that
your templates have changed.
* COMPILE_EXT
From version 2 onwards, the Template Toolkit has the ability to
compile templates to Python code and save them to disk for subsequent
use (i.e. cache persistence). The COMPILE_EXT option may be provided
to specify a filename extension for compiled template files. It is
None by default and no attempt will be made to read or write any
compiled template files.
provider = template.provider.Provider({
'COMPILE_EXT': '.ttc',
})
If COMPILE_EXT is defined (and COMPILE_DIR isn't, see below) then compiled
template files with the COMPILE_EXT extension will be written to the same
directory from which the source template files were loaded.
Compiling and subsequent reuse of templates happens automatically
whenever the COMPILE_EXT or COMPILE_DIR options are set. The Template
Toolkit will automatically reload and reuse compiled files when it
finds them on disk. If the corresponding source file has been
modified since the compiled version was written, then it will load and
re-compile the source and write a new compiled version to disk.
This form of cache persistence offers significant benefits in terms of
time and resources required to reload templates. Compiled templates
can be reloaded by a simple import, leaving Python to handle all the
parsing and compilation. This is a Good Thing.
* COMPILE_DIR
The COMPILE_DIR option is used to specify an alternate directory root
under which compiled template files should be saved.
provider = template.provider.Provider({
'COMPILE_DIR': '/tmp/ttc',
})
The COMPILE_EXT option may also be specified to have a consistent file
extension added to these files.
provider1 = template.provider.Provider({
'COMPILE_DIR': '/tmp/ttc',
'COMPILE_EXT': '.ttc1',
})
provider2 = template.provider.Provider({
'COMPILE_DIR': '/tmp/ttc',
'COMPILE_EXT': '.ttc2',
})
When COMPILE_EXT is undefined, the compiled template files have the
same name as the original template files, but reside in a different
directory tree.
Each directory in the INCLUDE_PATH is replicated in full beneath the
COMPILE_DIR directory. This example:
provider = template.provider.Provider({
'COMPILE_DIR': '/tmp/ttc',
'INCLUDE_PATH': '/home/abw/templates:/usr/share/templates',
})
would create the following directory structure:
/tmp/ttc/home/abw/templates/
/tmp/ttc/usr/share/templates/
Files loaded from different INCLUDE_PATH directories will have their
compiled forms save in the relevant COMPILE_DIR directory.
On Win32 platforms a filename may be prefixed by a drive letter and
colon. e.g.
C:/My Templates/header
The colon will be silently stripped from the filename when it is added
to the COMPILE_DIR value(s) to prevent illegal filenames being generated.
Any colon in COMPILE_DIR elements will be left intact. For example:
# Win32 only
provider = template.provider.Provider({
'DELIMITER': ';',
'COMPILE_DIR': 'C:/TT2/Cache',
'INCLUDE_PATH': 'C:/TT2/Templates;D:/My Templates',
})
This would create the following cache directories:
C:/TT2/Cache/C/TT2/Templates
C:/TT2/Cache/D/My Templates
* TOLERANT
The TOLERANT flag is used by the various Template Toolkit provider
modules (template.provider, template.plugins, template.filters) to
control their behaviour when errors are encountered. By default, any
errors are reported as such, with the request for the particular
resource (template, plugin, filter) being denied and an exception
raised. When the TOLERANT flag is set to any true value, errors will
be silently ignored and the provider will instead return None. This
allows a subsequent provider to take responsibility for providing the
resource, rather than failing the request outright. If all providers
decline to service the request, either through tolerated failure or a
genuine disinclination to comply, then a '<resource> not found'
exception is raised.
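A tolerant provider can be configured like so (a minimal sketch; the
INCLUDE_PATH value is illustrative):
provider = template.provider.Provider({
'INCLUDE_PATH': '/web/templates',
'TOLERANT': 1,
})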
* PARSER
The template.parser module implements a parser object for compiling
templates into Python code which can then be executed. A default
object of this class is created automatically and then used by the
Provider whenever a template is loaded and requires compilation. The
PARSER option can be used to provide an alternate parser object.
provider = template.provider.Provider({
'PARSER': myorg.template.parser.Parser({ ... }),
})
* DEBUG
The DEBUG option can be used to enable debugging messages from the
template.provider module by setting it to include the DEBUG_PROVIDER
value.
from template.constants import *
template = template.Template({
'DEBUG': DEBUG_PROVIDER,
})
fetch(name)
Returns a compiled template for the name specified. If the template
cannot be found then None is returned. If an error occurs (e.g. read
error, parse error) then an exception is raised. If the TOLERANT flag
is set, the method returns None instead of raising an exception.
store(name, template)
Stores the compiled template 'template' in the cache under the name 'name'.
Subsequent calls to fetch(name) will return this template in preference to
any disk-based file.
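For example (illustrative; 'compiled' is a template object obtained
from an earlier fetch() call or compiled by other means):
provider.store('my_header', compiled)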
include_path(newpath)
Accessor method for the INCLUDE_PATH setting. If called with an
argument, this method will replace the existing INCLUDE_PATH with
the new value.
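For example (directory names are illustrative):
provider.include_path(['/web/templates', '/web/shared'])
current_path = provider.include_path()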
paths()
This method generates a copy of the INCLUDE_PATH list. Any elements
in the list which are dynamic generators (e.g. callables or objects
implementing a paths() method) will be called and the list of
directories returned merged into the output list.
It is possible to provide a generator which returns itself, thus
sending this method into an infinite loop. To detect and prevent this
from happening, the MAX_DIRS class variable, set to 64 by default,
limits the maximum number of paths that can be added to, or generated
for the output list. If this number is exceeded then the method will
immediately raise an error reporting as much.
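For example, a callable can be mixed in with literal directories to
generate part of the search path at request time (a sketch; the
function and directory names are illustrative):
def theme_paths():
    return ['/web/themes/current', '/web/themes/default']
provider = template.provider.Provider({
'INCLUDE_PATH': ['/web/templates', theme_paths],
})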
SUBCLASSING
The Provider class can be subclassed to provide templates from a
different source (e.g. a database). In most cases you'll just need to
provide custom implementations of the _template_modified() and
_template_content() methods.
Caching in memory and on disk will still be applied (if enabled) when
overriding these methods.
_template_modified(path)
Returns a timestamp of the path passed in by calling stat(). This can
be overridden, for example, to return a last modified value from a
database. The value returned should be a Unix epoch timestamp
although a sequence number should work as well.
_template_content(path, modtime=None)
This method returns the content of the template for all INCLUDE,
PROCESS, and INSERT directives. It returns the content of the
template located at 'path', or None if no such file exists.
If the optional parameter 'modtime' is present, the modification time
of the file is stored in its 'modtime' attribute.
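As a minimal illustration (the in-memory template store below is purely
hypothetical), a subclass serving templates from a dictionary rather
than from disk might look like this:
class MemoryProvider(Provider):
    TEMPLATES = {'header': 'Hello [% name %]!'}
    def _template_modified(self, path):
        # Any comparable value works as a 'timestamp' here.
        key = os.path.basename(path)
        return 1 if key in self.TEMPLATES else None
    def _template_content(self, path, modtime=None):
        if modtime is not None:
            modtime.modtime = 1
        return self.TEMPLATES.get(os.path.basename(path))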
"""
RELATIVE_PATH = re.compile(r"(?:^|/)\.+/")
class Error(Exception):
pass
class Provider:
"""This class handles the loading, compiling and caching of
templates.
Multiple Provider objects can be stacked and queried in turn to
effect a Chain-of-Command between them. A provider will attempt to
return the requested template, raise an exception, or decline to
provide the template (by returning None), allowing subsequent
providers to attempt to deliver it. See 'Design Patterns' for
further details.
"""
MAX_DIRS = 64
STAT_TTL = 1
DEBUG = False
def __init__(self, params):
size = params.get("CACHE_SIZE", 2)
paths = params.get("INCLUDE_PATH", ".")
cdir = params.get("COMPILE_DIR", "")
dlim = params.get("DELIMITER", os.name == "nt" and r":(?!\/)" or ":")
debug = params.get("DEBUG")
if isinstance(paths, str):
paths = re.split(dlim, paths)
if size == 1 or size < 0:
size = 2
if debug is not None:
self.__debug = debug & (DEBUG_PROVIDER | DEBUG_FLAGS)
else:
self.__debug = self.DEBUG
if cdir:
for path in paths:
if not isinstance(path, str):
continue
if os.name == "nt":
path = path.replace(":", "")
path = "%s%s%s" % (cdir, os.path.sep, path)  # mirror each INCLUDE_PATH entry under COMPILE_DIR
if not os.path.isdir(path):
os.makedirs(path)
self.__lookup = {}
self.__notfound = {} # Tracks templates *not* found.
self.__slots = 0
self.__size = size
self.__include_path = paths
self.__delimiter = dlim
self.__compile_dir = cdir
self.__compile_ext = params.get("COMPILE_EXT", "")
self.__absolute = bool(params.get("ABSOLUTE"))
self.__relative = bool(params.get("RELATIVE"))
self.__tolerant = bool(params.get("TOLERANT"))
self.__document = params.get("DOCUMENT", Document)
self.__parser = params.get("PARSER")
self.__default = params.get("DEFAULT")
self.__encoding = params.get("ENCODING")
self.__stat_ttl = params.get("STAT_TTL", self.STAT_TTL)
self.__params = params
self.__head = None
self.__tail = None
def fetch(self, name, prefix=None):
"""Returns a compiled template for the name specified by parameter.
The template is returned from the internal cache if it exists, or
loaded and then subsequently cached. The ABSOLUTE and RELATIVE
configuration flags determine if absolute (e.g. '/something...')
and/or relative (e.g. './something') paths should be honoured.
The INCLUDE_PATH is otherwise used to find the named file. 'name'
may also be a template.util.Literal object that contains the
template text, or a file object from which the content is read.
The compiled template is not cached in these latter cases given
that there is no filename to cache under. A subsequent call to
store(name, compiled) can be made to cache the compiled template
for future fetch() calls, if necessary.
Returns a compiled template or None if the template could not be
found. On error (e.g. the file was found but couldn't be read or
parsed), an exception is raised. The TOLERANT configuration
option can be set to downgrade any errors to None.
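A typical interaction looks like this (names are illustrative; the
second call assumes the ABSOLUTE option was enabled):
compiled = provider.fetch('header')
compiled = provider.fetch('/web/templates/header')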
"""
if not isinstance(name, str):
data = self._load(name)
data = self._compile(data)
return data and data.data
elif os.path.isabs(name):
if self.__absolute:
return self._fetch(name)
elif not self.__tolerant:
raise Error("%s: absolute paths are not allowed (set ABSOLUTE option)"
% name)
elif RELATIVE_PATH.search(name):
if self.__relative:
return self._fetch(name)
elif not self.__tolerant:
raise Error("%s: relative paths are not allowed (set RELATIVE option)"
% name)
elif self.__include_path:
return self._fetch_path(name)
return None
def _load(self, name, alias=None):
"""Load template text from a string (template.util.Literal), file
object, or from an absolute filename.
Returns an object with the following attributes:
name filename or 'alias', if provided, or 'input text', etc.
text template text
time modification time of file, or current time for files/strings
load time file was loaded (now!)
On error, raises an exception, or returns None if TOLERANT is set.
"""
now = time.time()
if alias is None and isinstance(name, str):
alias = name
if isinstance(name, Literal):
return Data(name.text(), alias, alt="input text", load=0)
elif not isinstance(name, str):
return Data(name.read(), alias, alt="input file", load=0)
if self._template_modified(name):
when = Struct()
text = self._template_content(name, when)
if text is not None:
return Data(text, alias, when=when.modtime, path=name)
return None
def _fetch(self, name, t_name=None):
"""Fetch a file from cache or disk by specification of an absolute
or relative filename.
'name' is the path to search (possibly prefixed by INCLUDE_PATH).
't_name' is the template name.
No search of the INCLUDE_PATH is made. If the file is found and
loaded, it is compiled and cached.
"""
# First see if the named template is in the memory cache.
slot = self.__lookup.get(name)
if slot:
# Test if the cached entry is fresh, and reload/compile if not.
self._refresh(slot)
return slot.data
now = time.time()
last_stat_time = self.__notfound.get(name)
if last_stat_time:
expires_in = last_stat_time + self.__stat_ttl - now
if expires_in > 0:
return None
else:
del self.__notfound[name]
# Is there an up-to-date compiled version on disk?
if self._compiled_is_current(name):
compiled_template = self._load_compiled(self._compiled_filename(name))
if compiled_template:
return self.store(name, compiled_template)
# Now fetch template from source, compile, and cache.
tmpl = self._load(name, t_name)
if tmpl:
tmpl = self._compile(tmpl, self._compiled_filename(name))
return self.store(name, tmpl.data)
# Template could not be found. Add to the negative/notfound cache.
self.__notfound[name] = now
return None
def _compile(self, data, compfile=None):
"""Private method called to parse the template text and compile it
into a runtime form.
Creates and delegates a template.parser.Parser object to handle
the compilation, or uses the object passed in PARSER. On success,
the compiled template is stored in the 'data' attribute of the
'data' object and returned. On error, an exception is raised, or
None is returned if the TOLERANT flag is set. The optional
'compfile' parameter may be passed to specify the name of a
compiled template file to which the generated Python code should
be written. Errors are (for now...) silently ignored, assuming
that failures to open a file for writing are intentional (e.g. no
directory write permission).
"""
if data is None:
return None
text = data.text
error = None
if not self.__parser:
self.__parser = Config.parser(self.__params)
# discard the template text - we don't need it any more
# del data.text
parsedoc = self.__parser.parse(text, data)
parsedoc["METADATA"].setdefault("name", data.name)
parsedoc["METADATA"].setdefault("modtime", data.time)
# write the Python code to the file compfile, if defined
if compfile:
basedir = os.path.dirname(compfile)
if not os.path.isdir(basedir):
try:
os.makedirs(basedir)
except IOError as e:
error = Error("failed to create compiled templates "
"directory: %s (%s)" % (basedir, e))
if not error:
try:
self.__document.write_python_file(compfile, parsedoc)
except Exception as e:
error = Error("cache failed to write %s: %s" % (
os.path.basename(compfile), e))
if error is None and data.time is not None:
if not compfile:
raise Error("invalid null filename")
ctime = int(data.time)
os.utime(compfile, (ctime, ctime))
if not error:
data.data = Document(parsedoc)
return data
if self.__tolerant:
return None
else:
raise error
def _compiled_is_current(self, template_name):
"""Returns True if template_name and its compiled name exists and
they have the same mtime.
"""
compiled_name = self._compiled_filename(template_name)
if not compiled_name:
return False
compiled_mtime = modtime(compiled_name)
if not compiled_mtime:
return False
template_mtime = self._template_modified(template_name)
if not template_mtime:
return False
return compiled_mtime == template_mtime
def _template_modified(self, path):
"""Returns the last modified time of the given path, or None if the
path does not exist.
Override if templates are not on disk, for example.
"""
if path:
return modtime(path)
else:
return None
def _template_content(self, path, modtime=None):
"""Fetches content pointed to by 'path'.
Stores the modification time of the file in the "modtime" attribute
of the 'modtime' argument, if it is present.
"""
if not path:
raise Error("No path specified to fetch content from")
f = None
try:
f = open(path)
if modtime is not None:
modtime.modtime = os.fstat(f.fileno()).st_mtime
return f.read()
finally:
if f:
f.close()
def _fetch_path(self, name):
"""Fetch a file from cache or disk by specification of an absolute
cache name (e.g. 'header') or filename relative to one of the
INCLUDE_PATH directories.
If the file isn't already cached and can be found and loaded, it
is compiled and cached under the full filename.
"""
# The template may have been stored using a non-filename name
# so look for the plain name in the cache first.
slot = self.__lookup.get(name)
if slot:
# Cached entry exists, so refresh slot and extract data.
self._refresh(slot)
return slot.data
paths = self.paths()
# Search the INCLUDE_PATH for the file, in cache or on disk.
for dir in paths:
path = os.path.join(dir, name)
data = self._fetch(path, name)
if data:
return data
# Not found in INCLUDE_PATH, now try DEFAULT.
if self.__default is not None and name != self.__default:
return self._fetch_path(self.__default)
# We could not handle this template name.
return None
def _compiled_filename(self, path):
if not (self.__compile_ext or self.__compile_dir):
return None
if os.name == "nt":
path = path.replace(":", "")
compiled = "%s%s" % (path, self.__compile_ext)
if self.__compile_dir:
# Can't use os.path.join here; compiled may be absolute.
compiled = "%s%s%s" % (self.__compile_dir, os.path.sep, compiled)
return compiled
def _modified(self, name, time=None):
"""When called with a single argument, it returns the modification
time of the named template. When called with a second argument it
returns true if 'name' has been modified since 'time'.
"""
load = self._template_modified(name)
if not load:
return int(bool(time))
if time:
return int(load > time)
else:
return load
def _refresh(self, slot):
"""Private method called to mark a cache slot as most recently used.
A reference to the cache slot should be passed by parameter. The
slot is relocated to the head of the linked list. If the file
from which the data was loaded has been updated since it was
compiled, then it is re-loaded from disk and re-compiled.
"""
data = None
now = time.time()
expires_in_sec = slot.stat + self.__stat_ttl - now
if expires_in_sec <= 0:
slot.stat = now
template_mtime = self._template_modified(slot.name)
if template_mtime is None or template_mtime != slot.load:
try:
data = self._load(slot.name, slot.data.name)
data = self._compile(data)
except:
slot.stat = 0
raise
else:
slot.data = data.data
slot.load = data.time
if self.__head is not slot:
# remove existing slot from usage chain...
# Slot objects always define a plain 'next' attribute.
slot_next = slot.next
if slot.prev:
slot.prev.next = slot_next
else:
self.__head = slot_next
if slot_next:
slot_next.prev = slot.prev
else:
self.__tail = slot.prev
# ...and add to start of list
head = self.__head
if head:
head.prev = slot
slot.prev = None
slot.next = head
self.__head = slot
return data
def _load_compiled(self, path):
try:
return Document.evaluate_file(path)
except TemplateException as e:
raise Error("compiled template %s: %s" % (path, e))
def _store(self, name, data, compfile=None):
"""Private method called to add a data item to the cache.
If the cache size limit has been reached then the oldest entry at
the tail of the list is removed and its slot relocated to the head
of the list and reused for the new data item. If the cache is
under the size limit, or if no size limit is defined, then the
item is added to the head of the list.
Returns compiled template.
"""
# Return if memory cache disabled.
if self.__size is not None and not self.__size:
return data.data
# Extract the compiled template from the data object.
data = data.data
# Check the modification time -- extra stat here.
load = self._modified(name)
if self.__size is not None and self.__slots >= self.__size:
# cache has reached size limit, so reuse oldest entry
# remove entry from tail of list
slot = self.__tail
slot.prev.next = None
self.__tail = slot.prev
# remove name lookup for old node
del self.__lookup[slot.name]
# add modified node to head of list
head = self.__head
if head:
head.prev = slot
slot.reset(name, data, load, time.time(), None, head)
self.__head = slot
# add name lookup for new node
self.__lookup[name] = slot
else:
# cache is under size limit, or none is defined
head = self.__head
slot = Slot(name, data, load, time.time(), None, head)
if head:
head.prev = slot
self.__head = slot
if not self.__tail:
self.__tail = slot
# add lookup from name to slot and increment nslots
self.__lookup[name] = slot
self.__slots += 1
return data
def paths(self):
"""Evaluates the INCLUDE_PATH list, ignoring any blank entries,
and calling any callable or objects to return dynamically
generated path lists.
Returns a new list of paths or raises an exception on error.
"""
ipaths = self.__include_path[:]
opaths = []
count = self.MAX_DIRS
while ipaths and count > 0:
count -= 1
dir = ipaths.pop(0)
if not dir:
continue
# dir can be a callable or an object with a paths() method which
# returns a dynamically generated list of search paths
if isinstance(dir, collections.Callable):
ipaths[:0] = dir()
else:
try:
paths = dir.paths
except AttributeError:
pass
else:
if isinstance(paths, collections.Callable):
ipaths[:0] = paths()
continue
opaths.append(dir)
if ipaths:
raise Error("INCLUDE_PATH exceeds %d directories" % (self.MAX_DIRS,))
else:
return opaths
def store(self, name, data):
"""Store a compiled template 'data' in the cache as 'name'.
Returns compiled template.
"""
return self._store(name, Data(data=data, load=0))
def load(self, name, prefix=None):
"""Load a template without parsing/compiling it, suitable for use
with the INSERT directive.
There's some duplication with fetch() and at some point this could
be reworked to integrate them a little closer.
"""
path = name
error = None
if os.path.isabs(name):
if not self.__absolute:
error = ("%s: absolute paths are not allowed (set ABSOLUTE option)"
% name)
elif RELATIVE_PATH.search(name):
if not self.__relative:
error = ("%s: relative paths are not allowed (set RELATIVE option)"
% name)
else:
for dir in self.paths():
path = os.path.join(dir, name)
if self._template_modified(path):
break
else:
path = None
if path and not error:
try:
data = self._template_content(path)
except IOError as e:
error = "%s: %s" % (name, e)
if error:
if not self.__tolerant:
raise Error(error)
elif path is not None:
return data
return None
def include_path(self, path=None):
"""Accessor method for the INCLUDE_PATH setting.
If called with an argument, this method will replace the existing
INCLUDE_PATH with the new value.
"""
if path:
self.__include_path = path
return self.__include_path
def parser(self):
return self.__parser
def tolerant(self):
return self.__tolerant
class Data:
def __init__(self, text=None, name=None, alt=None, when=None, path=None,
load=None, data=None):
self.text = text
if name is not None:
self.name = name
else:
self.name = alt
if when is not None:
self.time = when
else:
self.time = time.time()
if path is not None:
self.path = path
else:
self.path = self.name
self.load = load
self.data = data
def __repr__(self):
return "Data(text=%r, name=%r, time=%r, path=%r, load=%r, data=%r)" % (
self.text, self.name, self.time, self.path, self.load, self.data)
class Slot:
def __init__(self, name, data, load, stat, prev=None, next=None):
self.reset(name, data, load, stat, prev, next)
def reset(self, name, data, load, stat, prev, next):
self.name = name
self.data = data
self.load = load
self.stat = stat
self.prev = prev
self.next = next
| 35.001932
| 86
| 0.648108
|
e4085074ce997f91c9d98960c474e8539c07777d
| 2,502
|
py
|
Python
|
CNN/main_new.py
|
junmadlab/Learn_to_compose_IDETC_2020
|
edbaaccf51b195a60f672cac4609a56f16f277c8
|
[
"MIT"
] | null | null | null |
CNN/main_new.py
|
junmadlab/Learn_to_compose_IDETC_2020
|
edbaaccf51b195a60f672cac4609a56f16f277c8
|
[
"MIT"
] | null | null | null |
CNN/main_new.py
|
junmadlab/Learn_to_compose_IDETC_2020
|
edbaaccf51b195a60f672cac4609a56f16f277c8
|
[
"MIT"
] | null | null | null |
"""
Trains a model and visualizes results
Author(s): Wei Chen (wchen459@umd.edu)
Jun Wang (jwang38@umd.edu)
"""
import argparse
import os
import glob
import numpy as np
from importlib import import_module
import shutil
import h5py
from visualization import visualize_samples, visualize
if __name__ == "__main__":
# Arguments
parser = argparse.ArgumentParser(description='Train/Evaluate')
parser.add_argument('mode', type=str, default='startover', help='startover or evaluate')
parser.add_argument('model', type=str, default='ae_new', help='model')
parser.add_argument('--batch_size', type=int, default=32, help='batch_size')
parser.add_argument('--save_interval', type=int, default=500, help='save interval')
parser.add_argument('--train_steps', type=int, default=200000, help='training steps')
args = parser.parse_args()
assert args.mode in ['startover', 'evaluate']
if args.mode == 'startover':
training_dir = './saved_model_StrL_dis'
if not os.path.exists(training_dir):
os.makedirs(training_dir)
log_dir = '{}/{}/logs'.format(training_dir, args.model)
if os.path.exists(log_dir):
shutil.rmtree(log_dir)
results_dir = './results_StrL_dis'
if not os.path.exists(results_dir):
os.makedirs(results_dir)
hf_train = h5py.File('./DeCNN_database_StrL_dis/train.h5', 'r')
hf_test = h5py.File('./DeCNN_database_StrL_dis/test.h5', 'r')
X_train = np.array(hf_train.get('train_x'))
Y_train = np.array(hf_train.get('train_y'))
X_test = np.array(hf_test.get('test_x'))
Y_test = np.array(hf_test.get('test_y'))
rez = X_train.shape[1]
# print(rez)
input_channel = X_train.shape[3]
# print(input_channel)
output_channel = Y_train.shape[3]
# print(output_channel)
# Train/Evaluate
m = import_module(args.model)
model = m.Model(rez, input_channel, output_channel)
model_dir = './saved_model_StrL_dis'
if args.mode == 'startover':
print('Start training ...')
model.train(X_train, Y_train, X_test, Y_test, batch_size=args.batch_size, train_steps=args.train_steps,
save_interval=args.save_interval, save_dir=model_dir)
else:
print('Evaluating ...')
model.restore(model_dir)
# Visualize Results
print('Plotting results ...')
visualize(20, X_test, Y_test, model, results_dir)
print('All completed :)')
| 32.921053
| 112
| 0.663469
|
b8146ee022e51ab39dd7e7eec1bf24115f013df1
| 5,358
|
py
|
Python
|
allennlp/modules/token_embedders/elmo_token_embedder_2.py
|
asiddhant/taskonomy-nlp
|
34042cce5d77a13dd4f3128234d242b7ed14d4ec
|
[
"Apache-2.0"
] | 3
|
2019-06-17T21:09:07.000Z
|
2022-03-18T05:19:31.000Z
|
allennlp/modules/token_embedders/elmo_token_embedder_2.py
|
asiddhant/taskonomy-nlp
|
34042cce5d77a13dd4f3128234d242b7ed14d4ec
|
[
"Apache-2.0"
] | null | null | null |
allennlp/modules/token_embedders/elmo_token_embedder_2.py
|
asiddhant/taskonomy-nlp
|
34042cce5d77a13dd4f3128234d242b7ed14d4ec
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
import torch
from allennlp.common import Params
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.elmo_2 import Elmo2
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.data import Vocabulary
@TokenEmbedder.register("elmo_token_embedder_2")
class ElmoTokenEmbedder2(TokenEmbedder):
"""
Compute a single layer of ELMo representations.
This class serves as a convenience when you only want to use one layer of
ELMo representations at the input of your network. It's essentially a wrapper
around Elmo(num_output_representations=1, ...)
Parameters
----------
options_file : ``str``, required.
An ELMo JSON options file.
weight_file : ``str``, required.
An ELMo hdf5 weight file.
do_layer_norm : ``bool``, optional.
Should we apply layer normalization (passed to ``ScalarMix``)?
dropout : ``float``, optional.
The dropout value to be applied to the ELMo representations.
requires_grad : ``bool``, optional
If True, compute gradient of ELMo parameters for fine tuning.
projection_dim : ``int``, optional
If given, we will project the ELMo embedding down to this dimension. We recommend that you
try using ELMo with a lot of dropout and no projection first, but we have found a few cases
where projection helps (particularly where there is very limited training data).
vocab_to_cache : ``List[str]``, optional, (default = None).
A list of words to pre-compute and cache character convolutions
for. If you use this option, the ElmoTokenEmbedder expects that you pass word
indices of shape (batch_size, timesteps) to forward, instead
of character indices. If you use this option and pass a word which
wasn't pre-cached, this will break.
"""
def __init__(self,
options_file: str,
weight_file: str,
do_layer_norm: bool = False,
dropout: float = 0.5,
requires_grad: bool = False,
projection_dim: int = None,
vocab_to_cache: List[str] = None) -> None:
super(ElmoTokenEmbedder2, self).__init__()
self._elmo = Elmo2(options_file,
weight_file,
1,
do_layer_norm=do_layer_norm,
dropout=dropout,
requires_grad=requires_grad,
vocab_to_cache=vocab_to_cache)
if projection_dim:
self._projection = torch.nn.Linear(self._elmo.get_output_dim(), projection_dim)
else:
self._projection = None
def get_output_dim(self):
return self._elmo.get_output_dim()
def forward(self, # pylint: disable=arguments-differ
inputs: torch.Tensor,
word_inputs: torch.Tensor = None) -> torch.Tensor:
"""
Parameters
----------
inputs: ``torch.Tensor``
Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
word_inputs : ``torch.Tensor``, optional.
If you passed a cached vocab, you can in addition pass a tensor of shape
``(batch_size, timesteps)``, which represent word ids which have been pre-cached.
Returns
-------
The ELMo representations for the input sequence, shape
``(batch_size, timesteps, embedding_dim)``
"""
elmo_output = self._elmo(inputs, word_inputs)
elmo_representations = elmo_output['elmo_representations'][0]
elmo_lstm_output = elmo_output['elmo_lstm_output']
if self._projection:
projection = self._projection
for _ in range(elmo_representations.dim() - 2):
projection = TimeDistributed(projection)
elmo_representations = projection(elmo_representations)
return elmo_representations, elmo_lstm_output
# Custom vocab_to_cache logic requires a from_params implementation.
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ElmoTokenEmbedder': # type: ignore
# pylint: disable=arguments-differ
params.add_file_to_archive('options_file')
params.add_file_to_archive('weight_file')
options_file = params.pop('options_file')
weight_file = params.pop('weight_file')
requires_grad = params.pop('requires_grad', False)
do_layer_norm = params.pop_bool('do_layer_norm', False)
dropout = params.pop_float("dropout", 0.5)
namespace_to_cache = params.pop("namespace_to_cache", None)
if namespace_to_cache is not None:
vocab_to_cache = list(vocab.get_token_to_index_vocabulary(namespace_to_cache).keys())
else:
vocab_to_cache = None
projection_dim = params.pop_int("projection_dim", None)
params.assert_empty(cls.__name__)
return cls(options_file=options_file,
weight_file=weight_file,
do_layer_norm=do_layer_norm,
dropout=dropout,
requires_grad=requires_grad,
projection_dim=projection_dim,
vocab_to_cache=vocab_to_cache)
| 44.65
| 99
| 0.642591
|
1c55fde6c03688a8e1fe6fe20ae030939d540e43
| 4,128
|
py
|
Python
|
tests/parsers/mac_wifi.py
|
Defense-Cyber-Crime-Center/plaso
|
4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47
|
[
"Apache-2.0"
] | 2
|
2016-02-18T12:46:29.000Z
|
2022-03-13T03:04:59.000Z
|
tests/parsers/mac_wifi.py
|
Defense-Cyber-Crime-Center/plaso
|
4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47
|
[
"Apache-2.0"
] | null | null | null |
tests/parsers/mac_wifi.py
|
Defense-Cyber-Crime-Center/plaso
|
4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47
|
[
"Apache-2.0"
] | 6
|
2016-12-18T08:05:36.000Z
|
2021-04-06T14:19:11.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Mac wifi.log parser."""
import unittest
from plaso.formatters import mac_wifi as _ # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers import mac_wifi
from tests.parsers import test_lib
class MacWifiUnitTest(test_lib.ParserTestCase):
"""Tests for the Mac wifi.log parser."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._parser = mac_wifi.MacWifiLogParser()
def testParse(self):
"""Tests the Parse function."""
knowledge_base_values = {u'year': 2013}
test_file = self._GetTestFilePath([u'wifi.log'])
event_queue_consumer = self._ParseFile(
self._parser, test_file, knowledge_base_values=knowledge_base_values)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 9)
event_object = event_objects[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-14 20:36:37.222')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'airportd[88]')
self.assertEqual(event_object.function, u'airportdProcessDLILEvent')
self.assertEqual(event_object.action, u'Interface en0 turn up.')
self.assertEqual(event_object.text, u'en0 attached (up)')
expected_msg = (
u'Action: Interface en0 turn up. '
u'(airportdProcessDLILEvent) '
u'Log: en0 attached (up)')
expected_msg_short = (
u'Action: Interface en0 turn up.')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
event_object = event_objects[1]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-14 20:36:43.818')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'airportd[88]')
self.assertEqual(event_object.function, u'_doAutoJoin')
self.assertEqual(event_object.action, u'Wifi connected to SSID CampusNet')
expected_text = (
u'Already associated to \u201cCampusNet\u201d. Bailing on auto-join.')
self.assertEqual(event_object.text, expected_text)
event_object = event_objects[2]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-14 21:50:52.395')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'airportd[88]')
self.assertEqual(event_object.function, u'_handleLinkEvent')
expected_string = (
u'Unable to process link event, op mode request returned -3903 '
u'(Operation not supported)')
self.assertEqual(event_object.action, expected_string)
self.assertEqual(event_object.text, expected_string)
event_object = event_objects[5]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-14 21:52:09.883')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(u'airportd[88]', event_object.agent)
self.assertEqual(u'_processSystemPSKAssoc', event_object.function)
expected_action = (
u'New wifi configured. BSSID: 88:30:8a:7a:61:88, SSID: AndroidAP, '
u'Security: WPA2 Personal.')
self.assertEqual(event_object.action, expected_action)
expected_text = (
u'No password for network <CWNetwork: 0x7fdfe970b250> '
u'[ssid=AndroidAP, bssid=88:30:8a:7a:61:88, security=WPA2 '
u'Personal, rssi=-21, channel=<CWChannel: 0x7fdfe9712870> '
u'[channelNumber=11(2GHz), channelWidth={20MHz}], ibss=0] '
u'in the system keychain')
self.assertEqual(event_object.text, expected_text)
event_object = event_objects[7]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-12-31 23:59:38.165')
self.assertEqual(event_object.timestamp, expected_timestamp)
event_object = event_objects[8]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-01 01:12:17.311')
self.assertEqual(event_object.timestamp, expected_timestamp)
if __name__ == '__main__':
unittest.main()
| 34.4
| 79
| 0.722626
|
f7ad2f4c9393b1cd648ece9064f3bd6e632c0bdb
| 1,159
|
py
|
Python
|
amos/tests.py
|
chuckoy/grabyaya
|
5fbd7135cc74bdbb29c6d86b1b202b37b94e1660
|
[
"MIT"
] | null | null | null |
amos/tests.py
|
chuckoy/grabyaya
|
5fbd7135cc74bdbb29c6d86b1b202b37b94e1660
|
[
"MIT"
] | null | null | null |
amos/tests.py
|
chuckoy/grabyaya
|
5fbd7135cc74bdbb29c6d86b1b202b37b94e1660
|
[
"MIT"
] | null | null | null |
# django
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory, Client
# own modules
from .factories import UserFactory
from .utils import url_with_querystring
from django.contrib.auth.models import User
class HomePageTest(TestCase):
"""
Tests for home page
"""
def setUp(self):
self.user = UserFactory(username='test_user')
self.factory = RequestFactory()
self.client = Client()
def test_home_page_denies_anonymous(self):
target_url = reverse('amos_index')
redirect_url = url_with_querystring(reverse('login'), 'next=/amos/')
response = self.client.get(target_url, follow=True)
self.assertRedirects(response, redirect_url)
response = self.client.post(target_url, follow=True)
self.assertRedirects(response, redirect_url)
def test_home_page_accepts_user(self):
self.client.force_login(self.user)
target_url = reverse('amos_index')
response = self.client.get(target_url, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'index.html')
| 31.324324
| 76
| 0.707506
|
03f8d3ddde0b3fb404828b615b8c46f1efa8ae33
| 17,484
|
py
|
Python
|
py_cui/dialogs/form.py
|
jupiterbjy/py_cui
|
3f03af05ecc29f04cc07ee6b62c8ac2a76de8a68
|
[
"BSD-3-Clause"
] | null | null | null |
py_cui/dialogs/form.py
|
jupiterbjy/py_cui
|
3f03af05ecc29f04cc07ee6b62c8ac2a76de8a68
|
[
"BSD-3-Clause"
] | null | null | null |
py_cui/dialogs/form.py
|
jupiterbjy/py_cui
|
3f03af05ecc29f04cc07ee6b62c8ac2a76de8a68
|
[
"BSD-3-Clause"
] | 1
|
2021-06-16T08:56:36.000Z
|
2021-06-16T08:56:36.000Z
|
"""Form widget for py_cui. Allows for giving user several fillable text fields in one block
"""
import py_cui.ui
import py_cui.widgets
import py_cui.popups
class DuplicateFormKeyError(Exception):
"""Error thrown when a duplicate form field key is passed
"""
pass
class FormField(py_cui.ui.TextBoxImplementation):
"""Class containing basic logic of a field in a form
Attributes
----------
_fieldname : str
Title of the field
_required : bool
Toggle for making the field be required
"""
def __init__(self, fieldname, initial_text, password, required, logger):
"""Initializer for base FormFields
"""
super().__init__(initial_text, password, logger)
self._fieldname = fieldname
self._required = required
def get_fieldname(self):
"""Getter for field name
Returns
-------
fieldname : str
Title of the field
"""
return self._fieldname
def is_valid(self):
"""Function that checks if field is valid.
This function can be implemented by subclasses to support different
field types (ex. emails etc.)
Returns
-------
is_valid : bool
True of valid conditions are met, false otherwise
msg : str
Message explaining problem. None if valid
"""
msg = None
if len(self._text) == 0 and self.is_required():
msg = 'Field <{}> cannot be empty!'.format(self.get_fieldname())
return msg is None, msg
def is_required(self):
"""Checks if field is required
Returns
-------
required : bool
True if required, false otherwise
"""
return self._required
class FormFieldElement(py_cui.ui.UIElement, FormField):
"""Extension of UI element representing an individual field in the form
Attributes
----------
_field_index : int
The index of the field in the form
_parent_form : FormPopup / Form
The parent UI Element that contains the form element
"""
def __init__(self, parent_form, field_index, field, init_text, passwd, required, renderer, logger):
"""Initializer for the FormFieldElement class
"""
self._parent_form = parent_form
self._field_index = field_index
py_cui.ui.UIElement.__init__(self, 0, field, renderer, logger)
FormField.__init__(self, field, init_text, passwd, required, logger)
self._help_text = 'Press Tab to move to the next field, or Enter to submit.'
self._padx = 0
self._pady = 0
self._selected = False
self.update_height_width()
def get_absolute_start_pos(self):
"""Override of base function. Uses the parent element do compute start position
Returns
-------
field_start_x, field_start_y : int, int
The position in characters in the terminal window to start the Field element
"""
container_height, _ = self._parent_form.get_absolute_dimensions()
single_field_height = int((container_height - 1 - self._parent_form._pady) / self._parent_form.get_num_fields())
parent_start_x, parent_start_y = self._parent_form.get_start_position()
field_start_x = (parent_start_x + 3 + self._parent_form._padx)
field_start_y = (parent_start_y + 1 + self._parent_form._pady + (single_field_height * self._field_index))
return field_start_x, field_start_y
def get_absolute_stop_pos(self):
"""Override of base function. Uses the parent element do compute stop position
Returns
-------
field_stop_x, field_stop_y : int, int
The position in characters in the terminal window to stop the Field element
"""
container_height, _ = self._parent_form.get_absolute_dimensions()
single_field_height = int((container_height - 1 - self._parent_form._pady) / self._parent_form.get_num_fields())
_, parent_start_y = self._parent_form.get_start_position()
parent_stop_x, _ = self._parent_form.get_stop_position()
field_stop_x = (parent_stop_x - 3 - self._parent_form._padx)
field_stop_y = (parent_start_y + 1 + self._parent_form._pady + (single_field_height * (self._field_index + 1)) -1)
return field_stop_x, field_stop_y
def update_height_width(self):
"""Override of base class. Updates text field variables for form field
"""
super().update_height_width()
padx, pady = self.get_padding()
start_x, start_y = self.get_start_position()
height, width = self.get_absolute_dimensions()
self._cursor_text_pos = 0
self._cursor_x = start_x + 2 + padx
self._cursor_max_left = self._cursor_x
self._cursor_max_right = start_x + width - 1 - pady
self._cursor_y = start_y + int(height / 2) + 1
self._viewport_width = self._cursor_max_right - self._cursor_max_left
def _handle_key_press(self, key_pressed):
"""Handles text input for the field. Called by parent
"""
if key_pressed == py_cui.keys.KEY_LEFT_ARROW:
self._move_left()
elif key_pressed == py_cui.keys.KEY_RIGHT_ARROW:
self._move_right()
elif key_pressed == py_cui.keys.KEY_BACKSPACE:
self._erase_char()
elif key_pressed == py_cui.keys.KEY_DELETE:
self._delete_char()
elif key_pressed == py_cui.keys.KEY_HOME:
self._jump_to_start()
elif key_pressed == py_cui.keys.KEY_END:
self._jump_to_end()
elif key_pressed > 31 and key_pressed < 128:
self._insert_char(key_pressed)
def _draw(self):
"""Draw function for the field. Called from parent. Essentially the same as a TextboxPopup
"""
self._renderer.set_color_mode(self._parent_form._color)
self._renderer.set_color_rules([])
self._renderer.draw_text(self, self._title, self._cursor_y - 2, bordered=False, selected=self._selected)
self._renderer.draw_border(self, fill=False, with_title=False)
render_text = self._text
if len(self._text) > self._viewport_width:
end = len(self._text) - (self._viewport_width)
if self._cursor_text_pos < end:
render_text = self._text[self._cursor_text_pos:self._cursor_text_pos + (self._viewport_width)]
else:
render_text = self._text[end:]
if self._password:
temp = '*' * len(render_text)
render_text = temp
self._renderer.draw_text(self, render_text, self._cursor_y, selected=self._selected)
if self._selected:
self._renderer.draw_cursor(self._cursor_y, self._cursor_x)
else:
self._renderer.reset_cursor(self, fill=False)
self._renderer.unset_color_mode(self._color)
class FormImplementation(py_cui.ui.UIImplementation):
"""Main implementation class for the form widget/popup
Attributes
-----------
_form_fields : List[FormField]
The current fields in the form
_required_fields : List[str]
List for identifying required fields
_selected_form_index : int
Index of currently selected form
_on_submit_action : no-arg or lambda function
Function fired when submit is called
"""
def __init__(self, field_implementations, required_fields, logger):
"""Initializer for the FormImplemnentation class
"""
super().__init__(logger)
self._form_fields = field_implementations
self._required_fields = required_fields
self._selected_form_index = 0
self._on_submit_action = None
def get_selected_form_index(self):
"""Getter for selected form index
Returns
-------
selected_form_index : int
the index of currently selected field
"""
return self._selected_form_index
def set_selected_form_index(self, form_index):
"""Setter for selected form index
Parameters
----------
selected_form_index : int
the index of the new selected field
"""
self._selected_form_index = form_index
def set_on_submit_action(self, on_submit_action):
"""Setter for callback on submit
Parameters
----------
on_submit_action : no-arg or lambda function
Function fired when user 'submits' form
"""
self._on_submit_action = on_submit_action
def jump_to_next_field(self):
"""Function used to jump between form fields
"""
if self.get_selected_form_index() < (len(self._form_fields) - 1):
self.set_selected_form_index(self.get_selected_form_index() + 1)
else:
self.set_selected_form_index(0)
def is_submission_valid(self):
"""Function that checks if all fields are filled out correctly
Returns
-------
is_valid : bool
True of valid conditions are met, false otherwise
msg : str
Message explaining problem. None if valid
"""
for form_field in self._form_fields:
valid, err_msg = form_field.is_valid()
if not valid:
return False, err_msg
return True, None
def get(self):
"""Gets values entered into field as a dictionary
Returns
-------
field_entries : dict
A dictionary mapping field names to user inputs
"""
output = {}
for form_field in self._form_fields:
output[form_field.get_fieldname()] = form_field.get()
return output
class Form(py_cui.widgets.Widget, FormImplementation):
"""Main Widget class extending the FormImplementation. TODO
"""
pass
class InternalFormPopup(py_cui.popups.MessagePopup):
"""A helper class for abstracting a message popup tied to a parent popup
Attributes
----------
parent : FormPopup
The parent form popup that spawned the message popup
"""
def __init__(self, parent, *args):
"""Initializer for Internal form Popup
"""
super().__init__(*args)
self._parent = parent
def _handle_key_press(self, key_pressed):
"""Override of base class, close in parent instead of root
"""
if key_pressed in self._close_keys:
self._parent._internal_popup = None
class FormPopup(py_cui.popups.Popup, FormImplementation):
"""Main Popup extension class for forms.
Attributes
----------
num_fields : int
Number of fields added to form
form_fields : List[FormFieldElement]
individual form field ui element objects
internal_popup : InternalFormPopup
A popup spawned in the event of an invalid submission
"""
def __init__(self, root, fields, passwd_fields, required_fields, fields_init_text, title, color, renderer, logger):
self._num_fields = len(fields)
if self._num_fields != len(set(fields)):
raise DuplicateFormKeyError('PyCUI forms cannot have duplicate fields.')
py_cui.popups.Popup.__init__(self, root, title, '', color, renderer, logger)
self._form_fields = []
for i, field in enumerate(fields):
init_text = ''
if field in fields_init_text:
init_text = fields_init_text[field]
self._form_fields.append(FormFieldElement(self,
i,
field,
init_text,
(field in passwd_fields),
(field in required_fields),
renderer,
logger))
self._form_fields[0].set_selected(True)
FormImplementation.__init__(self, self._form_fields, required_fields, logger)
self._internal_popup = None
def get_num_fields(self):
"""Getter for number of fields
Returns
-------
num_fields : int
Number of fields in form
"""
return self._num_fields
def get_absolute_start_pos(self):
"""Override of base class, computes position based on root dimensions
Returns
-------
start_x, start_y : int
The coords of the upper-left corner of the popup
"""
root_height, root_width = self._root.get_absolute_size()
min_required_x = 80
if root_width < 80:
min_required_x = root_width - 6
min_required_y = 4 + (2 * self._pady) + 5 * self._num_fields
if root_height < min_required_y:
min_required_y = root_height
form_start_x = int(root_width / 2) - int(min_required_x / 2)
form_start_y = int(root_height / 2) - int(min_required_y / 2)
return form_start_x, form_start_y
def get_absolute_stop_pos(self):
"""Override of base class, computes position based on root dimensions
Returns
-------
stop_x, stop_y : int
The coords of the lower-right corner of the popup
"""
root_height, root_width = self._root.get_absolute_size()
min_required_x = 80
if root_width < 80:
min_required_x = root_width - 6
min_required_y = 4 + (2 * self._pady) + 5 * self._num_fields
if root_height < min_required_y:
min_required_y = root_height
form_stop_x = int(root_width / 2) + int(min_required_x / 2)
form_stop_y = int(root_height / 2) + int(min_required_y / 2)
return form_stop_x, form_stop_y
def update_height_width(self):
"""Override of base class function
Also updates all form field elements in the form
"""
super().update_height_width()
try:
for element in self._form_fields:
element.update_height_width()
except AttributeError:
pass
def _handle_key_press(self, key_pressed):
"""Override of base class. Here, we handle tabs, enters, and escapes
All other key presses are passed to the currently selected field element
Parameters
----------
key_pressed : int
Key code of pressed key
"""
if self._internal_popup is None:
if key_pressed == py_cui.keys.KEY_TAB:
self._form_fields[self.get_selected_form_index()].set_selected(False)
self.jump_to_next_field()
self._form_fields[self.get_selected_form_index()].set_selected(True)
elif key_pressed == py_cui.keys.KEY_ENTER:
valid, err_msg = self.is_submission_valid()
if valid:
self._root.close_popup()
self._on_submit_action(self.get())
else:
self._internal_popup = InternalFormPopup(self,
self._root,
err_msg,
'Required fields: {}'.format(str(self._required_fields)),
py_cui.YELLOW_ON_BLACK,
self._renderer,
self._logger)
elif key_pressed == py_cui.keys.KEY_ESCAPE:
self._root.close_popup()
else:
if self.get_selected_form_index() < len(self._form_fields):
self._form_fields[self.get_selected_form_index()]._handle_key_press(key_pressed)
else:
self._internal_popup._handle_key_press(key_pressed)
def _handle_mouse_press(self, x, y):
"""Override of base class function
Simply enters the appropriate field when mouse is pressed on it
Parameters
----------
x, y : int, int
Coordinates of the mouse press
"""
super()._handle_mouse_press(x, y)
for i, field in enumerate(self._form_fields):
if field._contains_position(x, y):
self._form_fields[self.get_selected_form_index()].set_selected(False)
self.set_selected_form_index(i)
self._form_fields[self.get_selected_form_index()].set_selected(True)
break
def _draw(self):
"""Override of base class.
Here, we only draw a border, and then the individual form elements
"""
self._renderer.set_color_mode(self._color)
self._renderer.set_color_rules([])
self._renderer.draw_border(self)
for i, form_field in enumerate(self._form_fields):
if i != self.get_selected_form_index():
form_field._draw()
self._form_fields[self.get_selected_form_index()]._draw()
if self._internal_popup is not None:
self._internal_popup._draw()
| 32.680374
| 122
| 0.599062
|
711d2b75dd24c4ecab70ea3286916dc43edffc1c
| 6,976
|
py
|
Python
|
examples/vyasarayani2011.py
|
humphreysb/opty
|
4eacf7cadf61a339be6cebfa3e32ef41d2563151
|
[
"BSD-2-Clause-FreeBSD"
] | 63
|
2015-03-07T19:38:10.000Z
|
2022-03-31T17:17:53.000Z
|
examples/vyasarayani2011.py
|
humphreysb/opty
|
4eacf7cadf61a339be6cebfa3e32ef41d2563151
|
[
"BSD-2-Clause-FreeBSD"
] | 52
|
2015-02-15T17:24:03.000Z
|
2021-06-18T16:43:45.000Z
|
examples/vyasarayani2011.py
|
humphreysb/opty
|
4eacf7cadf61a339be6cebfa3e32ef41d2563151
|
[
"BSD-2-Clause-FreeBSD"
] | 22
|
2015-05-25T21:28:16.000Z
|
2022-03-14T03:55:57.000Z
|
"""This example is taken from the following paper:
Vyasarayani, Chandrika P., Thomas Uchida, Ashwin Carvalho, and John McPhee.
"Parameter Identification in Dynamic Systems Using the Homotopy Optimization
Approach". Multibody System Dynamics 26, no. 4 (2011): 411-24.
In Section 3.1 there is a simple example of a single pendulum parameter
identification that has many local minima.
For the following differential equations that describe a single pendulum
acting under the influence of gravity, the goal is to identify the
parameter p given noisy measurements of the angle, y1.
y' = f(y, t), where y = [y1, y2]^T and f(y, t) = [y2, -p * sin(y1)]^T
To run:
$ python vyasarayani2011.py
Options can be viewed with:
$ python vyasarayani2011.py -h
"""
import numpy as np
import sympy as sym
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from opty.direct_collocation import Problem
from opty.utils import building_docs
def main(initial_guess, do_plot=False):
if building_docs():
do_plot = True
# Specify the symbolic equations of motion.
p, t = sym.symbols('p, t')
y1, y2 = [f(t) for f in sym.symbols('y1, y2', cls=sym.Function)]
y = sym.Matrix([y1, y2])
f = sym.Matrix([y2, -p * sym.sin(y1)])
eom = y.diff(t) - f
# Generate some data by integrating the equations of motion.
duration = 50.0
num_nodes = 5000
interval = duration / (num_nodes - 1)
time = np.linspace(0.0, duration, num=num_nodes)
p_val = 10.0
y0 = [np.pi / 6.0, 0.0]
def eval_f(y, t, p):
return np.array([y[1], -p * np.sin(y[0])])
y_meas = odeint(eval_f, y0, time, args=(p_val,))
y1_meas = y_meas[:, 0]
y2_meas = y_meas[:, 1]
# Add measurement noise.
y1_meas += np.random.normal(scale=0.05, size=y1_meas.shape)
y2_meas += np.random.normal(scale=0.1, size=y2_meas.shape)
# Setup the optimization problem to minimize the error in the simulated
# angle and the measured angle.
def obj(free):
"""Minimize the error in the angle, y1."""
return interval * np.sum((y1_meas - free[:num_nodes])**2)
def obj_grad(free):
grad = np.zeros_like(free)
grad[:num_nodes] = 2.0 * interval * (free[:num_nodes] - y1_meas)
return grad
# The midpoint integration method is preferable to the backward Euler
# method because no artificial damping is introduced.
prob = Problem(obj, obj_grad, eom, (y1, y2), num_nodes, interval,
integration_method='midpoint')
num_states = len(y)
if initial_guess == 'zero':
print('Using all zeros for the initial guess.')
# All zeros.
initial_guess = np.zeros(num_states * num_nodes + 1)
elif initial_guess == 'randompar':
print(('Using all zeros for the trajectories initial guess and a '
'random positive value for the parameter.'))
# Zeros for the state trajectories and a random positive value for
# the parameter.
initial_guess = np.hstack((np.zeros(num_states * num_nodes), 50.0 *
np.random.random(1)))
elif initial_guess == 'random':
print('Using random values for the initial guess.')
# Random values for all unknowns.
initial_guess = np.hstack((np.random.normal(scale=5.0,
size=num_states *
num_nodes), 50.0 *
np.random.random(1)))
elif initial_guess == 'sysid':
print(('Using noisy measurements for the trajectory initial guess and '
'a random positive value for the parameter.'))
# Give noisy measurements as the initial state guess and a random
# positive values as the parameter guess.
initial_guess = np.hstack((y1_meas, y2_meas, 100.0 *
np.random.random(1)))
elif initial_guess == 'known':
print('Using the known solution as the initial guess.')
# Known solution as initial guess.
initial_guess = np.hstack((y1_meas, y2_meas, 10.0))
# Find the optimal solution.
solution, info = prob.solve(initial_guess)
p_sol = solution[-1]
# Print the result.
known_msg = "Known value of p = {}".format(p_val)
guess_msg = "Initial guess for p = {}".format(initial_guess[-1])
identified_msg = "Identified value of p = {}".format(p_sol)
divider = '=' * max(len(known_msg), len(identified_msg))
print(divider)
print(known_msg)
print(guess_msg)
print(identified_msg)
print(divider)
# Simulate with the identified parameter.
y_sim = odeint(eval_f, y0, time, args=(p_sol,))
y1_sim = y_sim[:, 0]
y2_sim = y_sim[:, 1]
if do_plot:
# Plot results
fig_y1, axes_y1 = plt.subplots(3, 1)
legend = ['measured', 'initial guess',
'direct collocation solution', 'identified simulated']
axes_y1[0].plot(time, y1_meas, '.k',
time, initial_guess[:num_nodes], '.b',
time, solution[:num_nodes], '.r',
time, y1_sim, 'g')
axes_y1[0].set_xlabel('Time [s]')
axes_y1[0].set_ylabel('y1 [rad]')
axes_y1[0].legend(legend)
axes_y1[1].set_title('Initial Guess Constraint Violations')
axes_y1[1].plot(prob.con(initial_guess)[:num_nodes - 1])
axes_y1[2].set_title('Solution Constraint Violations')
axes_y1[2].plot(prob.con(solution)[:num_nodes - 1])
plt.tight_layout()
fig_y2, axes_y2 = plt.subplots(3, 1)
axes_y2[0].plot(time, y2_meas, '.k',
time, initial_guess[num_nodes:-1], '.b',
time, solution[num_nodes:-1], '.r',
time, y2_sim, 'g')
axes_y2[0].set_xlabel('Time [s]')
axes_y2[0].set_ylabel('y2 [rad]')
axes_y2[0].legend(legend)
axes_y2[1].set_title('Initial Guess Constraint Violations')
axes_y2[1].plot(prob.con(initial_guess)[num_nodes - 1:])
axes_y2[2].set_title('Solution Constraint Violations')
axes_y2[2].plot(prob.con(solution)[num_nodes - 1:])
plt.tight_layout()
plt.show()
if __name__ == "__main__":
import argparse
desc = "Run the pendulum parameter identification."
parser = argparse.ArgumentParser(description=desc)
msg = "The type of initial guess: sysid, random, known, randompar, zero."
parser.add_argument('-i', '--initialguess', type=str, help=msg,
default='sysid')
parser.add_argument('-p', '--plot', action="store_true",
help="Show result plots.")
args = parser.parse_args()
main(args.initialguess, do_plot=args.plot)
| 35.055276
| 79
| 0.598624
|
4c6a2ce79f709bc04aa6f021b017848a0d2a417a
| 5,776
|
py
|
Python
|
wrangling_scripts/figure1.py
|
DanielDaCosta/dashboardapp-daily-report
|
beb336d7993558c17fada8b1675b6982dc4c1f62
|
[
"MIT"
] | 7
|
2020-05-03T00:55:42.000Z
|
2021-04-15T01:07:38.000Z
|
wrangling_scripts/figure1.py
|
DanielDaCosta/dashboardapp-daily-report
|
beb336d7993558c17fada8b1675b6982dc4c1f62
|
[
"MIT"
] | null | null | null |
wrangling_scripts/figure1.py
|
DanielDaCosta/dashboardapp-daily-report
|
beb336d7993558c17fada8b1675b6982dc4c1f62
|
[
"MIT"
] | null | null | null |
import pandas as pd
from datetime import timedelta
import warnings
import plotly.graph_objs as go
warnings.filterwarnings("ignore")
def daily_average(path_csv):
"""Return the daily average of a specific region
Params:
path_csv (str): CSV path
Returns:
(dict): Dictionary containing the items:
- figure: dict with the plotly figure data and layout
- high_aglom: data of locations with higher agglomerations
- low_variations: data of locations with lower variations
- high_variations: data of locations with higher variations
- date_string: date of the last full day, formatted YYYY-MM-DD
"""
history_daily = pd.read_csv(f'data/{path_csv}.csv')
BAIRROS_FOR_STUDY = ['Haight-Ashbury', 'San Francisco', 'The Castro',
'Others', 'Union Square', 'Chinatown',
'Alamo Square', 'Mission District',
'SoMa', 'Fisherman’s wharf']
# Data Preprocessing
history_daily = history_daily.loc[history_daily['bairro']
.isin(BAIRROS_FOR_STUDY)]
history_daily['dia'] = pd.to_datetime(history_daily['dia'])
history_daily['day_of_week'] = history_daily['dia'].dt.day_name()
# Analysis starts from the last 7 days
last_record = max(history_daily['dia'])
start_time = last_record - timedelta(days=7)
start_time = start_time.strftime('%Y-%m-%d')
week_now = history_daily.loc[history_daily['dia'] >= start_time]
week_now['Dia'] = week_now['dia'].apply(lambda x: str(x.strftime('%d/%m')))
# Legend
week_now['proporcao_relacao_media_dia_da_semana_legend'] = \
week_now['proporcao_media_dia_semana'].apply(lambda x: str(round(x))
+ '%')
week_now['day_of_week_initial'] = \
week_now.day_of_week.apply(lambda x: ' (' + str(x)[0].upper() + ')')
week_now['day_of_week_legend'] = week_now['Dia'] \
+ week_now['day_of_week_initial']
# Generating Graph 1
bairro_graph = 'San Francisco'
week_graph = week_now.loc[week_now['bairro'] == bairro_graph][:-1]
week_graph.rename(columns={'pessoas_contadas': 'Pessoas Contadas',
'media_pessoas_contadas':
'Média do Dia da Semana'}, inplace=True)
figure_1 = go.Figure(
data=[
go.Bar(
name="Counted People",
x=week_graph['day_of_week_legend'],
y=week_graph['Pessoas Contadas'],
text=week_graph[('proporcao_relacao_'
'media_dia_da_semana_legend')],
textposition='outside',
offsetgroup=0
),
go.Bar(
name="Average Day of Week",
x=week_graph['day_of_week_legend'],
y=week_graph['Média do Dia da Semana'],
offsetgroup=1
)
],
layout=go.Layout(
title=(f'{bairro_graph}: '
'Average Number of People Per Day'),
title_x=0.5,
yaxis_title="Population",
plot_bgcolor='rgba(0,0,0,0)',
# width=800,
)
)
figure_1.update_yaxes(showgrid=True, gridwidth=1, gridcolor='black')
# General Analysis: Neighborhoods with the greatest agglomerations
# Order by number of people in the last day
columns = ['bairro', 'pessoas_contadas']
last_day = week_now.dia.unique()[-2] # -2 -> yesterday
high_aglom = week_now.loc[(week_now['dia'] == last_day)
& (~week_now.bairro.isin(['Rio_de_Janeiro',
'sem_bairro'])),
columns].sort_values(by='pessoas_contadas',
ascending=False)[:3]
high_aglom['pessoas_contadas'] = high_aglom.pessoas_contadas.apply(lambda x: round(x))
high_aglom = high_aglom.to_dict('list')
# General Analysis: Neighborhoods with the smallest variations
# Order by queda_proporcional_dia_semana in the last day, in
# ascending order
columns = ['bairro', 'queda_proporcional_dia_semana']
last_day = week_now.dia.unique()[-2]
low_variations = week_now\
.loc[(week_now['dia'] == last_day)
& (~week_now.bairro.isin(['Rio_de_Janeiro', 'sem_bairro'])),
columns].sort_values(by='queda_proporcional_dia_semana')[:3]
low_variations['queda_proporcional_dia_semana'] = low_variations\
.queda_proporcional_dia_semana.apply(lambda x: round(x))
low_variations = low_variations.to_dict('list')
# General Analysis: Neighborhoods with the greatest variations
# Order by queda_proporcional_dia_semana in the last day,
# in descending order
columns = ['bairro', 'queda_proporcional_dia_semana']
last_day = week_now.dia.unique()[-2]
high_variations = week_now\
.loc[(week_now['dia'] == last_day)
& (~week_now.bairro.isin(['San Francisco', 'Others'])),
columns].sort_values(by='queda_proporcional_dia_semana',
ascending=False)[:3]
high_variations['queda_proporcional_dia_semana'] = high_variations\
.queda_proporcional_dia_semana.apply(lambda x: round(x))
high_variations = high_variations.to_dict('list')
# Results
results = {'figure': dict(data=figure_1.data, layout=figure_1.layout),
'high_aglom': high_aglom,
'low_variations': low_variations,
'high_variations': high_variations,
'date_string': pd.Timestamp(last_day).strftime('%Y-%m-%d')}
return results
# if __name__ == '__main__':
# path_csv = '20200416_2'
# figures = []
# dict_results = daily_average(path_csv)
| 41.553957
| 90
| 0.596607
|
b26c8f4cfd5bfec6bf681941f0672fc58010b70c
| 20,403
|
py
|
Python
|
flask_user/user_manager.py
|
Jonazon/Flask-User
|
6f74fd0dd52858b18af95f70e2ef8a888451fe6c
|
[
"MIT"
] | null | null | null |
flask_user/user_manager.py
|
Jonazon/Flask-User
|
6f74fd0dd52858b18af95f70e2ef8a888451fe6c
|
[
"MIT"
] | null | null | null |
flask_user/user_manager.py
|
Jonazon/Flask-User
|
6f74fd0dd52858b18af95f70e2ef8a888451fe6c
|
[
"MIT"
] | null | null | null |
"""This module implements the main UserManager class for Flask-User.
"""
# Author: Ling Thio <ling.thio@gmail.com>
# Copyright (c) 2013 Ling Thio
import datetime
from flask import abort, Blueprint, current_app, Flask, session
from flask_login import LoginManager
from wtforms import ValidationError
from . import ConfigError
from . import forms
from .db_manager import DBManager
from .email_manager import EmailManager
from .password_manager import PasswordManager
from .token_manager import TokenManager
from .translation_utils import lazy_gettext as _ # map _() to lazy_gettext()
from .user_manager__settings import UserManager__Settings
from .user_manager__utils import UserManager__Utils
from .user_manager__views import UserManager__Views
# The UserManager is implemented across several source code files.
# Mixins are used to aggregate all member functions into the one UserManager class for ease of customization.
class UserManager(UserManager__Settings, UserManager__Utils, UserManager__Views):
""" Customizable User Authentication and Management.
"""
def __init__(self, app, db, UserClass, **kwargs):
"""
Args:
app(Flask): The Flask application instance.
db: An Object-Database Mapper instance such as SQLAlchemy or MongoEngine.
UserClass: The User class (*not* an instance!).
Keyword Args:
UserEmailClass: The optional UserEmail class (*not* an instance!).
Required for the 'multiple emails per user' feature.
UserInvitationClass: The optional UserInvitation class (*not* an instance!).
Required for the 'register by invitation' feature.
Example:
``user_manager = UserManager(app, db, User, UserEmailClass=UserEmail)``
.. This hack shows a header above the _next_ section
.. code-block:: none
Customizable UserManager methods
"""
# See http://flask.pocoo.org/docs/0.12/extensiondev/#the-extension-code
self.app = app
if app:
self.init_app(app, db, UserClass, **kwargs)
def init_app(
self, app, db, UserClass,
UserInvitationClass=None,
UserEmailClass=None,
RoleClass=None, # Only used for testing
):
# See http://flask.pocoo.org/docs/0.12/extensiondev/#the-extension-code
# Perform Class type checking
if not isinstance(app, Flask):
raise TypeError("flask_user.UserManager.init_app(): Parameter 'app' is an instance of class '%s' "
"instead of a subclass of class 'flask.Flask'."
% app.__class__.__name__)
# Bind Flask-User to app
app.user_manager = self
# Remember all data-models
# ------------------------
self.db = db
# self.db_manager.UserClass = UserClass
# self.db_manager.UserEmailClass = UserEmailClass
# self.UserInvitationClass = UserInvitationClass
# self.RoleClass=RoleClass
# Load app config settings
# ------------------------
# For each 'UserManager.USER_...' property: load settings from the app config.
for attrib_name in dir(self):
if attrib_name[0:5] == 'USER_':
default_value = getattr(UserManager, attrib_name)
setattr(self, attrib_name, app.config.get(attrib_name, default_value))
# If USER_EMAIL_SENDER_EMAIL is not set, try to construct it from
# MAIL_DEFAULT_SENDER or DEFAULT_MAIL_SENDER
if not self.USER_EMAIL_SENDER_EMAIL:
default_sender = app.config.get('DEFAULT_MAIL_SENDER', None)
default_sender = app.config.get('MAIL_DEFAULT_SENDER', default_sender)
if default_sender:
# Accept two formats: '{name}<{email}>' or plain '{email}'
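# e.g. (hypothetical) 'Jane Doe <jane@example.com>' yields
# USER_EMAIL_SENDER_NAME='Jane Doe' and USER_EMAIL_SENDER_EMAIL='jane@example.com'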
if default_sender[-1:] == '>':
start = default_sender.rfind('<')
if start >= 1:
self.USER_EMAIL_SENDER_EMAIL = default_sender[start + 1:-1]
if not self.USER_EMAIL_SENDER_NAME:
self.USER_EMAIL_SENDER_NAME = default_sender[0:start].strip(' "')
else:
self.USER_EMAIL_SENDER_EMAIL = default_sender
# If USER_EMAIL_SENDER_NAME is not set, default it to USER_APP_NAME
if not self.USER_EMAIL_SENDER_NAME:
self.USER_EMAIL_SENDER_NAME = self.USER_APP_NAME
# Configure Flask session behavior
# --------------------------------
if self.USER_USER_SESSION_EXPIRATION:
app.permanent_session_lifetime = datetime.timedelta(seconds=self.USER_USER_SESSION_EXPIRATION)
@app.before_request
def advance_session_timeout():
session.permanent = True # Timeout after app.permanent_session_lifetime period
session.modified = True # Advance session timeout each time a user visits a page
# Configure Flask-Login
# --------------------
# Setup default LoginManager using Flask-Login
self.login_manager = LoginManager(app)
self.login_manager.login_view = 'user.login'
# Flask-Login calls this function to retrieve a User record by token.
@self.login_manager.user_loader
def load_user_by_user_token(user_token):
user = self.db_manager.UserClass.get_user_by_token(user_token)
return user
# Configure Flask-BabelEx
# -----------------------
self.babel = app.extensions.get('babel', None)
from .translation_utils import init_translations
init_translations(self.babel)
# Configure Jinja2
# ----------------
# If the application has not initialized BabelEx,
# we must provide a NULL translation to Jinja2
if not hasattr(app.jinja_env, 'install_gettext_callables'):
app.jinja_env.add_extension('jinja2.ext.i18n')
app.jinja_env.install_null_translations()
# Define a context processor to provide custom variable and functions available to Jinja2 templates
def flask_user_context_processor():
# In Flask-Login 0.2 ``is_authenticated`` and ``is_active`` were implemented as functions,
# while in 0.3+ they are implemented as properties.
def call_or_get(function_or_property):
return function_or_property() if callable(function_or_property) else function_or_property
return dict(
user_manager=current_app.user_manager,
call_or_get=call_or_get,
)
# Register context processor with Jinja2
app.context_processor(flask_user_context_processor)
# Create a dummy Blueprint to add the app/templates/flask_user dir to the template search path
blueprint = Blueprint('flask_user', __name__, template_folder='templates')
app.register_blueprint(blueprint)
# Set default form classes
# ------------------------
self.AddEmailFormClass = forms.AddEmailForm
self.ChangePasswordFormClass = forms.ChangePasswordForm
self.ChangeUsernameFormClass = forms.ChangeUsernameForm
self.EditUserProfileFormClass = forms.EditUserProfileForm
self.ForgotPasswordFormClass = forms.ForgotPasswordForm
self.InviteUserFormClass = forms.InviteUserForm
self.LoginFormClass = forms.LoginForm
self.RegisterFormClass = forms.RegisterForm
self.ResendEmailConfirmationFormClass = forms.ResendEmailConfirmationForm
self.ResetPasswordFormClass = forms.ResetPasswordForm
# Set default managers
# --------------------
# Setup DBManager
self.db_manager = DBManager(app, db, UserClass, UserEmailClass, UserInvitationClass, RoleClass)
# Setup PasswordManager
self.password_manager = PasswordManager(app)
# Set default EmailAdapter
if self.USER_ENABLE_EMAIL:
from .email_adapters.smtp_email_adapter import SMTPEmailAdapter
self.email_adapter = SMTPEmailAdapter(app)
# Setup EmailManager
if self.USER_ENABLE_EMAIL:
self.email_manager = EmailManager(app)
# Setup TokenManager
self.token_manager = TokenManager(app)
# Allow developers to customize UserManager
self.customize(app)
# Make sure the settings are valid -- raise ConfigError if not
self._check_settings(app)
# Configure a list of URLs to route to their corresponding view method.
self._add_url_routes(app)
def customize(self, app):
""" Override this method to customize properties.
Example::
# Customize Flask-User
class CustomUserManager(UserManager):
def customize(self, app):
# Add custom managers and email mailers here
self.email_manager = CustomEmailManager(app)
self.password_manager = CustomPasswordManager(app)
self.token_manager = CustomTokenManager(app)
self.email_adapter = CustomEmailAdapter(app)
# Setup Flask-User
user_manager = CustomUserManager(app, db, User)
"""
def password_validator(self, form, field):
"""Ensure that passwords have at least 6 characters with one lowercase letter, one uppercase letter and one number.
Override this method to customize the password validator.
"""
# Convert string to list of characters
password = list(field.data)
password_length = len(password)
# Count lowercase, uppercase and numbers
lowers = uppers = digits = 0
for ch in password:
if ch.islower(): lowers += 1
if ch.isupper(): uppers += 1
if ch.isdigit(): digits += 1
# Password must have one lowercase letter, one uppercase letter and one digit
is_valid = password_length >= 6 and lowers and uppers and digits
if not is_valid:
raise ValidationError(
_('Password must have at least 6 characters with one lowercase letter, one uppercase letter and one number'))
# If you prefer using Regex:
# from re import compile
# PASSWORD_REGEX = compile(r'\A(?=\S*?\d)(?=\S*?[A-Z])(?=\S*?[a-z])\S{6,}\Z')
# def password_is_valid(password):
# return PASSWORD_REGEX.match(password) is not None
def username_validator(self, form, field):
"""Ensure that Usernames contains at least 3 alphanumeric characters.
Override this method to customize the username validator.
"""
username = field.data
if len(username) < 3:
raise ValidationError(
_('Username must be at least 3 characters long'))
valid_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._'
chars = list(username)
for char in chars:
if char not in valid_chars:
raise ValidationError(
_("Username may only contain letters, numbers, '-', '.' and '_'"))
# If you prefer using Regex:
# from re import compile
# USERNAME_REGEX = compile(r'\A[\w\-\.]{3,}\Z')
# def username_is_valid(username):
# return USERNAME_REGEX.match(username) is not None
# ***** Private methods *****
def _check_settings(self, app):
"""Verify required settings. Produce a helpful error messages for incorrect settings."""
# Check for invalid settings
# --------------------------
# Check self.UserInvitationClass and USER_ENABLE_INVITE_USER
if self.USER_ENABLE_INVITE_USER and not self.db_manager.UserInvitationClass:
raise ConfigError(
'UserInvitationClass is missing while USER_ENABLE_INVITE_USER is True.' \
' Specify UserInvitationClass with UserManager(app, db, User, UserInvitationClass=...' \
' or set USER_ENABLE_INVITE_USER=False.')
# Check for deprecated settings
# -----------------------------
# Check for deprecated USER_ENABLE_CONFIRM_EMAIL
setting = app.config.get('USER_ENABLE_LOGIN_WITHOUT_CONFIRM_EMAIL', None)
if setting is not None:
print(
'Deprecation warning: USER_ENABLE_LOGIN_WITHOUT_CONFIRM_EMAIL'\
' will be deprecated.' \
' It has been replaced by USER_ALLOW_LOGIN_WITHOUT_CONFIRMED_EMAIL.'\
' Please change this as soon as possible.')
self.USER_ALLOW_LOGIN_WITHOUT_CONFIRMED_EMAIL = setting
# Check for deprecated USER_ENABLE_RETYPE_PASSWORD
setting = app.config.get('USER_ENABLE_RETYPE_PASSWORD', None)
if setting is not None:
print(
'Deprecation warning: USER_ENABLE_RETYPE_PASSWORD'\
' will be deprecated.' \
' It has been replaced with USER_REQUIRE_RETYPE_PASSWORD.'\
' Please change this as soon as possible.')
self.USER_REQUIRE_RETYPE_PASSWORD = setting
# Check for deprecated USER_SHOW_USERNAME_EMAIL_DOES_NOT_EXIST
setting = app.config.get('USER_SHOW_USERNAME_EMAIL_DOES_NOT_EXIST', None)
if setting is not None:
print(
'Deprecation warning: USER_SHOW_USERNAME_EMAIL_DOES_NOT_EXIST' \
' will be deprecated.' \
' It has been replaced with USER_SHOW_USERNAME_DOES_NOT_EXIST'
' and USER_SHOW_EMAIL_DOES_NOT_EXIST.'
' Please change this as soon as possible.')
self.USER_SHOW_USERNAME_DOES_NOT_EXIST = setting
self.USER_SHOW_EMAIL_DOES_NOT_EXIST = setting
# Check for deprecated USER_PASSWORD_HASH
setting = app.config.get('USER_PASSWORD_HASH', None)
if setting is not None:
print(
"Deprecation warning: USER_PASSWORD_HASH=<string>"\
" will be deprecated."\
" It has been replaced with USER_PASSLIB_CRYPTCONTEXT_SCHEMES=<list>."
" Please change USER_PASSWORD_HASH='something' to"\
" USER_PASSLIB_CRYPTCONTEXT_SCHEMES=['something'] as soon as possible.")
self.USER_PASSLIB_CRYPTCONTEXT_SCHEMES = [setting]
# Check that USER_EMAIL_SENDER_EMAIL is set when USER_ENABLE_EMAIL is True
if not self.USER_EMAIL_SENDER_EMAIL and self.USER_ENABLE_EMAIL:
raise ConfigError(
'USER_EMAIL_SENDER_EMAIL is missing while USER_ENABLE_EMAIL is True.'\
' specify USER_EMAIL_SENDER_EMAIL (and USER_EMAIL_SENDER_NAME) or set USER_ENABLE_EMAIL to False.')
# Disable settings that rely on a feature setting that's not enabled
# ------------------------------------------------------------------
# USER_ENABLE_REGISTER=True must have USER_ENABLE_USERNAME=True or USER_ENABLE_EMAIL=True.
if not self.USER_ENABLE_USERNAME and not self.USER_ENABLE_EMAIL:
self.USER_ENABLE_REGISTER = False
# Settings that depend on USER_ENABLE_EMAIL
if not self.USER_ENABLE_EMAIL:
self.USER_ENABLE_CONFIRM_EMAIL = False
self.USER_ENABLE_MULTIPLE_EMAILS = False
self.USER_ENABLE_FORGOT_PASSWORD = False
self.USER_SEND_PASSWORD_CHANGED_EMAIL = False
self.USER_SEND_REGISTERED_EMAIL = False
self.USER_SEND_USERNAME_CHANGED_EMAIL = False
self.USER_REQUIRE_INVITATION = False
# Settings that depend on USER_ENABLE_USERNAME
if not self.USER_ENABLE_USERNAME:
self.USER_ENABLE_CHANGE_USERNAME = False
def _add_url_routes(self, app):
"""Configure a list of URLs to route to their corresponding view method.."""
# Because methods contain an extra ``self`` parameter, URL routes are mapped
# to stub functions, which simply call the corresponding method.
# For testing purposes, we map all available URLs to stubs, but the stubs
# contain config checks to return 404 when a feature is disabled.
# Define the stubs
# ----------------
# def auth0_callback_stub():
# if not self.USER_ENABLE_AUTH0: abort(404)
# return self.auth0_callback_view()
def change_password_stub():
if not self.USER_ENABLE_CHANGE_PASSWORD: abort(404)
return self.change_password_view()
def change_username_stub():
if not self.USER_ENABLE_CHANGE_USERNAME: abort(404)
return self.change_username_view()
def confirm_email_stub(token):
if not self.USER_ENABLE_CONFIRM_EMAIL: abort(404)
return self.confirm_email_view(token)
def edit_user_profile_stub():
return self.edit_user_profile_view()
def email_action_stub(id, action):
if not self.USER_ENABLE_MULTIPLE_EMAILS or not self.db_manager.UserEmailClass: abort(404)
return self.email_action_view(id, action)
def forgot_password_stub():
if not self.USER_ENABLE_FORGOT_PASSWORD: abort(404)
return self.forgot_password_view()
def manage_emails_stub():
if not self.USER_ENABLE_MULTIPLE_EMAILS or not self.db_manager.UserEmailClass: abort(404)
return self.manage_emails_view()
def invite_user_stub():
if not self.USER_ENABLE_INVITE_USER: abort(404)
return self.invite_user_view()
def login_stub():
return self.login_view()
def logout_stub():
return self.logout_view()
def register_stub():
if not self.USER_ENABLE_REGISTER: abort(404)
return self.register_view()
def resend_email_confirmation_stub():
if not self.USER_ENABLE_CONFIRM_EMAIL: abort(404)
return self.resend_email_confirmation_view()
def reset_password_stub(token):
if not self.USER_ENABLE_FORGOT_PASSWORD: abort(404)
return self.reset_password_view(token)
# def unconfirmed_email_stub():
# return self.unconfirmed_email_view()
def unauthorized_stub():
return self.unauthorized_view()
# Add the URL routes
# ------------------
# app.add_url_rule('/callbacks/auth0', 'user.auth0_callback', auth0_callback_stub)
app.add_url_rule(self.USER_CHANGE_PASSWORD_URL, 'user.change_password', change_password_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_CHANGE_USERNAME_URL, 'user.change_username', change_username_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_CONFIRM_EMAIL_URL, 'user.confirm_email', confirm_email_stub)
app.add_url_rule(self.USER_EDIT_USER_PROFILE_URL, 'user.edit_user_profile', edit_user_profile_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_EMAIL_ACTION_URL, 'user.email_action', email_action_stub)
app.add_url_rule(self.USER_FORGOT_PASSWORD_URL, 'user.forgot_password', forgot_password_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_INVITE_USER_URL, 'user.invite_user', invite_user_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_LOGIN_URL, 'user.login', login_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_LOGOUT_URL, 'user.logout', logout_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_MANAGE_EMAILS_URL, 'user.manage_emails', manage_emails_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_REGISTER_URL, 'user.register', register_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_RESEND_EMAIL_CONFIRMATION_URL, 'user.resend_email_confirmation',
resend_email_confirmation_stub,
methods=['GET', 'POST'])
app.add_url_rule(self.USER_RESET_PASSWORD_URL, 'user.reset_password', reset_password_stub,
methods=['GET', 'POST'])
| 43.226695
| 125
| 0.638681
|
2c00fcea97861e86dc948bdc282bc69c33bd95c3
| 632
|
py
|
Python
|
src/genGraph_MPL.py
|
QuangVuong85/Dijkstra.GUI.PyQt5
|
835ea0b6a059c78036d2ded63c56d7d3be5864bb
|
[
"bzip2-1.0.6"
] | 1
|
2020-06-16T11:23:58.000Z
|
2020-06-16T11:23:58.000Z
|
src/genGraph_MPL.py
|
QuangVuong85/Dijkstra.GUI.PyQt5
|
835ea0b6a059c78036d2ded63c56d7d3be5864bb
|
[
"bzip2-1.0.6"
] | null | null | null |
src/genGraph_MPL.py
|
QuangVuong85/Dijkstra.GUI.PyQt5
|
835ea0b6a059c78036d2ded63c56d7d3be5864bb
|
[
"bzip2-1.0.6"
] | null | null | null |
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
G = nx.Graph() # Create empty graph
G.add_nodes_from(["A", "B", "C"]) # Add nodes
# Add edges
G.add_edge("A", "B", weight=5)
G.add_edge("B", "C", weight=7)
G.add_edge("C", "A", weight=2)
# Create drawing
pos = nx.spring_layout(G) # List of positions of nodes
weights = nx.get_edge_attributes(G, "weight") # List of weights
nx.draw_networkx(G, pos, with_labels=True)
nx.draw_networkx_edge_labels(G, pos, edge_labels=weights)
plt.title("Basic Graphs with Networkx")
plt.gcf().canvas.set_window_title("") # Hide window title
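# Note: set_window_title() on the canvas is deprecated in newer Matplotlib (3.4+);
# plt.gcf().canvas.manager.set_window_title("") is the replacement there.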
# Display Graph
plt.show()
| 25.28
| 63
| 0.712025
|
5dca669a19f6be67bb22f3024d4e8dc3e975cec3
| 959
|
py
|
Python
|
ch03-first-api/main.py
|
hedrickbt/talkpython-fastapi
|
633ee7b6ebfa78933b14fceed0c62884382363a1
|
[
"MIT"
] | null | null | null |
ch03-first-api/main.py
|
hedrickbt/talkpython-fastapi
|
633ee7b6ebfa78933b14fceed0c62884382363a1
|
[
"MIT"
] | null | null | null |
ch03-first-api/main.py
|
hedrickbt/talkpython-fastapi
|
633ee7b6ebfa78933b14fceed0c62884382363a1
|
[
"MIT"
] | null | null | null |
import json
from typing import Optional
import fastapi
import uvicorn
api = fastapi.FastAPI()
@api.get('/')
def index():
body = """
<html>
<body style='padding: 10px;'>
<h1>Welcome to the API</h1>
<div>
Try it: <a href='/api/calculate/?x=7&y=11'>/api/calculate/?x=7&y=11</a>
</div>
</body>
</html>
"""
return fastapi.responses.HTMLResponse(content=body)
@api.get('/api/calculate')
def calculate(x: int, y: int, z: Optional[int] = None):
if z == 0:
error = {
'error': 'ERROR: z cannot be 0.'
}
# return fastapi.Response(content=json.dumps(error), media_type='application/json', status_code=400)
return fastapi.responses.JSONResponse(content=error, status_code=400)
value = (x + y)
if z is not None:
value /= z
return {
'x': x,
'y': y,
'z': z,
'value': value,
}
uvicorn.run(api, port=8001, host='127.0.0.1')
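# Example request once the server is running (illustrative):
#   curl 'http://127.0.0.1:8001/api/calculate?x=7&y=11&z=3'
#   -> {"x": 7, "y": 11, "z": 3, "value": 6.0}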
| 20.847826
| 108
| 0.567258
|
87fc76c329f2e8847e137d56bf7cb0db55137664
| 1,028
|
py
|
Python
|
nodes/Object/GetObjectVisibility.py
|
atticus-lv/RenderNode
|
8a4797a2186b76fedebc5d634cff298e69089474
|
[
"Apache-2.0"
] | 17
|
2021-11-21T09:26:55.000Z
|
2022-03-09T06:56:01.000Z
|
nodes/Object/GetObjectVisibility.py
|
atticus-lv/RenderNode
|
8a4797a2186b76fedebc5d634cff298e69089474
|
[
"Apache-2.0"
] | 1
|
2021-12-05T13:02:48.000Z
|
2021-12-06T08:02:34.000Z
|
nodes/Object/GetObjectVisibility.py
|
atticus-lv/RenderNode
|
8a4797a2186b76fedebc5d634cff298e69089474
|
[
"Apache-2.0"
] | 4
|
2021-11-23T14:49:34.000Z
|
2021-12-30T15:04:58.000Z
|
import bpy
from bpy.props import *
from ...nodes.BASE.node_base import RenderNodeBase
class RenderNodeGetObjectVisibility(RenderNodeBase):
bl_idname = 'RenderNodeGetObjectVisibility'
bl_label = 'Get Object Visibility'
def init(self, context):
self.create_input('RenderNodeSocketObject', 'object', 'Object', show_text=False)
self.create_output('RenderNodeSocketBool', 'hide_viewport', 'Show In Viewports',
default_value=True) # invert bool
self.create_output('RenderNodeSocketBool', 'hide_render', 'Show In Render', default_value=True) # invert bool
def process(self, context, id, path):
ob = self.inputs['object'].get_value()
if ob:
self.outputs['hide_viewport'].set_value(not ob.hide_viewport)
self.outputs['hide_render'].set_value(not ob.hide_render)
def register():
bpy.utils.register_class(RenderNodeGetObjectVisibility)
def unregister():
bpy.utils.unregister_class(RenderNodeGetObjectVisibility)
| 33.16129
| 118
| 0.707198
|
77d901fe533ab9577189b18398e74e3ccc566d95
| 17,159
|
bzl
|
Python
|
tools/cpp/windows_cc_configure.bzl
|
keithkml/bazel
|
174d14fda916c89fbd8a3c9411588bbe87826a83
|
[
"Apache-2.0"
] | null | null | null |
tools/cpp/windows_cc_configure.bzl
|
keithkml/bazel
|
174d14fda916c89fbd8a3c9411588bbe87826a83
|
[
"Apache-2.0"
] | null | null | null |
tools/cpp/windows_cc_configure.bzl
|
keithkml/bazel
|
174d14fda916c89fbd8a3c9411588bbe87826a83
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=g-bad-file-header
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuring the C++ toolchain on Windows."""
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
"auto_configure_fail",
"auto_configure_warning",
"escape_string",
"execute",
"get_env_var",
"is_cc_configure_debug",
"resolve_labels",
)
def _auto_configure_warning_maybe(repository_ctx, msg):
"""Output warning message when CC_CONFIGURE_DEBUG is enabled."""
if is_cc_configure_debug(repository_ctx):
auto_configure_warning(msg)
def _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = False):
"""Return the content of msys crosstool which is still the default CROSSTOOL on Windows."""
bazel_sh = get_env_var(repository_ctx, "BAZEL_SH").replace("\\", "/").lower()
tokens = bazel_sh.rsplit("/", 1)
prefix = "mingw64" if use_mingw else "usr"
msys_root = None
if tokens[0].endswith("/usr/bin"):
msys_root = tokens[0][:len(tokens[0]) - len("usr/bin")]
elif tokens[0].endswith("/bin"):
msys_root = tokens[0][:len(tokens[0]) - len("bin")]
if not msys_root:
auto_configure_fail(
"Could not determine MSYS/Cygwin root from BAZEL_SH (%s)" % bazel_sh,
)
escaped_msys_root = escape_string(msys_root)
return (((
' abi_version: "local"\n' +
' abi_libc_version: "local"\n' +
' builtin_sysroot: ""\n' +
' compiler: "msys-gcc"\n' +
' host_system_name: "local"\n' +
" needsPic: false\n" +
' target_libc: "msys"\n' +
' target_cpu: "x64_windows"\n' +
' target_system_name: "local"\n'
) if not use_mingw else "") +
' tool_path { name: "ar" path: "%s%s/bin/ar" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "compat-ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "cpp" path: "%s%s/bin/cpp" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "dwp" path: "%s%s/bin/dwp" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "gcc" path: "%s%s/bin/gcc" }\n' % (escaped_msys_root, prefix) +
' artifact_name_pattern { category_name: "executable" prefix: "" extension: ".exe"}\n' +
' cxx_flag: "-std=gnu++0x"\n' +
' linker_flag: "-lstdc++"\n' +
' cxx_builtin_include_directory: "%s%s/"\n' % (escaped_msys_root, prefix) +
' tool_path { name: "gcov" path: "%s%s/bin/gcov" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "nm" path: "%s%s/bin/nm" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "objcopy" path: "%s%s/bin/objcopy" }\n' % (escaped_msys_root, prefix) +
' objcopy_embed_flag: "-I"\n' +
' objcopy_embed_flag: "binary"\n' +
' tool_path { name: "objdump" path: "%s%s/bin/objdump" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "strip" path: "%s%s/bin/strip" }' % (escaped_msys_root, prefix) +
' feature { name: "targets_windows" implies: "copy_dynamic_libraries_to_binary" enabled: true }' +
' feature { name: "copy_dynamic_libraries_to_binary" }')
def _get_system_root(repository_ctx):
"""Get System root path on Windows, default is C:\\\Windows. Doesn't %-escape the result."""
if "SYSTEMROOT" in repository_ctx.os.environ:
return escape_string(repository_ctx.os.environ["SYSTEMROOT"])
_auto_configure_warning_maybe(repository_ctx, "SYSTEMROOT is not set, using default SYSTEMROOT=C:\\Windows")
return "C:\\Windows"
def _add_system_root(repository_ctx, env):
"""Running VCVARSALL.BAT and VCVARSQUERYREGISTRY.BAT need %SYSTEMROOT%\\\\system32 in PATH."""
if "PATH" not in env:
env["PATH"] = ""
env["PATH"] = env["PATH"] + ";" + _get_system_root(repository_ctx) + "\\system32"
return env
def find_vc_path(repository_ctx):
"""Find Visual C++ build tools install path. Doesn't %-escape the result."""
# 1. Check if BAZEL_VC or BAZEL_VS is already set by user.
if "BAZEL_VC" in repository_ctx.os.environ:
return repository_ctx.os.environ["BAZEL_VC"]
if "BAZEL_VS" in repository_ctx.os.environ:
return repository_ctx.os.environ["BAZEL_VS"] + "\\VC\\"
_auto_configure_warning_maybe(repository_ctx, "'BAZEL_VC' is not set, " +
"start looking for the latest Visual C++ installed.")
# 2. Check if VS%VS_VERSION%COMNTOOLS is set, if true then try to find and use
# vcvarsqueryregistry.bat to detect VC++.
_auto_configure_warning_maybe(repository_ctx, "Looking for VS%VERSION%COMNTOOLS environment variables, " +
"eg. VS140COMNTOOLS")
for vscommontools_env in [
"VS140COMNTOOLS",
"VS120COMNTOOLS",
"VS110COMNTOOLS",
"VS100COMNTOOLS",
"VS90COMNTOOLS",
]:
if vscommontools_env not in repository_ctx.os.environ:
continue
vcvarsqueryregistry = repository_ctx.os.environ[vscommontools_env] + "\\vcvarsqueryregistry.bat"
if not repository_ctx.path(vcvarsqueryregistry).exists:
continue
repository_ctx.file(
"get_vc_dir.bat",
"@echo off\n" +
"call \"" + vcvarsqueryregistry + "\"\n" +
"echo %VCINSTALLDIR%",
True,
)
env = _add_system_root(repository_ctx, repository_ctx.os.environ)
vc_dir = execute(repository_ctx, ["./get_vc_dir.bat"], environment = env)
_auto_configure_warning_maybe(repository_ctx, "Visual C++ build tools found at %s" % vc_dir)
return vc_dir
# 3. User might clean up all environment variables, if so looking for Visual C++ through registry.
# Works for all VS versions, including Visual Studio 2017.
_auto_configure_warning_maybe(repository_ctx, "Looking for Visual C++ through registry")
reg_binary = _get_system_root(repository_ctx) + "\\system32\\reg.exe"
vc_dir = None
for key, suffix in (("VC7", ""), ("VS7", "\\VC")):
for version in ["15.0", "14.0", "12.0", "11.0", "10.0", "9.0", "8.0"]:
if vc_dir:
break
result = repository_ctx.execute([reg_binary, "query", "HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\" + key, "/v", version])
_auto_configure_warning_maybe(repository_ctx, "registry query result for VC %s:\n\nSTDOUT(start)\n%s\nSTDOUT(end)\nSTDERR(start):\n%s\nSTDERR(end)\n" %
(version, result.stdout, result.stderr))
if not result.stderr:
for line in result.stdout.split("\n"):
line = line.strip()
if line.startswith(version) and line.find("REG_SZ") != -1:
vc_dir = line[line.find("REG_SZ") + len("REG_SZ"):].strip() + suffix
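# A matching reg.exe output line looks roughly like (illustrative):
#   "15.0    REG_SZ    C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC"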
# 4. Check default directories for VC installation
_auto_configure_warning_maybe(repository_ctx, "Looking for default Visual C++ installation directory")
program_files_dir = get_env_var(repository_ctx, "PROGRAMFILES(X86)", default = "C:\\Program Files (x86)", enable_warning = True)
for path in [
"Microsoft Visual Studio\\2017\\BuildTools\\VC",
"Microsoft Visual Studio\\2017\\Community\\VC",
"Microsoft Visual Studio\\2017\\Professional\\VC",
"Microsoft Visual Studio\\2017\\Enterprise\\VC",
"Microsoft Visual Studio 14.0\\VC",
]:
path = program_files_dir + "\\" + path
if repository_ctx.path(path).exists:
vc_dir = path
break
if not vc_dir:
return None
_auto_configure_warning_maybe(repository_ctx, "Visual C++ build tools found at %s" % vc_dir)
return vc_dir
def _is_vs_2017(vc_path):
"""Check if the installed VS version is Visual Studio 2017."""
# In VS 2017, the location of VC is like:
# C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\
# In VS 2015 or older version, it is like:
# C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\
return vc_path.find("2017") != -1
def _find_vcvarsall_bat_script(repository_ctx, vc_path):
"""Find vcvarsall.bat script. Doesn't %-escape the result."""
if _is_vs_2017(vc_path):
vcvarsall = vc_path + "\\Auxiliary\\Build\\VCVARSALL.BAT"
else:
vcvarsall = vc_path + "\\VCVARSALL.BAT"
if not repository_ctx.path(vcvarsall).exists:
return None
return vcvarsall
def setup_vc_env_vars(repository_ctx, vc_path):
"""Get environment variables set by VCVARSALL.BAT. Doesn't %-escape the result!"""
vcvarsall = _find_vcvarsall_bat_script(repository_ctx, vc_path)
if not vcvarsall:
return None
repository_ctx.file(
"get_env.bat",
"@echo off\n" +
"call \"" + vcvarsall + "\" amd64 > NUL \n" +
"echo PATH=%PATH%,INCLUDE=%INCLUDE%,LIB=%LIB%,WINDOWSSDKDIR=%WINDOWSSDKDIR% \n",
True,
)
env = _add_system_root(
repository_ctx,
{"PATH": "", "INCLUDE": "", "LIB": "", "WINDOWSSDKDIR": ""},
)
envs = execute(repository_ctx, ["./get_env.bat"], environment = env).split(",")
env_map = {}
for env in envs:
key, value = env.split("=", 1)
env_map[key] = escape_string(value.replace("\\", "\\\\"))
return env_map
def find_msvc_tool(repository_ctx, vc_path, tool):
"""Find the exact path of a specific build tool in MSVC. Doesn't %-escape the result."""
tool_path = ""
if _is_vs_2017(vc_path):
# For VS 2017, the tools are under a directory like:
# C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Tools\MSVC\14.10.24930\bin\HostX64\x64
dirs = repository_ctx.path(vc_path + "\\Tools\\MSVC").readdir()
if len(dirs) < 1:
return None
# Normally there should be only one child directory under %VC_PATH%\TOOLS\MSVC,
# but iterate every directory to be more robust.
for path in dirs:
tool_path = str(path) + "\\bin\\HostX64\\x64\\" + tool
if repository_ctx.path(tool_path).exists:
break
else:
# For VS 2015 and older version, the tools are under:
# C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64
tool_path = vc_path + "\\bin\\amd64\\" + tool
if not repository_ctx.path(tool_path).exists:
return None
return tool_path
def _find_missing_vc_tools(repository_ctx, vc_path):
"""Check if any required tool is missing under given VC path."""
missing_tools = []
if not _find_vcvarsall_bat_script(repository_ctx, vc_path):
missing_tools.append("VCVARSALL.BAT")
for tool in ["cl.exe", "link.exe", "lib.exe", "ml64.exe"]:
if not find_msvc_tool(repository_ctx, vc_path, tool):
missing_tools.append(tool)
return missing_tools
def _is_support_debug_fastlink(repository_ctx, vc_path):
"""Run MSVC linker alone to see if it supports /DEBUG:FASTLINK."""
linker = find_msvc_tool(repository_ctx, vc_path, "link.exe")
result = execute(repository_ctx, [linker], expect_failure = True)
return result.find("/DEBUG[:{FASTLINK|FULL|NONE}]") != -1
def configure_windows_toolchain(repository_ctx):
"""Configure C++ toolchain on Windows."""
paths = resolve_labels(repository_ctx, [
"@bazel_tools//tools/cpp:BUILD.static.windows",
"@bazel_tools//tools/cpp:CROSSTOOL",
"@bazel_tools//tools/cpp:CROSSTOOL.tpl",
"@bazel_tools//tools/cpp:vc_installation_error.bat.tpl",
])
repository_ctx.symlink(paths["@bazel_tools//tools/cpp:BUILD.static.windows"], "BUILD")
vc_path = find_vc_path(repository_ctx)
missing_tools = None
if not vc_path:
repository_ctx.template(
"vc_installation_error.bat",
paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
{"%{vc_error_message}": ""},
)
else:
missing_tools = _find_missing_vc_tools(repository_ctx, vc_path)
if missing_tools:
message = "\r\n".join([
"echo. 1>&2",
"echo Visual C++ build tools seems to be installed at %s 1>&2" % vc_path,
"echo But Bazel can't find the following tools: 1>&2",
"echo %s 1>&2" % ", ".join(missing_tools),
"echo. 1>&2",
])
repository_ctx.template(
"vc_installation_error.bat",
paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
{"%{vc_error_message}": message},
)
if not vc_path or missing_tools:
repository_ctx.template(
"CROSSTOOL",
paths["@bazel_tools//tools/cpp:CROSSTOOL.tpl"],
{
"%{cpu}": "x64_windows",
"%{default_toolchain_name}": "msvc_x64",
"%{toolchain_name}": "msys_x64",
"%{msvc_env_tmp}": "",
"%{msvc_env_path}": "",
"%{msvc_env_include}": "",
"%{msvc_env_lib}": "",
"%{msvc_cl_path}": "vc_installation_error.bat",
"%{msvc_ml_path}": "vc_installation_error.bat",
"%{msvc_link_path}": "vc_installation_error.bat",
"%{msvc_lib_path}": "vc_installation_error.bat",
"%{dbg_mode_debug}": "/DEBUG",
"%{fastbuild_mode_debug}": "/DEBUG",
"%{compilation_mode_content}": "",
"%{content}": _get_escaped_windows_msys_crosstool_content(repository_ctx),
"%{msys_x64_mingw_content}": _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = True),
"%{opt_content}": "",
"%{dbg_content}": "",
"%{link_content}": "",
"%{cxx_builtin_include_directory}": "",
"%{coverage}": "",
},
)
return
env = setup_vc_env_vars(repository_ctx, vc_path)
escaped_paths = escape_string(env["PATH"])
escaped_include_paths = escape_string(env["INCLUDE"])
escaped_lib_paths = escape_string(env["LIB"])
escaped_tmp_dir = escape_string(
get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace("\\", "\\\\"),
)
msvc_cl_path = find_msvc_tool(repository_ctx, vc_path, "cl.exe").replace("\\", "/")
msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe").replace("\\", "/")
msvc_link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe").replace("\\", "/")
msvc_lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe").replace("\\", "/")
escaped_cxx_include_directories = []
for path in escaped_include_paths.split(";"):
if path:
escaped_cxx_include_directories.append("cxx_builtin_include_directory: \"%s\"" % path)
support_debug_fastlink = _is_support_debug_fastlink(repository_ctx, vc_path)
repository_ctx.template(
"CROSSTOOL",
paths["@bazel_tools//tools/cpp:CROSSTOOL.tpl"],
{
"%{cpu}": "x64_windows",
"%{default_toolchain_name}": "msvc_x64",
"%{toolchain_name}": "msys_x64",
"%{msvc_env_tmp}": escaped_tmp_dir,
"%{msvc_env_path}": escaped_paths,
"%{msvc_env_include}": escaped_include_paths,
"%{msvc_env_lib}": escaped_lib_paths,
"%{msvc_cl_path}": msvc_cl_path,
"%{msvc_ml_path}": msvc_ml_path,
"%{msvc_link_path}": msvc_link_path,
"%{msvc_lib_path}": msvc_lib_path,
"%{dbg_mode_debug}": "/DEBUG:FULL" if support_debug_fastlink else "/DEBUG",
"%{fastbuild_mode_debug}": "/DEBUG:FASTLINK" if support_debug_fastlink else "/DEBUG",
"%{content}": _get_escaped_windows_msys_crosstool_content(repository_ctx),
"%{msys_x64_mingw_content}": _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = True),
"%{opt_content}": "",
"%{dbg_content}": "",
"%{link_content}": "",
"%{cxx_builtin_include_directory}": "\n".join(escaped_cxx_include_directories),
"%{coverage}": "",
},
)
| 46.375676
| 164
| 0.612157
|
8ccc8b3d27312583a70a938e6d75532553fbcce0
| 39,129
|
py
|
Python
|
chronolapsegui.py
|
ZirconCode/chronolapse
|
2cdc68e9615ab5bb1dc7e7f0f20006f32b373cfe
|
[
"MIT"
] | 1
|
2021-02-10T17:35:43.000Z
|
2021-02-10T17:35:43.000Z
|
chronolapsegui.py
|
ZirconCode/chronolapse
|
2cdc68e9615ab5bb1dc7e7f0f20006f32b373cfe
|
[
"MIT"
] | null | null | null |
chronolapsegui.py
|
ZirconCode/chronolapse
|
2cdc68e9615ab5bb1dc7e7f0f20006f32b373cfe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: ISO-8859-15 -*-
#
# generated by wxGlade 0.6.8 (standalone edition) on Sun Oct 02 12:29:06 2016
#
import wx
# begin wxGlade: dependencies
import gettext
from gettext import gettext as _
# end wxGlade
# begin wxGlade: extracode
class ProgressPanel(wx.Panel):
def __init__(self, *args, **kwds):
wx.Panel.__init__(self, *args, **kwds)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.progress = 0
def setProgress(self, progress):
self.progress = progress
dc = wx.WindowDC(self)
dc.SetPen(wx.Pen(wx.Colour(0,0,255,255)))
dc.SetBrush(wx.Brush(wx.Colour(0,0,255,220)))
# build rect
width,height = self.GetSizeTuple()
size = max(2, (width-10)*self.progress)
rect = wx.Rect(5,8, size ,5)
# draw rect
dc.Clear()
dc.DrawRoundedRectangleRect(rect, 2)
def OnPaint(self, evt):
# this doesn't appear to work at all...
width,height = self.GetSizeTuple()
# get the drawing context
dc = wx.PaintDC(self)
dc.SetPen(wx.Pen(wx.Colour(0,0,255,255)))
dc.SetBrush(wx.Brush(wx.Colour(0,0,255,220)))
# build rect
size = max(2, (width-10)*self.progress)
rect = wx.Rect(5,8, size ,5)
# draw rect
dc.Clear()
dc.BeginDrawing()
dc.DrawRoundedRectangleRect(rect, 2)
dc.EndDrawing()
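# Note: GetSizeTuple()/BeginDrawing()/EndDrawing() are wxPython "classic" APIs;
# under wxPython 4 (Phoenix), GetSize() replaces GetSizeTuple() and the
# Begin/EndDrawing calls were removed (they had long been no-ops).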
# end wxGlade
class chronoFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: chronoFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
# Menu Bar
self.chronoframe_menubar = wx.MenuBar()
self.file = wx.Menu()
self.exitmenuitem = wx.MenuItem(self.file, wx.ID_EXIT, _("Exit Chronolapse"), "", wx.ITEM_NORMAL)
self.file.AppendItem(self.exitmenuitem)
self.chronoframe_menubar.Append(self.file, _("File"))
self.aboutmenu = wx.Menu()
self.aboutmenuitem = wx.MenuItem(self.aboutmenu, wx.ID_ANY, _("About"), _("About Chronolapse"), wx.ITEM_NORMAL)
self.aboutmenu.AppendItem(self.aboutmenuitem)
self.chronoframe_menubar.Append(self.aboutmenu, _("About"))
self.SetMenuBar(self.chronoframe_menubar)
# Menu Bar end
self.notebook_1 = wx.Notebook(self, wx.ID_ANY, style=0)
self.notebook_1_capturepane = wx.Panel(self.notebook_1, wx.ID_ANY)
self.label_3 = wx.StaticText(self.notebook_1_capturepane, wx.ID_ANY, _("Capture:"))
self.screenshotcheck = wx.CheckBox(self.notebook_1_capturepane, wx.ID_ANY, _("Screenshots"))
self.screenshotconfigurebutton = wx.Button(self.notebook_1_capturepane, wx.ID_ANY, _("Configure"))
self.webcamcheck = wx.CheckBox(self.notebook_1_capturepane, wx.ID_ANY, _("Camera"))
self.configurewebcambutton = wx.Button(self.notebook_1_capturepane, wx.ID_ANY, _("Configure"))
self.filename_format_timestamp = wx.RadioButton(self.notebook_1_capturepane, wx.ID_ANY, _("Timestamp Filenames"), style=wx.RB_GROUP)
self.filename_format_sequential = wx.RadioButton(self.notebook_1_capturepane, wx.ID_ANY, _("Sequential Filenames"))
self.label_2 = wx.StaticText(self.notebook_1_capturepane, wx.ID_ANY, _("Seconds Between Captures:"))
self.frequencytext = wx.TextCtrl(self.notebook_1_capturepane, wx.ID_ANY, _("60"))
self.ignoreidlecheck = wx.CheckBox(self.notebook_1_capturepane, wx.ID_ANY, _("Skip Capture if Idle"))
self.startbutton = wx.Button(self.notebook_1_capturepane, wx.ID_ANY, _("Start Capture"))
self.forcecapturebutton = wx.Button(self.notebook_1_capturepane, wx.ID_ANY, _("Force Capture"))
self.progresspanel = ProgressPanel(self.notebook_1_capturepane, wx.ID_ANY)
self.notebook_1_pippane = wx.Panel(self.notebook_1, wx.ID_ANY)
self.label_1 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("Picture in Picture:"))
self.label_4 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("Main Image Folder:"))
self.pipmainimagefoldertext = wx.TextCtrl(self.notebook_1_pippane, wx.ID_ANY, "")
self.pipmainimagefolderbrowse = wx.Button(self.notebook_1_pippane, wx.ID_ANY, _("..."))
self.label_12 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("PIP Image Folder:"))
self.pippipimagefoldertext = wx.TextCtrl(self.notebook_1_pippane, wx.ID_ANY, "")
self.pippipimagefolderbrowse = wx.Button(self.notebook_1_pippane, wx.ID_ANY, _("..."))
self.label_13 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("Output Folder:"))
self.pipoutputimagefoldertext = wx.TextCtrl(self.notebook_1_pippane, wx.ID_ANY, "")
self.pipoutputimagefolderbrowse = wx.Button(self.notebook_1_pippane, wx.ID_ANY, _("..."))
self.label_14 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("PIP Size:"))
self.pipsizecombo = wx.ComboBox(self.notebook_1_pippane, wx.ID_ANY, choices=[_("Small"), _("Medium"), _("Large")], style=wx.CB_DROPDOWN)
self.label_15 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("PIP Position:"))
self.pippositioncombo = wx.ComboBox(self.notebook_1_pippane, wx.ID_ANY, choices=[_("Top"), _("Top-Right"), _("Right"), _("Bottom-Right"), _("Bottom"), _("Bottom-Left"), _("Left"), _("Top-Left")], style=wx.CB_DROPDOWN)
self.pipignoreunmatchedcheck = wx.CheckBox(self.notebook_1_pippane, wx.ID_ANY, _("Ignore un-matched images"))
self.pipcreatebutton = wx.Button(self.notebook_1_pippane, wx.ID_ANY, _("Create PIP"))
self.notebook_1_videopane = wx.Panel(self.notebook_1, wx.ID_ANY)
self.VideoLabel = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Video:"))
self.label_22 = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Source Images:"))
self.videosourcetext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.videosourcebrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_23 = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Destination Folder:"))
self.videodestinationtext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.videodestinationbrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_26 = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("MEncoder Path:"))
self.mencoderpathtext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.mencoderpathbrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_25 = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Video Codec:"))
self.videocodeccombo = wx.ComboBox(self.notebook_1_videopane, wx.ID_ANY, choices=[_("mpeg4"), _("mpeg2video"), _("wmv1"), _("wmv2")], style=wx.CB_DROPDOWN)
self.randomname = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Frame Rate:"))
self.videoframeratetext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, _("25"))
self.movielengthlabel = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Estimated Movie Length: 0 m 0 s"))
self.videocreatebutton = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("Encode Video"))
self.static_line_1 = wx.StaticLine(self.notebook_1_videopane, wx.ID_ANY)
self.AudioLabel = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Audio:"))
self.label_22_copy = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Video Source:"))
self.audiosourcevideotext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.audiosourcevideobrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_23_copy = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Audio Source:"))
self.audiosourcetext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.audiosourcebrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_26_copy = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Output Folder:"))
self.audiooutputfoldertext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.audiooutputfolderbrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.createaudiobutton = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("Add Audio"))
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_MENU, self.exitMenuClicked, self.exitmenuitem)
self.Bind(wx.EVT_MENU, self.aboutMenuClicked, self.aboutmenuitem)
self.Bind(wx.EVT_BUTTON, self.screenshotConfigurePressed, self.screenshotconfigurebutton)
self.Bind(wx.EVT_BUTTON, self.webcamConfigurePressed, self.configurewebcambutton)
self.Bind(wx.EVT_BUTTON, self.startCapturePressed, self.startbutton)
self.Bind(wx.EVT_BUTTON, self.forceCapturePressed, self.forcecapturebutton)
self.Bind(wx.EVT_BUTTON, self.pipMainImageBrowsePressed, self.pipmainimagefolderbrowse)
self.Bind(wx.EVT_BUTTON, self.pipPipImageBrowsePressed, self.pippipimagefolderbrowse)
self.Bind(wx.EVT_BUTTON, self.pipOutputBrowsePressed, self.pipoutputimagefolderbrowse)
self.Bind(wx.EVT_BUTTON, self.createPipPressed, self.pipcreatebutton)
self.Bind(wx.EVT_BUTTON, self.videoSourceBrowsePressed, self.videosourcebrowse)
self.Bind(wx.EVT_BUTTON, self.videoDestinationBrowsePressed, self.videodestinationbrowse)
self.Bind(wx.EVT_BUTTON, self.mencoderPathBrowsePressed, self.mencoderpathbrowse)
self.Bind(wx.EVT_TEXT, self.framerateTextChanged, self.videoframeratetext)
self.Bind(wx.EVT_BUTTON, self.createVideoPressed, self.videocreatebutton)
self.Bind(wx.EVT_BUTTON, self.audioSourceVideoBrowsePressed, self.audiosourcevideobrowse)
self.Bind(wx.EVT_BUTTON, self.audioSourceBrowsePressed, self.audiosourcebrowse)
self.Bind(wx.EVT_BUTTON, self.audioOutputFolderBrowsePressed, self.audiooutputfolderbrowse)
self.Bind(wx.EVT_BUTTON, self.createAudioPressed, self.createaudiobutton)
# end wxGlade
def __set_properties(self):
# begin wxGlade: chronoFrame.__set_properties
self.SetTitle(_("ChronoLapse by Keeyai"))
self.SetSize((511, 438))
self.label_3.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "MS Shell Dlg 2"))
self.screenshotcheck.SetToolTipString(_("Check this to capture screenshots"))
self.screenshotcheck.SetValue(1)
self.screenshotconfigurebutton.SetToolTipString(_("Click to configure screenshot captures"))
self.webcamcheck.SetToolTipString(_("Check to enable webcam captures"))
self.webcamcheck.SetValue(1)
self.configurewebcambutton.SetToolTipString(_("Click to configure camera captures"))
self.filename_format_timestamp.SetToolTipString(_("Saves screenshots and camera captures with the timestamp in the filename."))
self.filename_format_timestamp.SetValue(1)
self.filename_format_sequential.SetToolTipString(_("Saves screenshots and camera captures as sequential numbers. Required by some external encoding libraries."))
self.frequencytext.SetToolTipString(_("The number of seconds in between captures. Set to 0 for no automatic capturing."))
self.ignoreidlecheck.SetToolTipString(_("Check this to skip capturing if no recent activity detected"))
self.startbutton.SetToolTipString(_("Click to start/stop capturing"))
self.forcecapturebutton.SetToolTipString(_("Click to force CL to capture right now. Use for important frames or for creating stop motions."))
self.label_1.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "MS Shell Dlg 2"))
self.pipmainimagefoldertext.SetMinSize((200, -1))
self.pipmainimagefolderbrowse.SetMinSize((25, -1))
self.pipmainimagefolderbrowse.SetToolTipString(_("Click to Browse"))
self.pippipimagefoldertext.SetMinSize((200, -1))
self.pippipimagefolderbrowse.SetMinSize((25, -1))
self.pippipimagefolderbrowse.SetToolTipString(_("Click to Browse"))
self.pipoutputimagefoldertext.SetMinSize((25, -1))
self.pipoutputimagefolderbrowse.SetMinSize((25, -1))
self.pipoutputimagefolderbrowse.SetToolTipString(_("Click to Browse"))
self.pipsizecombo.SetToolTipString(_("Select the size of the smaller image"))
self.pipsizecombo.SetSelection(0)
self.pippositioncombo.SetToolTipString(_("Select the position of the smaller image"))
self.pippositioncombo.SetSelection(1)
self.pipignoreunmatchedcheck.SetToolTipString(_("Check to ignore image names that are in one folder but not the other"))
self.pipignoreunmatchedcheck.Hide()
self.pipignoreunmatchedcheck.SetValue(1)
self.pipcreatebutton.SetToolTipString(_("Create PIP"))
self.VideoLabel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "MS Shell Dlg 2"))
self.videosourcetext.SetMinSize((200, -1))
self.videosourcebrowse.SetMinSize((25, -1))
self.videosourcebrowse.SetToolTipString(_("Click to Browse"))
self.videodestinationtext.SetMinSize((200, -1))
self.videodestinationbrowse.SetMinSize((25, -1))
self.videodestinationbrowse.SetToolTipString(_("Click to Browse"))
self.mencoderpathtext.SetMinSize((200, -1))
self.mencoderpathtext.SetToolTipString(_("Set this to the MEncoder executable"))
self.mencoderpathbrowse.SetMinSize((25, -1))
self.mencoderpathbrowse.SetToolTipString(_("Click to Browse"))
self.videocodeccombo.SetToolTipString(_("Select which codec to use when encoding your video"))
self.videocodeccombo.SetSelection(0)
self.videoframeratetext.SetMinSize((25, -1))
self.videoframeratetext.SetToolTipString(_("Set how many images per second you want to show in your movie"))
self.videocreatebutton.SetToolTipString(_("Create the Video"))
self.AudioLabel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "MS Shell Dlg 2"))
self.audiosourcevideotext.SetMinSize((200, -1))
self.audiosourcevideobrowse.SetMinSize((25, -1))
self.audiosourcevideobrowse.SetToolTipString(_("Click to Browse"))
self.audiosourcetext.SetMinSize((200, -1))
self.audiosourcebrowse.SetMinSize((25, -1))
self.audiosourcebrowse.SetToolTipString(_("Click to Browse"))
self.audiooutputfoldertext.SetMinSize((200, -1))
self.audiooutputfoldertext.SetToolTipString(_("Set this to the folder where you want the finished video"))
self.audiooutputfolderbrowse.SetMinSize((25, -1))
self.audiooutputfolderbrowse.SetToolTipString(_("Click to Browse"))
# end wxGlade
def __do_layout(self):
# begin wxGlade: chronoFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_18 = wx.FlexGridSizer(10, 1, 0, 0)
grid_sizer_18_copy_copy = wx.FlexGridSizer(5, 2, 0, 0)
grid_sizer_34 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_35 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_31 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_23 = wx.FlexGridSizer(1, 3, 0, 5)
grid_sizer_4 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_3 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_18_copy = wx.FlexGridSizer(3, 2, 0, 0)
grid_sizer_30 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_28 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_27 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_5 = wx.FlexGridSizer(5, 1, 0, 0)
grid_sizer_13 = wx.FlexGridSizer(3, 2, 0, 0)
grid_sizer_17 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_14 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_12 = wx.FlexGridSizer(5, 2, 0, 0)
grid_sizer_25 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_22 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_21 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_1 = wx.FlexGridSizer(4, 1, 0, 0)
grid_sizer_26 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_15 = wx.FlexGridSizer(4, 2, 0, 0)
grid_sizer_20 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_16 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_1.Add(self.label_3, 0, 0, 0)
grid_sizer_16.Add(self.screenshotcheck, 0, 0, 0)
grid_sizer_16.Add(self.screenshotconfigurebutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_16.AddGrowableCol(0)
grid_sizer_16.AddGrowableCol(1)
grid_sizer_15.Add(grid_sizer_16, 1, wx.EXPAND, 0)
grid_sizer_20.Add(self.webcamcheck, 0, 0, 0)
grid_sizer_20.Add(self.configurewebcambutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_20.AddGrowableCol(0)
grid_sizer_20.AddGrowableCol(1)
grid_sizer_15.Add(grid_sizer_20, 1, wx.EXPAND, 0)
grid_sizer_15.Add(self.filename_format_timestamp, 0, 0, 0)
grid_sizer_15.Add(self.filename_format_sequential, 0, 0, 0)
grid_sizer_15.Add(self.label_2, 0, 0, 0)
grid_sizer_15.Add(self.frequencytext, 0, 0, 0)
grid_sizer_15.Add(self.ignoreidlecheck, 0, 0, 0)
grid_sizer_15.Add((20, 20), 0, 0, 0)
grid_sizer_15.AddGrowableCol(0)
grid_sizer_15.AddGrowableCol(1)
grid_sizer_1.Add(grid_sizer_15, 1, wx.EXPAND, 0)
grid_sizer_26.Add(self.startbutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_26.Add(self.forcecapturebutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_26.AddGrowableCol(0)
grid_sizer_26.AddGrowableCol(1)
grid_sizer_1.Add(grid_sizer_26, 1, wx.EXPAND, 0)
grid_sizer_1.Add(self.progresspanel, 1, wx.EXPAND, 0)
self.notebook_1_capturepane.SetSizer(grid_sizer_1)
grid_sizer_1.AddGrowableRow(2)
grid_sizer_1.AddGrowableCol(0)
grid_sizer_5.Add(self.label_1, 0, 0, 0)
grid_sizer_12.Add(self.label_4, 0, 0, 0)
grid_sizer_21.Add(self.pipmainimagefoldertext, 0, wx.EXPAND, 0)
grid_sizer_21.Add(self.pipmainimagefolderbrowse, 0, 0, 0)
grid_sizer_21.AddGrowableCol(0)
grid_sizer_12.Add(grid_sizer_21, 1, wx.EXPAND, 0)
grid_sizer_12.Add(self.label_12, 0, 0, 0)
grid_sizer_22.Add(self.pippipimagefoldertext, 0, wx.EXPAND, 0)
grid_sizer_22.Add(self.pippipimagefolderbrowse, 0, 0, 0)
grid_sizer_22.AddGrowableCol(0)
grid_sizer_12.Add(grid_sizer_22, 1, wx.EXPAND, 0)
grid_sizer_12.Add(self.label_13, 0, 0, 0)
grid_sizer_25.Add(self.pipoutputimagefoldertext, 0, wx.EXPAND, 0)
grid_sizer_25.Add(self.pipoutputimagefolderbrowse, 0, 0, 0)
grid_sizer_25.AddGrowableCol(0)
grid_sizer_12.Add(grid_sizer_25, 1, wx.EXPAND, 0)
grid_sizer_12.AddGrowableCol(0)
grid_sizer_12.AddGrowableCol(1)
grid_sizer_5.Add(grid_sizer_12, 1, wx.EXPAND, 0)
grid_sizer_14.Add(self.label_14, 0, 0, 0)
grid_sizer_14.Add(self.pipsizecombo, 0, 0, 0)
grid_sizer_14.AddGrowableCol(0)
grid_sizer_14.AddGrowableCol(1)
grid_sizer_13.Add(grid_sizer_14, 1, wx.EXPAND, 0)
grid_sizer_17.Add(self.label_15, 0, 0, 0)
grid_sizer_17.Add(self.pippositioncombo, 0, 0, 0)
grid_sizer_17.AddGrowableCol(0)
grid_sizer_17.AddGrowableCol(1)
grid_sizer_13.Add(grid_sizer_17, 1, wx.EXPAND, 0)
grid_sizer_13.AddGrowableCol(0)
grid_sizer_13.AddGrowableCol(1)
grid_sizer_5.Add(grid_sizer_13, 1, wx.EXPAND, 0)
grid_sizer_5.Add(self.pipignoreunmatchedcheck, 0, 0, 0)
grid_sizer_5.Add(self.pipcreatebutton, 0, wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 0)
self.notebook_1_pippane.SetSizer(grid_sizer_5)
grid_sizer_5.AddGrowableRow(4)
grid_sizer_5.AddGrowableCol(0)
grid_sizer_18.Add(self.VideoLabel, 0, 0, 0)
grid_sizer_18_copy.Add(self.label_22, 0, 0, 0)
grid_sizer_27.Add(self.videosourcetext, 0, wx.EXPAND, 0)
grid_sizer_27.Add(self.videosourcebrowse, 0, 0, 0)
grid_sizer_27.AddGrowableCol(0)
grid_sizer_18_copy.Add(grid_sizer_27, 1, wx.EXPAND, 0)
grid_sizer_18_copy.Add(self.label_23, 0, 0, 0)
grid_sizer_28.Add(self.videodestinationtext, 0, wx.EXPAND, 0)
grid_sizer_28.Add(self.videodestinationbrowse, 0, 0, 0)
grid_sizer_28.AddGrowableCol(0)
grid_sizer_18_copy.Add(grid_sizer_28, 1, wx.EXPAND, 0)
grid_sizer_18_copy.Add(self.label_26, 0, 0, 0)
grid_sizer_30.Add(self.mencoderpathtext, 0, wx.EXPAND, 0)
grid_sizer_30.Add(self.mencoderpathbrowse, 0, 0, 0)
grid_sizer_30.AddGrowableCol(0)
grid_sizer_18_copy.Add(grid_sizer_30, 1, wx.EXPAND, 0)
grid_sizer_18_copy.AddGrowableRow(2)
grid_sizer_18_copy.AddGrowableCol(1)
grid_sizer_18.Add(grid_sizer_18_copy, 1, wx.EXPAND, 0)
grid_sizer_3.Add(self.label_25, 0, 0, 0)
grid_sizer_3.Add(self.videocodeccombo, 0, 0, 0)
grid_sizer_3.AddGrowableCol(1)
grid_sizer_23.Add(grid_sizer_3, 1, wx.EXPAND, 0)
grid_sizer_4.Add(self.randomname, 0, 0, 0)
grid_sizer_4.Add(self.videoframeratetext, 0, 0, 0)
grid_sizer_4.AddGrowableCol(1)
grid_sizer_23.Add(grid_sizer_4, 1, wx.EXPAND, 0)
grid_sizer_23.Add(self.movielengthlabel, 0, 0, 0)
grid_sizer_23.AddGrowableCol(0)
grid_sizer_23.AddGrowableCol(1)
grid_sizer_23.AddGrowableCol(2)
grid_sizer_18.Add(grid_sizer_23, 1, wx.EXPAND, 0)
grid_sizer_18.Add(self.videocreatebutton, 0, wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_18.Add((20, 20), 0, 0, 0)
grid_sizer_18.Add(self.static_line_1, 0, wx.EXPAND, 0)
grid_sizer_18.Add(self.AudioLabel, 0, 0, 0)
grid_sizer_18_copy_copy.Add(self.label_22_copy, 0, 0, 0)
grid_sizer_31.Add(self.audiosourcevideotext, 0, wx.EXPAND, 0)
grid_sizer_31.Add(self.audiosourcevideobrowse, 0, 0, 0)
grid_sizer_31.AddGrowableCol(0)
grid_sizer_18_copy_copy.Add(grid_sizer_31, 1, wx.EXPAND, 0)
grid_sizer_18_copy_copy.Add(self.label_23_copy, 0, 0, 0)
grid_sizer_35.Add(self.audiosourcetext, 0, wx.EXPAND, 0)
grid_sizer_35.Add(self.audiosourcebrowse, 0, 0, 0)
grid_sizer_35.AddGrowableCol(0)
grid_sizer_18_copy_copy.Add(grid_sizer_35, 1, wx.EXPAND, 0)
grid_sizer_18_copy_copy.Add(self.label_26_copy, 0, 0, 0)
grid_sizer_34.Add(self.audiooutputfoldertext, 0, wx.EXPAND, 0)
grid_sizer_34.Add(self.audiooutputfolderbrowse, 0, wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_34.AddGrowableCol(0)
grid_sizer_18_copy_copy.Add(grid_sizer_34, 1, wx.EXPAND, 0)
grid_sizer_18_copy_copy.AddGrowableRow(3)
grid_sizer_18_copy_copy.AddGrowableCol(1)
grid_sizer_18.Add(grid_sizer_18_copy_copy, 1, wx.EXPAND, 0)
grid_sizer_18.Add(self.createaudiobutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.notebook_1_videopane.SetSizer(grid_sizer_18)
grid_sizer_18.AddGrowableRow(7)
grid_sizer_18.AddGrowableCol(0)
self.notebook_1.AddPage(self.notebook_1_capturepane, _("Capture"))
self.notebook_1.AddPage(self.notebook_1_pippane, _("PIP"))
self.notebook_1.AddPage(self.notebook_1_videopane, _("Video"))
sizer_1.Add(self.notebook_1, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def exitMenuClicked(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'exitMenuClicked' not implemented!")
event.Skip()
def aboutMenuClicked(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'aboutMenuClicked' not implemented!")
event.Skip()
def screenshotConfigurePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'screenshotConfigurePressed' not implemented!")
event.Skip()
def webcamConfigurePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'webcamConfigurePressed' not implemented!")
event.Skip()
def startCapturePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'startCapturePressed' not implemented!")
event.Skip()
def forceCapturePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'forceCapturePressed' not implemented!")
event.Skip()
def pipMainImageBrowsePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'pipMainImageBrowsePressed' not implemented!")
event.Skip()
def pipPipImageBrowsePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'pipPipImageBrowsePressed' not implemented!")
event.Skip()
def pipOutputBrowsePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'pipOutputBrowsePressed' not implemented!")
event.Skip()
def createPipPressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'createPipPressed' not implemented!")
event.Skip()
def videoSourceBrowsePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'videoSourceBrowsePressed' not implemented!")
event.Skip()
def videoDestinationBrowsePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'videoDestinationBrowsePressed' not implemented!")
event.Skip()
def mencoderPathBrowsePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'mencoderPathBrowsePressed' not implemented!")
event.Skip()
def framerateTextChanged(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'framerateTextChanged' not implemented!")
event.Skip()
def createVideoPressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'createVideoPressed' not implemented!")
event.Skip()
def audioSourceVideoBrowsePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'audioSourceVideoBrowsePressed' not implemented!")
event.Skip()
def audioSourceBrowsePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'audioSourceBrowsePressed' not implemented!")
event.Skip()
def audioOutputFolderBrowsePressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'audioOutputFolderBrowsePressed' not implemented!")
event.Skip()
def createAudioPressed(self, event): # wxGlade: chronoFrame.<event_handler>
print("Event handler 'createAudioPressed' not implemented!")
event.Skip()
# end of class chronoFrame
class screenshotConfigDialog(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: screenshotConfigDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.dualmonitorscheck = wx.CheckBox(self, wx.ID_ANY, _("Capture Dual Monitors"))
self.timestampcheck = wx.CheckBox(self, wx.ID_ANY, _("Show Timestamp"))
self.label_16 = wx.StaticText(self, wx.ID_ANY, _("Format:"))
self.screenshot_timestamp_format = wx.TextCtrl(self, wx.ID_ANY, _("%Y-%m-%d %H:%M:%S"))
self.subsectioncheck = wx.CheckBox(self, wx.ID_ANY, _("Subsection"))
self.label36 = wx.StaticText(self, wx.ID_ANY, _("Top:"))
self.subsectiontop = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_36 = wx.StaticText(self, wx.ID_ANY, _("Left:"))
self.subsectionleft = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_37 = wx.StaticText(self, wx.ID_ANY, _("Width:"))
self.subsectionwidth = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_38 = wx.StaticText(self, wx.ID_ANY, _("Height:"))
self.subsectionheight = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_5 = wx.StaticText(self, wx.ID_ANY, _("File Prefix:"))
self.screenshotprefixtext = wx.TextCtrl(self, wx.ID_ANY, _("screen_"))
self.label_6 = wx.StaticText(self, wx.ID_ANY, _("Save Folder:"))
self.screenshotsavefoldertext = wx.TextCtrl(self, wx.ID_ANY, "")
self.screenshotsavefolderbrowse = wx.Button(self, wx.ID_ANY, _("..."))
self.label_7 = wx.StaticText(self, wx.ID_ANY, _("File Format:"))
self.screenshotformatcombo = wx.ComboBox(self, wx.ID_ANY, choices=[_("jpg"), _("png"), _("gif")], style=wx.CB_DROPDOWN)
self.screenshotconfigsave = wx.Button(self, wx.ID_OK, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.screenshotSaveFolderBrowse, self.screenshotsavefolderbrowse)
# end wxGlade
def __set_properties(self):
# begin wxGlade: screenshotConfigDialog.__set_properties
self.SetTitle(_("Configure Screenshots"))
self.dualmonitorscheck.SetToolTipString(_("Check to capture images from 2 monitors"))
self.timestampcheck.SetToolTipString(_("Check to have CL write a timestamp on each capture"))
self.timestampcheck.SetValue(1)
self.screenshot_timestamp_format.SetMinSize((150, -1))
self.screenshot_timestamp_format.SetToolTipString(_("The timestamp format. Passed directly to python's time.strftime function."))
self.subsectioncheck.SetToolTipString(_("Check to capture only a subsection of the screen"))
self.screenshotprefixtext.SetToolTipString(_("The file prefix every screenshot should start with"))
self.screenshotsavefoldertext.SetMinSize((250, -1))
self.screenshotsavefolderbrowse.SetMinSize((20, -1))
self.screenshotsavefolderbrowse.SetToolTipString(_("Click to browse directories"))
self.screenshotformatcombo.SetToolTipString(_("Select the file format in which screen captures will be saved"))
self.screenshotformatcombo.SetSelection(0)
self.screenshotconfigsave.SetToolTipString(_("Save this configuration"))
# end wxGlade
def __do_layout(self):
# begin wxGlade: screenshotConfigDialog.__do_layout
grid_sizer_2 = wx.FlexGridSizer(6, 1, 10, 0)
grid_sizer_8 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_7 = wx.FlexGridSizer(3, 3, 0, 0)
grid_sizer_32 = wx.FlexGridSizer(2, 4, 0, 0)
grid_sizer_6 = wx.FlexGridSizer(3, 2, 0, 0)
grid_sizer_29 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_6.Add(self.dualmonitorscheck, 0, 0, 0)
grid_sizer_6.Add((20, 20), 0, 0, 0)
grid_sizer_6.Add(self.timestampcheck, 0, 0, 0)
grid_sizer_29.Add(self.label_16, 0, 0, 0)
grid_sizer_29.Add(self.screenshot_timestamp_format, 0, 0, 0)
grid_sizer_29.AddGrowableCol(1)
grid_sizer_6.Add(grid_sizer_29, 1, wx.EXPAND, 0)
grid_sizer_6.Add(self.subsectioncheck, 0, 0, 0)
grid_sizer_6.Add((20, 20), 0, 0, 0)
grid_sizer_2.Add(grid_sizer_6, 1, wx.EXPAND, 0)
grid_sizer_32.Add(self.label36, 0, 0, 0)
grid_sizer_32.Add(self.subsectiontop, 0, 0, 0)
grid_sizer_32.Add(self.label_36, 0, 0, 0)
grid_sizer_32.Add(self.subsectionleft, 0, 0, 0)
grid_sizer_32.Add(self.label_37, 0, 0, 0)
grid_sizer_32.Add(self.subsectionwidth, 0, 0, 0)
grid_sizer_32.Add(self.label_38, 0, 0, 0)
grid_sizer_32.Add(self.subsectionheight, 0, 0, 0)
grid_sizer_2.Add(grid_sizer_32, 1, wx.EXPAND, 0)
grid_sizer_7.Add(self.label_5, 0, 0, 0)
grid_sizer_7.Add(self.screenshotprefixtext, 0, 0, 0)
grid_sizer_7.Add((20, 20), 0, 0, 0)
grid_sizer_7.Add(self.label_6, 0, 0, 0)
grid_sizer_7.Add(self.screenshotsavefoldertext, 0, 0, 0)
grid_sizer_7.Add(self.screenshotsavefolderbrowse, 0, 0, 0)
grid_sizer_7.Add(self.label_7, 0, 0, 0)
grid_sizer_7.Add(self.screenshotformatcombo, 0, 0, 0)
grid_sizer_7.Add((20, 20), 0, 0, 0)
grid_sizer_2.Add(grid_sizer_7, 1, wx.EXPAND, 0)
grid_sizer_8.Add(self.screenshotconfigsave, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_8.AddGrowableCol(0)
grid_sizer_8.AddGrowableCol(1)
grid_sizer_2.Add(grid_sizer_8, 1, wx.EXPAND, 0)
self.SetSizer(grid_sizer_2)
grid_sizer_2.Fit(self)
grid_sizer_2.AddGrowableCol(0)
self.Layout()
self.Centre()
# end wxGlade
def screenshotSaveFolderBrowse(self, event): # wxGlade: screenshotConfigDialog.<event_handler>
print("Event handler 'screenshotSaveFolderBrowse' not implemented!")
event.Skip()
# end of class screenshotConfigDialog
class webcamConfigDialog(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: webcamConfigDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.testwebcambutton = wx.Button(self, wx.ID_ANY, _("Test Webcam"))
self.webcamtimestampcheck = wx.CheckBox(self, wx.ID_ANY, _("Show Timestamp"))
self.label_16 = wx.StaticText(self, wx.ID_ANY, _("Format:"))
self.webcam_timestamp_format = wx.TextCtrl(self, wx.ID_ANY, _("%Y-%m-%d %H:%M:%S"))
self.label_9 = wx.StaticText(self, wx.ID_ANY, _("File Prefix:"))
self.webcamprefixtext = wx.TextCtrl(self, wx.ID_ANY, _("cam_"))
self.label_10 = wx.StaticText(self, wx.ID_ANY, _("Save Folder:"))
self.webcamsavefoldertext = wx.TextCtrl(self, wx.ID_ANY, "")
self.webcamsavefolderbrowse = wx.Button(self, wx.ID_ANY, _("..."))
self.label_11 = wx.StaticText(self, wx.ID_ANY, _("File Format:"))
self.webcamformatcombo = wx.ComboBox(self, wx.ID_ANY, choices=[_("jpg"), _("png"), _("gif")], style=wx.CB_DROPDOWN)
self.webcamsavebutton = wx.Button(self, wx.ID_OK, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.testWebcamPressed, self.testwebcambutton)
self.Bind(wx.EVT_BUTTON, self.webcamSaveFolderBrowse, self.webcamsavefolderbrowse)
# end wxGlade
def __set_properties(self):
# begin wxGlade: webcamConfigDialog.__set_properties
self.SetTitle(_("Configure Webcam"))
self.testwebcambutton.SetToolTipString(_("Click to test your webcam"))
self.webcamtimestampcheck.SetToolTipString(_("Check to write a timestamp on each webcam capture"))
self.webcam_timestamp_format.SetMinSize((150, -1))
self.webcam_timestamp_format.SetToolTipString(_("The timestamp format. Passed directly to python's time.strftime function."))
self.webcamsavefoldertext.SetMinSize((250, -1))
self.webcamsavefolderbrowse.SetMinSize((20, -1))
self.webcamformatcombo.SetToolTipString(_("Select the file format in which webcam captures will be saved"))
self.webcamformatcombo.SetSelection(0)
self.webcamsavebutton.SetToolTipString(_("Save this configuration"))
# end wxGlade
def __do_layout(self):
# begin wxGlade: webcamConfigDialog.__do_layout
grid_sizer_9 = wx.FlexGridSizer(4, 1, 10, 0)
grid_sizer_11 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_10 = wx.FlexGridSizer(4, 3, 0, 0)
grid_sizer_19 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_29 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_9.Add(self.testwebcambutton, 0, 0, 0)
grid_sizer_19.Add(self.webcamtimestampcheck, 0, 0, 0)
grid_sizer_29.Add(self.label_16, 0, 0, 0)
grid_sizer_29.Add(self.webcam_timestamp_format, 0, 0, 0)
grid_sizer_29.AddGrowableCol(1)
grid_sizer_19.Add(grid_sizer_29, 1, wx.EXPAND, 0)
grid_sizer_19.AddGrowableCol(0)
grid_sizer_19.AddGrowableCol(1)
grid_sizer_9.Add(grid_sizer_19, 1, wx.EXPAND, 0)
grid_sizer_10.Add(self.label_9, 0, 0, 0)
grid_sizer_10.Add(self.webcamprefixtext, 0, 0, 0)
grid_sizer_10.Add((20, 20), 0, 0, 0)
grid_sizer_10.Add(self.label_10, 0, 0, 0)
grid_sizer_10.Add(self.webcamsavefoldertext, 0, 0, 0)
grid_sizer_10.Add(self.webcamsavefolderbrowse, 0, 0, 0)
grid_sizer_10.Add(self.label_11, 0, 0, 0)
grid_sizer_10.Add(self.webcamformatcombo, 0, 0, 0)
grid_sizer_10.Add((20, 20), 0, 0, 0)
grid_sizer_9.Add(grid_sizer_10, 1, wx.EXPAND, 0)
grid_sizer_11.Add(self.webcamsavebutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_11.AddGrowableCol(0)
grid_sizer_11.AddGrowableCol(1)
grid_sizer_9.Add(grid_sizer_11, 1, wx.EXPAND, 0)
self.SetSizer(grid_sizer_9)
grid_sizer_9.Fit(self)
grid_sizer_9.AddGrowableCol(0)
self.Layout()
self.Centre()
# end wxGlade
def testWebcamPressed(self, event): # wxGlade: webcamConfigDialog.<event_handler>
print("Event handler 'testWebcamPressed' not implemented!")
event.Skip()
def webcamSaveFolderBrowse(self, event): # wxGlade: webcamConfigDialog.<event_handler>
print("Event handler 'webcamSaveFolderBrowse' not implemented!")
event.Skip()
# end of class webcamConfigDialog
class webcamPreviewDialog(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: webcamPreviewDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | wx.THICK_FRAME
wx.Dialog.__init__(self, *args, **kwds)
self.panel_1 = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
self.previewbitmap = wx.StaticBitmap(self.panel_1, wx.ID_ANY, wx.NullBitmap)
self.previewokbutton = wx.Button(self, wx.ID_OK, "")
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: webcamPreviewDialog.__set_properties
self.SetTitle(_("Webcam Preview"))
self.SetSize((500, 400))
self.previewbitmap.SetMinSize((-1, -1))
self.panel_1.SetScrollRate(10, 10)
# end wxGlade
def __do_layout(self):
# begin wxGlade: webcamPreviewDialog.__do_layout
grid_sizer_24 = wx.FlexGridSizer(2, 1, 0, 0)
sizer_3 = wx.FlexGridSizer(1, 1, 0, 0)
sizer_3.Add(self.previewbitmap, 0, wx.EXPAND, 0)
self.panel_1.SetSizer(sizer_3)
sizer_3.AddGrowableRow(0)
sizer_3.AddGrowableCol(0)
grid_sizer_24.Add(self.panel_1, 1, wx.EXPAND, 0)
grid_sizer_24.Add(self.previewokbutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.SetSizer(grid_sizer_24)
grid_sizer_24.AddGrowableRow(0)
grid_sizer_24.AddGrowableCol(0)
self.Layout()
# end wxGlade
# end of class webcamPreviewDialog
if __name__ == "__main__":
gettext.install("app") # replace with the appropriate catalog name
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
chronoframe = chronoFrame(None, wx.ID_ANY, "")
app.SetTopWindow(chronoframe)
chronoframe.Show()
app.MainLoop()
| 55.34512
| 225
| 0.69092
|
a733a21c979d47ef4c2df8b2c22c64ef087f45db
| 626
|
py
|
Python
|
anytask/schools/models.py
|
bcskda/anytask
|
5a359dcb669b689fc5a4f1705f2c88cd031ab37d
|
[
"MIT"
] | null | null | null |
anytask/schools/models.py
|
bcskda/anytask
|
5a359dcb669b689fc5a4f1705f2c88cd031ab37d
|
[
"MIT"
] | 1
|
2021-03-01T07:42:38.000Z
|
2021-03-01T09:14:10.000Z
|
anytask/schools/models.py
|
bcskda/anytask
|
5a359dcb669b689fc5a4f1705f2c88cd031ab37d
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.core.urlresolvers import reverse
from courses.models import Course
# Create your models here.
class School(models.Model):
name = models.CharField(max_length=191, db_index=True, null=False, blank=False)
link = models.CharField(max_length=191, db_index=False, null=False, blank=False)
courses = models.ManyToManyField(Course, blank=True)
def __unicode__(self):
return unicode(self.name)
def get_full_name(self):
return unicode(self.name)
def get_absolute_url(self):
return reverse('schools.views.school_page', args=[str(self.link)])
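# Illustrative sketch only (not part of the original module): how this model is
# typically used, assuming a Course instance `course` already exists and the
# 'schools.views.school_page' URL pattern referenced above is registered.
# Wrapped in a helper that is never called, so importing the module stays
# side-effect free.
def _example_school_usage(course):
    school = School.objects.create(name=u"Example school", link="example-school")
    school.courses.add(course)
    return school.get_absolute_url()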
| 29.809524
| 84
| 0.731629
|
1aad2b6f042c9f6b4c05a2958fdef275ce029673
| 44
|
py
|
Python
|
cb_scripts/ifttt/__init__.py
|
christopher-burke/python-scripts
|
bdbea2456130e0958b6a6ab8d138f4f19b39b934
|
[
"MIT"
] | 1
|
2022-02-05T06:39:05.000Z
|
2022-02-05T06:39:05.000Z
|
cb_scripts/ifttt/__init__.py
|
christopher-burke/python-scripts
|
bdbea2456130e0958b6a6ab8d138f4f19b39b934
|
[
"MIT"
] | null | null | null |
cb_scripts/ifttt/__init__.py
|
christopher-burke/python-scripts
|
bdbea2456130e0958b6a6ab8d138f4f19b39b934
|
[
"MIT"
] | 1
|
2021-06-10T22:04:35.000Z
|
2021-06-10T22:04:35.000Z
|
from .ifttt_event_trigger import post_ifttt
| 22
| 43
| 0.886364
|
96a97c075a94bc135b10cf3aa77374ebe692e250
| 1,833
|
py
|
Python
|
taiga/front/sitemaps/__init__.py
|
threefoldtech/Threefold-Circles
|
cbc433796b25cf7af9a295af65d665a4a279e2d6
|
[
"Apache-2.0"
] | null | null | null |
taiga/front/sitemaps/__init__.py
|
threefoldtech/Threefold-Circles
|
cbc433796b25cf7af9a295af65d665a4a279e2d6
|
[
"Apache-2.0"
] | 12
|
2019-11-25T14:08:32.000Z
|
2021-06-24T10:35:51.000Z
|
taiga/front/sitemaps/__init__.py
|
threefoldtech/Threefold-Circles
|
cbc433796b25cf7af9a295af65d665a4a279e2d6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesรบs Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragรกn <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
from .generics import GenericSitemap
from .projects import ProjectsSitemap
from .projects import ProjectBacklogsSitemap
from .projects import ProjectKanbansSitemap
from .epics import EpicsSitemap
from .milestones import MilestonesSitemap
from .userstories import UserStoriesSitemap
from .tasks import TasksSitemap
from .issues import IssuesSitemap
from .wiki import WikiPagesSitemap
from .users import UsersSitemap
sitemaps = OrderedDict([
("generics", GenericSitemap),
("projects", ProjectsSitemap),
("project-backlogs", ProjectBacklogsSitemap),
("project-kanbans", ProjectKanbansSitemap),
("epics", EpicsSitemap),
("milestones", MilestonesSitemap),
("userstories", UserStoriesSitemap),
("tasks", TasksSitemap),
("issues", IssuesSitemap),
("wikipages", WikiPagesSitemap),
("users", UsersSitemap)
])
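# Illustrative sketch only (not part of the original module): one conventional
# way a registry like `sitemaps` is exposed through Django's bundled sitemap
# view. Taiga's real URL wiring lives elsewhere, so the pattern below is an
# assumption for illustration.
def _example_sitemap_urlpatterns():
    from django.conf.urls import url
    from django.contrib.sitemaps.views import sitemap
    return [
        url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps}, name='sitemap'),
    ]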
| 29.095238
| 74
| 0.756137
|
13ee9fa78dd76b287cdd70f300a95e0f3b9ddc60
| 2,031
|
py
|
Python
|
test/functional/rpc_getaddressinfo_labels_purpose_deprecation.py
|
asuka431/fujicoin-0.20.0
|
82b1f5dc5480f6585a90ef9cc47fd9cd6c55def2
|
[
"MIT"
] | 1
|
2022-03-16T02:39:39.000Z
|
2022-03-16T02:39:39.000Z
|
test/functional/rpc_getaddressinfo_labels_purpose_deprecation.py
|
asuka431/fujicoin-0.20.0
|
82b1f5dc5480f6585a90ef9cc47fd9cd6c55def2
|
[
"MIT"
] | null | null | null |
test/functional/rpc_getaddressinfo_labels_purpose_deprecation.py
|
asuka431/fujicoin-0.20.0
|
82b1f5dc5480f6585a90ef9cc47fd9cd6c55def2
|
[
"MIT"
] | 1
|
2022-03-26T10:30:47.000Z
|
2022-03-26T10:30:47.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Baricoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test deprecation of RPC getaddressinfo `labels` returning an array
containing a JSON object of `name` and `purpose` key-value pairs. It now
returns an array containing only the label name.
"""
from test_framework.test_framework import BaricoinTestFramework
from test_framework.util import assert_equal
LABELS_TO_TEST = frozenset({"", "New \U0001d161 $<#>&!рыба Label"})
class GetAddressInfoLabelsPurposeDeprecationTest(BaricoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = False
# Start node[0] with -deprecatedrpc=labelspurpose and node[1] without.
self.extra_args = [["-deprecatedrpc=labelspurpose"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_labels(self, node_num, label_name, expected_value):
node = self.nodes[node_num]
address = node.getnewaddress()
if label_name != "":
node.setlabel(address, label_name)
self.log.info(" set label to {}".format(label_name))
labels = node.getaddressinfo(address)["labels"]
self.log.info(" labels = {}".format(labels))
assert_equal(labels, expected_value)
def run_test(self):
"""Test getaddressinfo labels with and without -deprecatedrpc flag."""
self.log.info("Test getaddressinfo labels with -deprecatedrpc flag")
for label in LABELS_TO_TEST:
self.test_labels(node_num=0, label_name=label, expected_value=[{"name": label, "purpose": "receive"}])
self.log.info("Test getaddressinfo labels without -deprecatedrpc flag")
for label in LABELS_TO_TEST:
self.test_labels(node_num=1, label_name=label, expected_value=[label])
if __name__ == '__main__':
GetAddressInfoLabelsPurposeDeprecationTest().main()
| 41.44898
| 114
| 0.706548
|
4523188e880314a4982c26c9b50e7faf0c67555b
| 9,897
|
py
|
Python
|
Source/chrome/build/android/pylib/constants/__init__.py
|
yury-s/v8-inspector
|
0ab4779e0909d387f243f41ca2621237cdb0c7fe
|
[
"BSD-3-Clause"
] | 20
|
2015-08-26T06:46:00.000Z
|
2019-02-27T09:05:58.000Z
|
Source/chrome/build/android/pylib/constants/__init__.py
|
yury-s/v8-inspector
|
0ab4779e0909d387f243f41ca2621237cdb0c7fe
|
[
"BSD-3-Clause"
] | null | null | null |
Source/chrome/build/android/pylib/constants/__init__.py
|
yury-s/v8-inspector
|
0ab4779e0909d387f243f41ca2621237cdb0c7fe
|
[
"BSD-3-Clause"
] | 2
|
2015-08-26T05:49:35.000Z
|
2020-02-03T20:22:43.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants shared by test runners and other scripts."""
# TODO(jbudorick): Split these constants into coherent modules.
# pylint: disable=W0212
import collections
import logging
import os
import subprocess
DIR_SOURCE_ROOT = os.environ.get('CHECKOUT_SOURCE_ROOT',
os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir)))
ISOLATE_DEPS_DIR = os.path.join(DIR_SOURCE_ROOT, 'isolate_deps_dir')
CHROME_SHELL_HOST_DRIVEN_DIR = os.path.join(
DIR_SOURCE_ROOT, 'chrome', 'android')
PackageInfo = collections.namedtuple('PackageInfo',
['package', 'activity', 'cmdline_file', 'devtools_socket',
'test_package'])
PACKAGE_INFO = {
'chrome_document': PackageInfo(
'com.google.android.apps.chrome.document',
'com.google.android.apps.chrome.document.ChromeLauncherActivity',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome': PackageInfo(
'com.google.android.apps.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'com.google.android.apps.chrome.tests'),
'chrome_beta': PackageInfo(
'com.chrome.beta',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_stable': PackageInfo(
'com.android.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_dev': PackageInfo(
'com.chrome.dev',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_canary': PackageInfo(
'com.chrome.canary',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_work': PackageInfo(
'com.chrome.work',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'legacy_browser': PackageInfo(
'com.google.android.browser',
'com.android.browser.BrowserActivity',
None,
None,
None),
'chromecast_shell': PackageInfo(
'com.google.android.apps.mediashell',
'com.google.android.apps.mediashell.MediaShellActivity',
'/data/local/tmp/castshell-command-line',
None,
None),
'content_shell': PackageInfo(
'org.chromium.content_shell_apk',
'org.chromium.content_shell_apk.ContentShellActivity',
'/data/local/tmp/content-shell-command-line',
None,
'org.chromium.content_shell_apk.tests'),
'chrome_shell': PackageInfo(
'org.chromium.chrome.shell',
'org.chromium.chrome.shell.ChromeShellActivity',
'/data/local/tmp/chrome-shell-command-line',
'chrome_shell_devtools_remote',
'org.chromium.chrome.shell.tests'),
'android_webview_shell': PackageInfo(
'org.chromium.android_webview.shell',
'org.chromium.android_webview.shell.AwShellActivity',
'/data/local/tmp/android-webview-command-line',
None,
'org.chromium.android_webview.test'),
'gtest': PackageInfo(
'org.chromium.native_test',
'org.chromium.native_test.NativeUnitTestActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'components_browsertests': PackageInfo(
'org.chromium.components_browsertests_apk',
('org.chromium.components_browsertests_apk' +
'.ComponentsBrowserTestsActivity'),
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'content_browsertests': PackageInfo(
'org.chromium.content_browsertests_apk',
'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'chromedriver_webview_shell': PackageInfo(
'org.chromium.chromedriver_webview_shell',
'org.chromium.chromedriver_webview_shell.Main',
None,
None,
None),
}
# Ports arrangement for various test servers used in Chrome for Android.
# Lighttpd server will attempt to use 9000 as default port, if unavailable it
# will find a free port from 8001 - 8999.
LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031
TEST_SEARCH_BY_IMAGE_SERVER_PORT = 9041
TEST_POLICY_SERVER_PORT = 9051
# The net test server is started from port 10201.
# TODO(pliard): http://crbug.com/239014. Remove this dirty workaround once
# http://crbug.com/239014 is fixed properly.
TEST_SERVER_PORT_FIRST = 10201
TEST_SERVER_PORT_LAST = 30000
# A file to record next valid port of test server.
TEST_SERVER_PORT_FILE = '/tmp/test_server_port'
TEST_SERVER_PORT_LOCKFILE = '/tmp/test_server_port.lock'
TEST_EXECUTABLE_DIR = '/data/local/tmp'
# Directories for common java libraries for SDK build.
# These constants are defined in build/android/ant/common.xml
SDK_BUILD_JAVALIB_DIR = 'lib.java'
SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
SDK_BUILD_APKS_DIR = 'apks'
ADB_KEYS_FILE = '/data/misc/adb/adb_keys'
PERF_OUTPUT_DIR = os.path.join(DIR_SOURCE_ROOT, 'out', 'step_results')
# The directory on the device where perf test output gets saved to.
DEVICE_PERF_OUTPUT_DIR = (
'/data/data/' + PACKAGE_INFO['chrome'].package + '/files')
SCREENSHOTS_DIR = os.path.join(DIR_SOURCE_ROOT, 'out_screenshots')
class ANDROID_SDK_VERSION_CODES(object):
"""Android SDK version codes.
http://developer.android.com/reference/android/os/Build.VERSION_CODES.html
"""
JELLY_BEAN = 16
JELLY_BEAN_MR1 = 17
JELLY_BEAN_MR2 = 18
KITKAT = 19
KITKAT_WATCH = 20
LOLLIPOP = 21
LOLLIPOP_MR1 = 22
ANDROID_SDK_VERSION = ANDROID_SDK_VERSION_CODES.LOLLIPOP_MR1
ANDROID_SDK_BUILD_TOOLS_VERSION = '22.0.0'
ANDROID_SDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party/android_tools/sdk')
ANDROID_SDK_TOOLS = os.path.join(ANDROID_SDK_ROOT,
'build-tools', ANDROID_SDK_BUILD_TOOLS_VERSION)
ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party/android_tools/ndk')
EMULATOR_SDK_ROOT = os.environ.get('ANDROID_EMULATOR_SDK_ROOT',
os.path.join(DIR_SOURCE_ROOT,
'android_emulator_sdk'))
BAD_DEVICES_JSON = os.path.join(DIR_SOURCE_ROOT,
os.environ.get('CHROMIUM_OUT_DIR', 'out'),
'bad_devices.json')
UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
DEVICE_LOCAL_PROPERTIES_PATH = '/data/local.prop'
PYTHON_UNIT_TEST_SUITES = {
'pylib_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
'test_modules': [
'pylib.cmd_helper_test',
'pylib.device.device_utils_test',
'pylib.results.json_results_test',
'pylib.utils.md5sum_test',
]
},
'gyp_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android', 'gyp'),
'test_modules': [
'java_cpp_enum_tests',
]
},
}
LOCAL_MACHINE_TESTS = ['junit', 'python']
VALID_ENVIRONMENTS = ['local', 'remote_device']
VALID_TEST_TYPES = ['gtest', 'instrumentation', 'junit', 'linker', 'monkey',
'perf', 'python', 'uiautomator', 'uirobot']
VALID_DEVICE_TYPES = ['Android', 'iOS']
def GetBuildType():
try:
return os.environ['BUILDTYPE']
except KeyError:
raise EnvironmentError(
'The BUILDTYPE environment variable has not been set')
def SetBuildType(build_type):
os.environ['BUILDTYPE'] = build_type
def SetBuildDirectory(build_directory):
os.environ['CHROMIUM_OUT_DIR'] = build_directory
def SetOutputDirectory(output_directory):
os.environ['CHROMIUM_OUTPUT_DIR'] = output_directory
def GetOutDirectory(build_type=None):
"""Returns the out directory where the output binaries are built.
Args:
build_type: Build type, generally 'Debug' or 'Release'. Defaults to the
globally set build type environment variable BUILDTYPE.
"""
if 'CHROMIUM_OUTPUT_DIR' in os.environ:
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
GetBuildType() if build_type is None else build_type))
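# Worked example of the precedence above (paths are illustrative assumptions):
# with DIR_SOURCE_ROOT=/src/chromium, CHROMIUM_OUTPUT_DIR unset and
# BUILDTYPE=Release, GetOutDirectory() returns /src/chromium/out/Release;
# if CHROMIUM_OUTPUT_DIR=out_x86/Debug is set it takes priority and the result
# is /src/chromium/out_x86/Debug regardless of build_type.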
def _Memoize(func):
def Wrapper():
try:
return func._result
except AttributeError:
func._result = func()
return func._result
return Wrapper
def SetAdbPath(adb_path):
os.environ['ADB_PATH'] = adb_path
def GetAdbPath():
# Check if a custom adb path as been set. If not, try to find adb
# on the system.
if os.environ.get('ADB_PATH'):
return os.environ.get('ADB_PATH')
else:
return _FindAdbPath()
@_Memoize
def _FindAdbPath():
if os.environ.get('ANDROID_SDK_ROOT'):
return 'adb'
# If envsetup.sh hasn't been sourced and there's no adb in the path,
# set it here.
try:
with open(os.devnull, 'w') as devnull:
subprocess.call(['adb', 'version'], stdout=devnull, stderr=devnull)
return 'adb'
except OSError:
logging.debug('No adb found in $PATH, fallback to checked in binary.')
return os.path.join(ANDROID_SDK_ROOT, 'platform-tools', 'adb')
# Exit codes
ERROR_EXIT_CODE = 1
INFRA_EXIT_CODE = 87
WARNING_EXIT_CODE = 88
| 32.663366
| 80
| 0.68061
|
91f773ac7cd5264e0c54f0bc660fa782c38280d4
| 546
|
py
|
Python
|
recipes/Python/252130_Write_a_plugin_for_ImageJ/recipe-252130.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023
|
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/252130_Write_a_plugin_for_ImageJ/recipe-252130.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32
|
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/252130_Write_a_plugin_for_ImageJ/recipe-252130.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780
|
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
import ij
class Inverter_py(ij.plugin.filter.PlugInFilter):
def setup(self,arg, imp):
"""@sig public int setup(String arg, ij.ImagePlus imp)"""
return ij.plugin.filter.PlugInFilter.DOES_8G
def run(self,ip):
"""@sig public void run(ij.process.ImageProcessor ip)"""
pixels = ip.getPixels()
width = ip.getWidth()
r = ip.getRoi()
for y in range(r.y,r.y+r.height):
for x in range(r.x,r.x+r.width):
i = y*width + x;
pixels[i] = 255-pixels[i]
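# The loop above inverts every 8-bit pixel inside the current ROI: a value v
# becomes 255 - v (0 -> 255, 200 -> 55), which is why setup() restricts the
# filter to DOES_8G (8-bit grayscale) images.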
| 32.117647
| 65
| 0.565934
|
3eb005492896b716fb6755b13ed0fb17aab84128
| 292
|
py
|
Python
|
tests/challenges/test_array_reverse.py
|
aghyadalbalkhi-ASAC/data-structures-and-algorithms-python-401
|
a7ffc3cbb1ffdead1e5a42733eba9f876e0fe57d
|
[
"MIT"
] | null | null | null |
tests/challenges/test_array_reverse.py
|
aghyadalbalkhi-ASAC/data-structures-and-algorithms-python-401
|
a7ffc3cbb1ffdead1e5a42733eba9f876e0fe57d
|
[
"MIT"
] | null | null | null |
tests/challenges/test_array_reverse.py
|
aghyadalbalkhi-ASAC/data-structures-and-algorithms-python-401
|
a7ffc3cbb1ffdead1e5a42733eba9f876e0fe57d
|
[
"MIT"
] | 5
|
2020-11-29T20:49:33.000Z
|
2022-03-20T20:57:38.000Z
|
# put your array_reverse challenge tests here
from data_structures_and_algorithms.challenges.array_reverse.array_reverse import (
reverse_array,
)
# here's a test to get you started
def test_leave_as_is():
actual = reverse_array([1])
expected = [1]
assert actual == expected
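# Additional sketch (assumes reverse_array returns a new reversed list, as the
# challenge name suggests; adjust if the implementation reverses in place).
def test_reverse_multiple_elements():
    actual = reverse_array([1, 2, 3])
    expected = [3, 2, 1]
    assert actual == expected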
| 26.545455
| 83
| 0.753425
|
7f495e63c10abc04d27b4b8aaca8ae60a28f3276
| 1,491
|
py
|
Python
|
opsmgr/common/constants.py
|
open-power-ref-design-toolkit/opsmgr
|
ce9d5dc8a4038f22302a168288a6a4f6683dcd45
|
[
"Apache-2.0"
] | 5
|
2017-05-10T00:32:37.000Z
|
2019-08-21T09:32:01.000Z
|
opsmgr/common/constants.py
|
open-power-ref-design-toolkit/opsmgr
|
ce9d5dc8a4038f22302a168288a6a4f6683dcd45
|
[
"Apache-2.0"
] | null | null | null |
opsmgr/common/constants.py
|
open-power-ref-design-toolkit/opsmgr
|
ce9d5dc8a4038f22302a168288a6a4f6683dcd45
|
[
"Apache-2.0"
] | 3
|
2017-05-10T00:32:40.000Z
|
2018-10-16T19:18:18.000Z
|
# Copyright 2016, IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
OPSMGRLOG = "/var/log/opsmgr/opsmgr.log"
OPSMGRLOG_ERROR = "/var/log/opsmgr/opsmgr_error.log"
OPSMGR_LOG_CONF = "/etc/opsmgr/logging.yaml"
class access_status(Enum):
""" codes assigned for access status field in device_info class
"""
SUCCESS = 0
FAILED_TO_CONNECT = 1
CREDENTIALS_INVALID = 2
DEVICE_TYPE_ERROR = 3
GATHER_IN_PROGRESS = 4
NO_STATUS = 5
# TODO replace usage with exceptions
class validation_codes(Enum):
""" codes returned from the validate() call in resoure_mgr module
"""
SUCCESS = 0
FAILED_TO_CONNECT = 1
CREDENTIALS_INVALID = 2
DEVICE_TYPE_ERROR = 3
class auth_method(Enum):
""" Authentication method used to access a device
"""
USERID_PASSWORD = 0
SSH_KEY_AUTHENTICATION = 1
JSON_IDENTIFIER_ATTR = 'id'
JSON_LABEL_ATTR = 'name'
LOGGING_CAPABLE = 'Logging'
MONITORING_CAPABLE = 'Monitoring'
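# Illustrative sketch only (standard enum round-trip; any surrounding storage
# details are assumptions): callers can persist the integer value of a status
# and later recover the enum member from it.
def _example_status_roundtrip():
    status = access_status.FAILED_TO_CONNECT
    stored = status.value                   # 1, e.g. written to a database column
    return access_status(stored) is status  # True: value lookup returns the member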
| 27.109091
| 74
| 0.729041
|
c699d9548fc2a247840f85ba0b784928ef0f631e
| 5,755
|
py
|
Python
|
heat/engine/clients/progress.py
|
noironetworks/heat
|
7cdadf1155f4d94cf8f967635b98e4012a7acfb7
|
[
"Apache-2.0"
] | 1
|
2020-06-18T01:05:29.000Z
|
2020-06-18T01:05:29.000Z
|
heat/engine/clients/progress.py
|
noironetworks/heat
|
7cdadf1155f4d94cf8f967635b98e4012a7acfb7
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
heat/engine/clients/progress.py
|
noironetworks/heat
|
7cdadf1155f4d94cf8f967635b98e4012a7acfb7
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Helper classes that are simple key-value storages
meant to be passed between handle_* and check_*_complete,
being mutated during subsequent check_*_complete calls.
Some of them impose restrictions on client plugin API, thus they are
put in this client-plugin-agnostic module.
"""
class ServerCreateProgress(object):
def __init__(self, server_id, complete=False):
self.complete = complete
self.server_id = server_id
class UpdateProgressBase(object):
"""Keeps track on particular server update task.
``handler`` is a method of client plugin performing
required update operation.
Its first positional argument must be ``resource_id``
and this method must be resilient to intermittent failures,
returning ``True`` if the API was successfully called, ``False`` otherwise.
If result of API call is asynchronous, client plugin must have
corresponding ``check_<handler>`` method.
Its first positional argument must be ``resource_id``
and it must return ``True`` or ``False`` indicating completeness
of the update operation.
For synchronous API calls,
set ``complete`` attribute of this object to ``True``.
``[handler|checker]_extra`` arguments, if passed to constructor,
should be dictionaries of
{'args': tuple(), 'kwargs': dict()}
structure and contain parameters with which corresponding ``handler`` and
``check_<handler>`` methods of client plugin must be called.
``args`` is automatically prepended with ``resource_id``.
Missing ``args`` or ``kwargs`` are interpreted
as empty tuple/dict respectively.
Defaults are interpreted as both ``args`` and ``kwargs`` being empty.
"""
def __init__(self, resource_id, handler, complete=False, called=False,
handler_extra=None, checker_extra=None):
self.complete = complete
self.called = called
self.handler = handler
self.checker = 'check_%s' % handler
# set call arguments basing on incomplete values and defaults
hargs = handler_extra or {}
self.handler_args = (resource_id,) + (hargs.get('args') or ())
self.handler_kwargs = hargs.get('kwargs') or {}
cargs = checker_extra or {}
self.checker_args = (resource_id,) + (cargs.get('args') or ())
self.checker_kwargs = cargs.get('kwargs') or {}
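# Worked example of the contract described in the docstring (names are
# illustrative only):
#   prg = UpdateProgressBase('srv-1', 'resize',
#                            handler_extra={'args': ('m1.small',)},
#                            checker_extra={'kwargs': {'fuzzy': True}})
# gives prg.handler_args == ('srv-1', 'm1.small'), prg.handler_kwargs == {},
# prg.checker == 'check_resize', prg.checker_args == ('srv-1',) and
# prg.checker_kwargs == {'fuzzy': True}, i.e. the client plugin would later be
# called as plugin.resize('srv-1', 'm1.small') and plugin.check_resize('srv-1', fuzzy=True).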
class ServerUpdateProgress(UpdateProgressBase):
def __init__(self, server_id, handler, complete=False, called=False,
handler_extra=None, checker_extra=None):
super(ServerUpdateProgress, self).__init__(
server_id, handler, complete=complete, called=called,
handler_extra=handler_extra, checker_extra=checker_extra)
self.server_id = server_id
class ContainerUpdateProgress(UpdateProgressBase):
def __init__(self, container_id, handler, complete=False, called=False,
handler_extra=None, checker_extra=None):
super(ContainerUpdateProgress, self).__init__(
container_id, handler, complete=complete, called=called,
handler_extra=handler_extra, checker_extra=checker_extra)
self.container_id = container_id
class ServerDeleteProgress(object):
def __init__(self, server_id, image_id=None, image_complete=True):
self.server_id = server_id
self.image_id = image_id
self.image_complete = image_complete
class VolumeDetachProgress(object):
def __init__(self, srv_id, vol_id, attach_id, task_complete=False):
self.called = task_complete
self.cinder_complete = task_complete
self.nova_complete = task_complete
self.srv_id = srv_id
self.vol_id = vol_id
self.attach_id = attach_id
class VolumeAttachProgress(object):
def __init__(self, srv_id, vol_id, device, task_complete=False):
self.called = task_complete
self.complete = task_complete
self.srv_id = srv_id
self.vol_id = vol_id
self.device = device
class VolumeDeleteProgress(object):
def __init__(self, task_complete=False):
self.backup = {'called': task_complete,
'complete': task_complete}
self.delete = {'called': task_complete,
'complete': task_complete}
self.backup_id = None
class VolumeResizeProgress(object):
def __init__(self, task_complete=False, size=None):
self.called = task_complete
self.complete = task_complete
self.size = size
class VolumeUpdateAccessModeProgress(object):
def __init__(self, task_complete=False, read_only=None):
self.called = task_complete
self.read_only = read_only
class VolumeBackupRestoreProgress(object):
def __init__(self, vol_id, backup_id):
self.called = False
self.complete = False
self.vol_id = vol_id
self.backup_id = backup_id
class PoolDeleteProgress(object):
def __init__(self, task_complete=False):
self.pool = {'delete_called': task_complete,
'deleted': task_complete}
self.vip = {'delete_called': task_complete,
'deleted': task_complete}
| 36.424051
| 78
| 0.686707
|
a2ec550b230b5f686a09a62d3cda4bd503507549
| 3,866
|
py
|
Python
|
examples/demo/gloo/signals.py
|
shjoshi/vispy
|
2f3d169aa60c738467e766c59096f51570483d6f
|
[
"BSD-3-Clause"
] | null | null | null |
examples/demo/gloo/signals.py
|
shjoshi/vispy
|
2f3d169aa60c738467e766c59096f51570483d6f
|
[
"BSD-3-Clause"
] | null | null | null |
examples/demo/gloo/signals.py
|
shjoshi/vispy
|
2f3d169aa60c738467e766c59096f51570483d6f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 2
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Multiple digital signals.
"""
from vispy import gloo
from vispy import app
import numpy as np
import math
m = 20
n = 25000
x = np.tile(np.linspace(-1., 1., n), m)
y = .1 * np.random.randn(m, n)
y += np.arange(m).reshape((-1, 1))
data = np.zeros(n*m, dtype=[
('a_position', np.float32, 2),
('a_color', np.float32, 3),
('a_index', np.float32, 1),
])
data['a_position'] = np.zeros((n*m, 2), dtype=np.float32)
data['a_position'][:, 0] = x
data['a_position'][:, 1] = .9*(y.ravel()/y.max()*2-1)
data['a_color'] = np.repeat(np.random.uniform(size=(m, 3), low=.5, high=.9),
n, axis=0)
data['a_index'] = np.repeat(np.arange(m), n)
VERT_SHADER = """
#version 120
attribute vec2 a_position;
attribute float a_index;
varying float v_index;
attribute vec3 a_color;
varying vec3 v_color;
uniform vec2 u_pan;
uniform vec2 u_scale;
void main() {
vec2 position_tr = u_scale * (a_position + u_pan);
gl_Position = vec4(position_tr, 0.0, 1.0);
v_color = a_color;
v_index = a_index;
}
"""
FRAG_SHADER = """
#version 120
varying vec3 v_color;
varying float v_index;
void main() {
gl_FragColor = vec4(v_color, 1.0);
if ((fract(v_index) > .00001) && (fract(v_index) < .99999))
gl_FragColor.a = 0.;
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, close_keys='escape')
self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
self.program.bind(gloo.VertexBuffer(data))
self.program['u_pan'] = (0., 0.)
self.program['u_scale'] = (1., 1.)
def on_initialize(self, event):
gloo.set_state(clear_color=(1, 1, 1, 1), blend=True,
blend_func=('src_alpha', 'one_minus_src_alpha'))
def on_resize(self, event):
self.width, self.height = event.size
gloo.set_viewport(0, 0, self.width, self.height)
def on_draw(self, event):
gloo.clear(color=(0.0, 0.0, 0.0, 1.0))
self.program.draw('line_strip')
def _normalize(self, x_y):
x, y = x_y
w, h = float(self.width), float(self.height)
return x/(w/2.)-1., y/(h/2.)-1.
def on_mouse_move(self, event):
if event.is_dragging:
x0, y0 = self._normalize(event.press_event.pos)
x1, y1 = self._normalize(event.last_event.pos)
x, y = self._normalize(event.pos)
dx, dy = x - x1, -(y - y1)
button = event.press_event.button
pan_x, pan_y = self.program['u_pan']
scale_x, scale_y = self.program['u_scale']
if button == 1:
self.program['u_pan'] = (pan_x+dx/scale_x, pan_y+dy/scale_y)
elif button == 2:
scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx),
scale_y * math.exp(2.5*dy))
self.program['u_scale'] = (scale_x_new, scale_y_new)
self.program['u_pan'] = (pan_x -
x0 * (1./scale_x - 1./scale_x_new),
pan_y +
y0 * (1./scale_y - 1./scale_y_new))
self.update()
def on_mouse_wheel(self, event):
dx = np.sign(event.delta[1])*.05
scale_x, scale_y = self.program['u_scale']
scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx),
scale_y * math.exp(2.5*dx))
self.program['u_scale'] = (scale_x_new, scale_y_new)
self.update()
if __name__ == '__main__':
c = Canvas()
c.show()
app.run()
| 29.51145
| 77
| 0.55044
|
b2c09e1dffb7a39eee87ceb9f275da7b4fefed2a
| 2,131
|
py
|
Python
|
astroquery/xmatch/tests/test_xmatch_remote.py
|
eteq/astroquery
|
70db53f8f047a2ee3481fd3242e6b364bc1ca639
|
[
"BSD-3-Clause"
] | 1
|
2021-03-20T00:07:01.000Z
|
2021-03-20T00:07:01.000Z
|
astroquery/xmatch/tests/test_xmatch_remote.py
|
eteq/astroquery
|
70db53f8f047a2ee3481fd3242e6b364bc1ca639
|
[
"BSD-3-Clause"
] | 1
|
2016-01-15T14:46:02.000Z
|
2016-01-15T14:46:02.000Z
|
astroquery/xmatch/tests/test_xmatch_remote.py
|
hamogu/astroquery
|
9a2d1a2ecc4dbfafa6a39cf7a180bcf831a6266a
|
[
"BSD-3-Clause"
] | 1
|
2021-03-20T00:07:05.000Z
|
2021-03-20T00:07:05.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os.path
from astropy.tests.helper import pytest, remote_data
from astropy.table import Table
from astropy.units import arcsec
from ...xmatch import XMatch
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
# fixture only used here to save creating XMatch instances in each
# of the following test functions
@pytest.fixture
def xmatch():
return XMatch()
@remote_data
def test_xmatch_avail_tables(xmatch):
tables = xmatch.get_available_tables()
assert tables
# those example tables are from
# http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html
assert 'II/311/wise' in tables
assert 'II/246/out' in tables
@remote_data
def test_xmatch_is_avail_table(xmatch):
assert xmatch.is_table_available('II/311/wise')
assert xmatch.is_table_available('II/246/out')
assert not xmatch.is_table_available('vizier:II/311/wise')
@remote_data
def test_xmatch_query(xmatch):
with open(os.path.join(DATA_DIR, 'posList.csv')) as pos_list:
table = xmatch.query(
cat1=pos_list, cat2='vizier:II/246/out', max_distance=5 * arcsec,
colRA1='ra', colDec1='dec')
assert isinstance(table, Table)
assert table.colnames == [
'angDist', 'ra', 'dec', '2MASS', 'RAJ2000', 'DEJ2000',
'errHalfMaj', 'errHalfMin', 'errPosAng', 'Jmag', 'Hmag', 'Kmag',
'e_Jmag', 'e_Hmag', 'e_Kmag', 'Qfl', 'Rfl', 'X', 'MeasureJD']
assert len(table) == 11
@remote_data
def test_xmatch_query_astropy_table(xmatch):
datapath = os.path.join(DATA_DIR, 'posList.csv')
input_table = Table.read(datapath, names=['ra', 'dec'],
format='ascii.csv')
table = xmatch.query(
cat1=input_table, cat2='vizier:II/246/out', max_distance=5 * arcsec)
assert isinstance(table, Table)
assert table.colnames == [
'angDist', 'ra', 'dec', '2MASS', 'RAJ2000', 'DEJ2000',
'errHalfMaj', 'errHalfMin', 'errPosAng', 'Jmag', 'Hmag', 'Kmag',
'e_Jmag', 'e_Hmag', 'e_Kmag', 'Qfl', 'Rfl', 'X', 'MeasureJD']
assert len(table) == 11
| 32.784615
| 77
| 0.666823
|