hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace4028edfb1ead3b3cd78397759ea3dbef18600 | 796 | py | Python | Chapter2/TCP_server.py | ezequielsvelez/BlackHatPython | 43a306caffb5446b37038a4187804405a2f87dab | [
"MIT"
] | null | null | null | Chapter2/TCP_server.py | ezequielsvelez/BlackHatPython | 43a306caffb5446b37038a4187804405a2f87dab | [
"MIT"
] | null | null | null | Chapter2/TCP_server.py | ezequielsvelez/BlackHatPython | 43a306caffb5446b37038a4187804405a2f87dab | [
"MIT"
] | null | null | null | import socket
import threading
# Listen on all interfaces, port 9999 (demo TCP server).
bind_ip = "0.0.0.0"
bind_port = 9999

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
# Allow up to 5 queued, not-yet-accepted connections.
server.listen(5)
print("[*] Listening on %s:%d" % (bind_ip, bind_port))
# this is our client-handling thread
def handle_client(client_socket):
    """Serve one client: log whatever it sent, reply with an ACK, disconnect.

    The socket is always closed before returning.
    """
    # Read a single chunk (up to 1 KiB) from the peer and echo it locally.
    data = client_socket.recv(1024)
    print("[*] Received: %s" % data.decode())

    # Acknowledge receipt with a fixed reply, then drop the connection.
    client_socket.send("ACK!".encode())
    client_socket.close()
# Accept loop: block until a client connects, then hand the connection off
# to a worker thread so the server can keep accepting.
while True:
    client, addr = server.accept()
    print("[*] Accept connection from %s:%d"% (addr[0],addr[1]))
    # spin up our client thread to handle incoming data
    client_handle = threading.Thread(target=handle_client, args=(client,))
    client_handle.start() | 23.411765 | 74 | 0.69598 |
ace402d786d1981c13338822ad4e9ac838fa5007 | 257 | py | Python | scripts/fix_cc.py | joshualemmon/db-error-detection-gan | 27d49e0eb0b54b0b3a03efac8803355b2c0455bd | [
"MIT"
] | 1 | 2021-01-06T16:34:48.000Z | 2021-01-06T16:34:48.000Z | scripts/fix_cc.py | joshualemmon/db-error-detection-gan | 27d49e0eb0b54b0b3a03efac8803355b2c0455bd | [
"MIT"
] | null | null | null | scripts/fix_cc.py | joshualemmon/db-error-detection-gan | 27d49e0eb0b54b0b3a03efac8803355b2c0455bd | [
"MIT"
] | null | null | null | import pandas as pd
def main():
    """Read the credit-card CSV, mark every row clean, cast all columns to
    float (including the new flag column), and write the result out.
    """
    frame = pd.read_csv('../Data/csv/creditcard.csv')
    print(frame.dtypes)

    # New flag column: 0 marks each row as clean; cast to float with the rest.
    frame['is_dirty'] = 0
    for column in frame.columns:
        frame[column] = frame[column].astype(float)

    frame.to_csv('../Data/csv/cc.csv', index=False)
# Run the conversion only when executed as a script (not on import).
if __name__ == "__main__":
    main() | 23.363636 | 47 | 0.657588 |
ace402e1e61e1b2806a648b0b0b335217b55e97c | 1,016 | py | Python | awacs/aps.py | alanjjenkins/awacs | 0065e1833eae6a6070edb4ab4f180fd10b26c19a | [
"BSD-2-Clause"
] | null | null | null | awacs/aps.py | alanjjenkins/awacs | 0065e1833eae6a6070edb4ab4f180fd10b26c19a | [
"BSD-2-Clause"
] | null | null | null | awacs/aps.py | alanjjenkins/awacs | 0065e1833eae6a6070edb4ab4f180fd10b26c19a | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
# Human-readable AWS service name and the IAM action/ARN prefix used below.
service_name = "Amazon Managed Service for Prometheus"
prefix = "aps"
class Action(BaseAction):
    """An IAM action in the ``aps`` namespace (e.g. ``aps:QueryMetrics``)."""

    # NOTE(review): the None default suggests the annotation should be
    # Optional[str] rather than str — confirm against BaseAction's contract.
    def __init__(self, action: str = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """An ARN builder permanently scoped to the ``aps`` service prefix."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        # Delegate to BaseARN with the service component pinned to this
        # module's prefix; keyword order is irrelevant to behavior.
        super().__init__(service=prefix, account=account, region=region, resource=resource)
CreateWorkspace = Action("CreateWorkspace")
DeleteWorkspace = Action("DeleteWorkspace")
DescribeWorkspace = Action("DescribeWorkspace")
GetLabels = Action("GetLabels")
GetMetricMetadata = Action("GetMetricMetadata")
GetSeries = Action("GetSeries")
ListWorkspaces = Action("ListWorkspaces")
QueryMetrics = Action("QueryMetrics")
RemoteWrite = Action("RemoteWrite")
UpdateWorkspaceAlias = Action("UpdateWorkspaceAlias")
| 29.028571 | 88 | 0.723425 |
ace403b0a6f34ea67ccb817637f5b28bd9226f0f | 1,057 | py | Python | MRPO/examples/manual.py | hai-h-nguyen/pomdp-baselines | 629180d56641810d99653a116cca41ede65172eb | [
"MIT"
] | 40 | 2021-10-15T14:53:00.000Z | 2022-03-31T02:27:20.000Z | MRPO/examples/manual.py | hai-h-nguyen/pomdp-baselines | 629180d56641810d99653a116cca41ede65172eb | [
"MIT"
] | 1 | 2022-03-13T04:02:30.000Z | 2022-03-13T04:02:30.000Z | MRPO/examples/manual.py | hai-h-nguyen/pomdp-baselines | 629180d56641810d99653a116cca41ede65172eb | [
"MIT"
] | 5 | 2021-11-28T04:08:13.000Z | 2022-03-17T02:33:51.000Z | import argparse
import gym
from gym.utils.play import play
import sunblaze_envs
def main():
    """Play a Sunblaze gym environment interactively, printing the sampled
    environment parameters at episode start and the running reward."""
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument("--environment", type=str, default="SunblazeBreakout-v0")
    parser.add_argument("--seed", type=int, default=0)
    args = parser.parse_args()

    # Mutable per-episode state shared with the reporter closure below.
    episode = {"reward": 0, "initial": True}

    env = gym.make(args.environment)
    env.seed(args.seed)

    def reporter(obs_t, obs_tp1, action, reward, done, info):
        # Callback invoked by gym.utils.play after every environment step.
        if episode["initial"]:
            # First step of a new episode: dump the environment's parameters.
            # NOTE(review): `unwrapped.parameters` is provided by sunblaze
            # environments, not standard gym — confirm for other env ids.
            episode["initial"] = False
            print("Environment parameters:")
            for key in sorted(env.unwrapped.parameters.keys()):
                print(" {}: {}".format(key, env.unwrapped.parameters[key]))
        episode["reward"] += reward
        if reward != 0:
            print("Reward:", episode["reward"])
        if done:
            print("*** GAME OVER ***")
            episode["reward"] = 0
            episode["initial"] = True

    play(env, callback=reporter)
# Script entry point.
if __name__ == '__main__':
    main()
| 26.425 | 81 | 0.597919 |
ace4050ea3968b4713213d345152a99b60be24d4 | 711 | py | Python | module/please1.py | Rep0f/ewe | 23b8794823d8c6a57bb88157157333ed186c6f87 | [
"Apache-2.0"
] | null | null | null | module/please1.py | Rep0f/ewe | 23b8794823d8c6a57bb88157157333ed186c6f87 | [
"Apache-2.0"
] | null | null | null | module/please1.py | Rep0f/ewe | 23b8794823d8c6a57bb88157157333ed186c6f87 | [
"Apache-2.0"
] | null | null | null | import os, sys
# Banner prompts (Indonesian: "enter username & password" / contact info).
print ("\033[1;32mMasukan UserName&Password:)")
print ("\033[1;31;1mKalo Gak Tau Pm Mr. REP 081906222975")

# SECURITY: credentials are hard-coded in plaintext in the source.
username = 'REP'
password = 'Gans'
def restart():
    """Re-execute this script in place (os.execl replaces the process and
    never returns)."""
    # "ngulang" = "repeat" (Indonesian): path of the running interpreter.
    ngulang = sys.executable
    os.execl(ngulang, ngulang, *sys.argv)
def main():
    """Prompt for credentials and gate access; restart on any failure.

    NOTE: raw_input and print statements make this Python 2 only.
    """
    uname = raw_input("username : ")
    if uname == username:
        pwd = raw_input("password : ")
        if pwd == password:
            # Trailing comma suppresses the newline (Python 2 print).
            print "\n\033[1;34mHello Welcome To Tools REP",
            sys.exit()
        else:
            print "\n\033[1;36mSorry Invalid Password !!!\033[00m"
            print "Back Login\n"
            restart()
    else:
        print "\n\033[1;36mSorry Invalid Username !!!\033[00m"
        print "Back Login\n"
        restart()
# Entry: on Ctrl-C, clear the terminal and re-launch the login prompt.
try:
    main()
except KeyboardInterrupt:
    os.system('clear')
    restart() | 20.314286 | 58 | 0.658228 |
ace405750407350ecc74228dfbdb2b2270afc9b2 | 206 | py | Python | Maximize It!.py | devanshsharma-bigdata/HackerRank-Solutions | 74a073d250c305f6ff2f964c20405851a2e3bc2c | [
"MIT"
] | null | null | null | Maximize It!.py | devanshsharma-bigdata/HackerRank-Solutions | 74a073d250c305f6ff2f964c20405851a2e3bc2c | [
"MIT"
] | null | null | null | Maximize It!.py | devanshsharma-bigdata/HackerRank-Solutions | 74a073d250c305f6ff2f964c20405851a2e3bc2c | [
"MIT"
] | null | null | null | K,M=input().split()
sqrt_arr=[]
for i in range(int(K)):
L = list(map(int, input().split()))
N=L[0]
L.remove(L[0])
max_L=max(L)
sqrt_arr.append(max_L**2)
print(sum(sqrt_arr)%int(M))
| 18.727273 | 39 | 0.567961 |
ace405fbd36f772b5fafa1dd72887749090f52f6 | 746 | py | Python | riffdog_aws/config.py | riffdog/riffdog_aws | 9e2819b559389e6a77e3ec745fd4add183136083 | [
"MIT"
] | 1 | 2020-02-14T14:54:55.000Z | 2020-02-14T14:54:55.000Z | riffdog_aws/config.py | riffdog/riffdog_aws | 9e2819b559389e6a77e3ec745fd4add183136083 | [
"MIT"
] | 8 | 2020-02-14T16:45:05.000Z | 2020-03-07T09:54:10.000Z | riffdog_aws/config.py | riffdog/riffdog_aws | 9e2819b559389e6a77e3ec745fd4add183136083 | [
"MIT"
] | null | null | null | import os
from riffdog.config import RDConfig
DEFAULT_REGION="us-east-1"
def add_args(parser):
    """Register the AWS-specific command line options on *parser*.

    ``--aws_region`` may be given multiple times; argparse collects the
    values into a list (``action='append'``).
    """
    aws_group = parser.add_argument_group('AWS Resource')
    aws_group.add_argument('--aws_region', help="AWS regions to use", action='append')
def config():
    """Populate the riffdog config with the default AWS regions."""
    # NOTE(review): RDConfig appears to act as a shared/singleton config
    # object — the local binding exists only to set aws_regions on it; the
    # return value is unused. Confirm against riffdog.config.RDConfig.
    config = RDConfig()
    config.aws_regions = get_default_regions()
def get_default_regions():
    """
    Return the list of default AWS regions.

    Precedence:
    - the AWS_DEFAULT_REGION environment variable, when set and non-empty
    - otherwise the module-level DEFAULT_REGION fallback ("us-east-1")

    Any ``--aws_region`` command line values are handled by the caller,
    not by this function.
    """
    env_region = os.environ.get('AWS_DEFAULT_REGION')
    if env_region:
        # A non-empty environment variable wins over the static fallback
        # (truthiness already covers both None and the empty string).
        return [env_region]
    return [DEFAULT_REGION]
ace4066ae3bb32036f7ec0ac22bd279394ff33f4 | 1,289 | py | Python | tests/components/lutron_caseta/__init__.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/lutron_caseta/__init__.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | tests/components/lutron_caseta/__init__.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for the Lutron Caseta integration."""
class MockBridge:
"""Mock Lutron bridge that emulates configured connected status."""
def __init__(self, can_connect=True):
"""Initialize MockBridge instance with configured mock connectivity."""
self.can_connect = can_connect
self.is_currently_connected = False
self.buttons = {}
self.areas = {}
self.occupancy_groups = {}
self.scenes = self.get_scenes()
self.devices = self.get_devices()
async def connect(self):
"""Connect the mock bridge."""
if self.can_connect:
self.is_currently_connected = True
def is_connected(self):
"""Return whether the mock bridge is connected."""
return self.is_currently_connected
def get_devices(self):
"""Return devices on the bridge."""
return {
"1": {"serial": 1234, "name": "bridge", "model": "model", "type": "type"}
}
def get_devices_by_domain(self, domain):
"""Return devices on the bridge."""
return {}
def get_scenes(self):
"""Return scenes on the bridge."""
return {}
async def close(self):
"""Close the mock bridge connection."""
self.is_currently_connected = False
| 29.976744 | 85 | 0.608223 |
ace40701a2beab1045d0c8c6fd8197f1282772df | 43 | py | Python | graphmodels/information/cpp/__init__.py | DLunin/pygraphmodels | 4ea8ebed74f3a7d5d56af4d5f189a514aab420f9 | [
"MIT"
] | null | null | null | graphmodels/information/cpp/__init__.py | DLunin/pygraphmodels | 4ea8ebed74f3a7d5d56af4d5f189a514aab420f9 | [
"MIT"
] | null | null | null | graphmodels/information/cpp/__init__.py | DLunin/pygraphmodels | 4ea8ebed74f3a7d5d56af4d5f189a514aab420f9 | [
"MIT"
] | null | null | null | from .info_calculator import InfoCalculator | 43 | 43 | 0.906977 |
ace40847cf8c2c06cf6e22db8e51641f379d89a7 | 2,254 | py | Python | src/build/linux/install-arm-sysroot.py | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | 9 | 2018-09-21T05:36:12.000Z | 2021-11-15T15:14:36.000Z | src/build/linux/install-arm-sysroot.py | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | null | null | null | src/build/linux/install-arm-sysroot.py | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | 3 | 2018-11-28T14:54:13.000Z | 2020-07-02T07:36:07.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to install arm choot image for cross building of arm chrome on linux.
# This script can be run manually but is more often run as part of gclient
# hooks. When run from hooks this script should be a no-op on non-linux
# platforms.
# The sysroot image could be constructed from scratch based on the current
# state or precise/arm but for consistency we currently use a pre-built root
# image which was originally designed for building trusted NaCl code. The image
# will normally need to be rebuilt every time chrome's build dependancies are
# changed.
import os
import shutil
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
URL_PREFIX = 'https://commondatastorage.googleapis.com'
URL_PATH = 'nativeclient-archive2/toolchain'
REVISION = 10991
def main(args):
if '--linux-only' in args:
# This argument is passed when run from the gclient hooks.
# In this case we return early on non-linux platforms
# or if GYP_DEFINES doesn't include target_arch=arm
if not sys.platform.startswith('linux'):
return 0
if "target_arch=arm" not in os.environ.get('GYP_DEFINES', ''):
return 0
src_root = os.path.dirname(os.path.dirname(SCRIPT_DIR))
sysroot = os.path.join(src_root, 'arm-sysroot')
url = "%s/%s/%s/naclsdk_linux_arm-trusted.tgz" % (URL_PREFIX,
URL_PATH, REVISION)
stamp = os.path.join(sysroot, ".stamp")
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
print "ARM root image already up-to-date: %s" % sysroot
return 0
print "Installing ARM root image: %s" % sysroot
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, 'naclsdk_linux_arm-trusted.tgz')
subprocess.check_call(['curl', '-L', url, '-o', tarball])
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 33.147059 | 79 | 0.696096 |
ace4084c1311857388d5af42e994018219ef254b | 2,697 | py | Python | stochastic/processes/diffusion/constant_elasticity_variance.py | zaczw/stochastic | 7de6ec2f9050120adfcffeebc94bfc17ec916150 | [
"MIT"
] | 268 | 2018-01-17T18:45:20.000Z | 2022-03-28T06:05:30.000Z | stochastic/processes/diffusion/constant_elasticity_variance.py | zaczw/stochastic | 7de6ec2f9050120adfcffeebc94bfc17ec916150 | [
"MIT"
] | 42 | 2018-07-11T02:17:43.000Z | 2021-11-27T03:27:32.000Z | stochastic/processes/diffusion/constant_elasticity_variance.py | zaczw/stochastic | 7de6ec2f9050120adfcffeebc94bfc17ec916150 | [
"MIT"
] | 56 | 2018-02-20T09:32:50.000Z | 2022-02-15T15:39:37.000Z | """Constant elasticity of variance (CEV) process."""
from stochastic.processes.diffusion.diffusion import DiffusionProcess
from stochastic.utils import ensure_single_arg_constant_function
from stochastic.utils import single_arg_constant_function
from stochastic.utils.validation import check_numeric
class ConstantElasticityVarianceProcess(DiffusionProcess):
r"""Constant elasticity of variance process.
.. image:: _static/constant_elasticity_variance_process.png
:scale: 50%
The process :math:`X_t` that satisfies the following stochastic
differential equation with Wiener process :math:`W_t`:
.. math::
dX_t = \mu X_t dt + \sigma X_t^\gamma dW_t
Realizations are generated using the Euler-Maruyama method.
.. note::
Since the family of diffusion processes have parameters which
generalize to functions of ``t``, parameter attributes will be returned
as callables, even if they are initialized as constants. e.g. a
``speed`` parameter of 1 accessed from an instance attribute will return
a function which accepts a single argument and always returns 1.
:param float drift: the drift coefficient, or :math:`\mu` above
:param float vol: the volatility coefficient, or :math:`\sigma` above
:param float volexp: the volatility-price exponent, or :math:`\gamma` above
:param float t: the right hand endpoint of the time interval :math:`[0,t]`
for the process
:param numpy.random.Generator rng: a custom random number generator
"""
def __init__(self, drift=1, vol=1, volexp=1, t=1, rng=None):
super().__init__(
speed=single_arg_constant_function(-drift),
mean=single_arg_constant_function(1),
vol=single_arg_constant_function(vol),
volexp=single_arg_constant_function(volexp),
t=t,
rng=rng,
)
self.drift = drift
def __str__(self):
return "Constant elasticity of variance process with drift={m}, vol={v}, volexp={e} on [0, {t}]".format(
m=str(self.drift), v=str(self.vol), e=str(self.volexp), t=str(self.t)
)
def __repr__(self):
return "ConstantElasticityVarianceProcess(drift={d}, vol={v}, volexp={e}, t={t})".format(
v=str(self.vol), d=str(self.drift), e=str(self.volexp), t=str(self.t)
)
@property
def drift(self):
"""Drift, or Mu."""
return self._drift
@drift.setter
def drift(self, value):
check_numeric(value, "Drift coefficient.")
self._drift = ensure_single_arg_constant_function(value)
self.speed = ensure_single_arg_constant_function(-value)
| 38.528571 | 112 | 0.678532 |
ace408c780f3ec32fd6ba0ec6305f75bbbc7a187 | 409 | py | Python | mouse_message_filter.py | Dreamsorcerer/bthidhub | bcee84c39e70dbb1122c47b915729c0dfa0d3923 | [
"MIT"
] | null | null | null | mouse_message_filter.py | Dreamsorcerer/bthidhub | bcee84c39e70dbb1122c47b915729c0dfa0d3923 | [
"MIT"
] | null | null | null | mouse_message_filter.py | Dreamsorcerer/bthidhub | bcee84c39e70dbb1122c47b915729c0dfa0d3923 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 ruundii. All rights reserved.
#
from typing import Optional
from hid_message_filter import HIDMessageFilter
class MouseMessageFilter(HIDMessageFilter):
def filter_message_to_host(self, msg: bytes) -> Optional[bytes]:
if len(msg) != 7:
return None
return b'\xa1\x03' + msg
def filter_message_from_host(self, msg: bytes) -> None:
return None
| 25.5625 | 68 | 0.691932 |
ace4099ded08a64959db032505b6027c7120113d | 992 | py | Python | setup.py | mykytarudenko/django_dramatiq | 181979d46dfb2063dc58504d909ed4377158e724 | [
"Apache-2.0"
] | null | null | null | setup.py | mykytarudenko/django_dramatiq | 181979d46dfb2063dc58504d909ed4377158e724 | [
"Apache-2.0"
] | null | null | null | setup.py | mykytarudenko/django_dramatiq | 181979d46dfb2063dc58504d909ed4377158e724 | [
"Apache-2.0"
] | null | null | null | import os
from setuptools import setup
def rel(*xs):
return os.path.join(os.path.abspath(os.path.dirname(__file__)), *xs)
with open(rel("django_dramatiq", "__init__.py"), "r") as f:
version_marker = "__version__ = "
for line in f:
if line.startswith(version_marker):
_, version = line.split(version_marker)
version = version.strip().strip('"')
break
else:
raise RuntimeError("Version marker not found.")
setup(
name="django_dramatiq",
version=version,
description="A Django app for Dramatiq.",
long_description="Visit https://github.com/Bogdanp/django_dramatiq for more information.",
packages=[
"django_dramatiq",
"django_dramatiq.management",
"django_dramatiq.management.commands",
"django_dramatiq.migrations",
],
install_requires=[
"django>=1.11",
"dramatiq>=0.18.0",
],
python_requires=">=3.5",
include_package_data=True,
)
| 25.435897 | 94 | 0.634073 |
ace40aaaefa5a334136724f628684ffa7ad12497 | 12,475 | py | Python | code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20057_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20057_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response20057_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | """
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response20055_data_validity import InlineResponse20055DataValidity
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response20057_status import InlineResponse20057Status
globals()['InlineResponse20055DataValidity'] = InlineResponse20055DataValidity
globals()['InlineResponse20057Status'] = InlineResponse20057Status
class InlineResponse20057Data(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'id': (str,), # noqa: E501
'validity': (InlineResponse20055DataValidity,), # noqa: E501
'source_wkn': (str,), # noqa: E501
'status': (InlineResponse20057Status,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'validity': 'validity', # noqa: E501
'source_wkn': 'sourceWKN', # noqa: E501
'status': 'status', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse20057Data - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): Identifier of the instrument.. [optional] # noqa: E501
validity (InlineResponse20055DataValidity): [optional] # noqa: E501
source_wkn (str): The source WKN that the instrument is translated for.. [optional] # noqa: E501
status (InlineResponse20057Status): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse20057Data - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): Identifier of the instrument.. [optional] # noqa: E501
validity (InlineResponse20055DataValidity): [optional] # noqa: E501
source_wkn (str): The source WKN that the instrument is translated for.. [optional] # noqa: E501
status (InlineResponse20057Status): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 45.199275 | 124 | 0.58517 |
ace40b2efc88876ed3db3deb347aaa3169b661de | 27,720 | py | Python | stumpy/maamp.py | vaish-muk/stumpy | 978e9ff169630c9a108917dc9b06e5c40aa5f090 | [
"BSD-3-Clause"
] | null | null | null | stumpy/maamp.py | vaish-muk/stumpy | 978e9ff169630c9a108917dc9b06e5c40aa5f090 | [
"BSD-3-Clause"
] | null | null | null | stumpy/maamp.py | vaish-muk/stumpy | 978e9ff169630c9a108917dc9b06e5c40aa5f090 | [
"BSD-3-Clause"
] | null | null | null | # STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
import logging
import numpy as np
from numba import njit, prange
from functools import partial
from . import core, config, mstump
logger = logging.getLogger(__name__)
def _multi_mass_absolute(Q, T, m, Q_subseq_isfinite, T_subseq_isfinite):
    """
    A multi-dimensional wrapper around "Mueen's Algorithm for Similarity
    Search" (MASS) absolute to compute the multi-dimensional non-normalized
    (i.e., without z-normalization) distance profile, one dimension at a time.

    Parameters
    ----------
    Q : numpy.ndarray
        Query array or subsequence

    T : numpy.ndarray
        Time series array or sequence

    m : int
        Window size

    Q_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `Q` contains a
        `np.nan`/`np.inf` value (False)

    T_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T` contains a
        `np.nan`/`np.inf` value (False)

    Returns
    -------
    D : numpy.ndarray
        Multi-dimensional non-normalized (i.e., without z-normalization)
        distance profile
    """
    d, n = T.shape
    n_subseqs = n - m + 1
    D = np.empty((d, n_subseqs), dtype=np.float64)

    for dim in range(d):
        if not np.all(Q_subseq_isfinite[dim]):
            # Any non-finite value in this dimension's query invalidates
            # the entire distance-profile row.
            D[dim] = np.inf
        else:
            D[dim] = core.mass_absolute(Q[dim], T[dim])
            # Mask positions whose `T` subsequence contains non-finite values.
            D[dim][~T_subseq_isfinite[dim]] = np.inf

    return D
def maamp_subspace(
    T,
    m,
    subseq_idx,
    nn_idx,
    k,
    include=None,
    discords=False,
    discretize_func=None,
    n_bit=8,
):
    """
    Compute the `k`-dimensional matrix profile subspace for a given
    subsequence index and its nearest neighbor index (non-normalized, i.e.,
    without z-normalization).

    Parameters
    ----------
    T : numpy.ndarray
        The time series or sequence for which the multi-dimensional matrix
        profile and indices were computed
    m : int
        Window size
    subseq_idx : int
        The subsequence index in `T`
    nn_idx : int
        The nearest neighbor index in `T`
    k : int
        The subset number of dimensions out of `D = T.shape[0]`-dimensions to
        return the subspace for
    include : numpy.ndarray, default None
        A list of (zero-based) indices corresponding to the dimensions in `T`
        that must be included in the constrained multidimensional motif search.
        See Section IV D in `DOI: 10.1109/ICDM.2017.66`.
    discords : bool, default False
        When set to `True`, this reverses the distance profile to favor
        discords rather than motifs. Indices in `include` are still respected.
    discretize_func : func, default None
        A function for discretizing each input array. When this is `None`, a
        min/max scaler over the finite values of `T` is applied
        (see `_maamp_discretize`).
    n_bit : int, default 8
        The number of bits used for discretization. See Figure 4 in
        `DOI: 10.1109/ICDM.2016.0069` and Figure 2 in
        `DOI: 10.1109/ICDM.2011.54`.

    Returns
    -------
    S : numpy.ndarray
        The `k`th-dimensional subspace for the subsequence at `subseq_idx`
    """
    T = np.asarray(core.transpose_dataframe(T.copy()))
    core.check_dtype(T)
    core.check_window_size(m, max_size=T.shape[-1])

    subseqs, _ = core.preprocess_non_normalized(T[:, subseq_idx : subseq_idx + m], m)
    neighbors, _ = core.preprocess_non_normalized(T[:, nn_idx : nn_idx + m], m)

    if discretize_func is None:
        # Default discretizer: min/max scaling over the finite values of T
        finite_vals = T[np.isfinite(T)]
        discretize_func = partial(
            _maamp_discretize,
            a_min=finite_vals.min(),
            a_max=finite_vals.max(),
            n_bit=n_bit,
        )

    # One distance per dimension between the discretized subsequence pair
    dist_per_dim = np.linalg.norm(
        discretize_func(subseqs) - discretize_func(neighbors), axis=1
    )

    return mstump._subspace(dist_per_dim, k, include=include, discords=discords)
def _maamp_discretize(a, a_min, a_max, n_bit=8): # pragma: no cover
"""
Discretize each row of the input array
This distribution is best suited for non-normalized time seris data
Parameters
----------
a : numpy.ndarray
The input array
a_min : float
The minimum value
a_max : float
The maximum value
n_bit : int, default 8
The number of bits to use for computing the bit size
Returns
-------
out : numpy.ndarray
Discretized array
"""
return (
np.round(((a - a_min) / (a_max - a_min)) * ((2**n_bit) - 1.0)).astype(
np.int64
)
+ 1
)
def maamp_mdl(
    T,
    m,
    subseq_idx,
    nn_idx,
    include=None,
    discords=False,
    discretize_func=None,
    n_bit=8,
):
    """
    Compute, via the minimum description length (MDL), the number of bits
    needed to compress one multi-dimensional subsequence with another along
    each of the k-dimensions (non-normalized, i.e., without z-normalization).

    Parameters
    ----------
    T : numpy.ndarray
        The time series or sequence for which the multi-dimensional matrix
        profile and indices were computed
    m : int
        Window size
    subseq_idx : numpy.ndarray
        The multi-dimensional subsequence indices in `T`
    nn_idx : numpy.ndarray
        The multi-dimensional nearest neighbor indices in `T`
    include : numpy.ndarray, default None
        A list of (zero-based) indices corresponding to the dimensions in `T`
        that must be included in the constrained multidimensional motif search.
        See Section IV D in `DOI: 10.1109/ICDM.2017.66`.
    discords : bool, default False
        When set to `True`, this reverses the distance profile to favor
        discords rather than motifs. Indices in `include` are still respected.
    discretize_func : func, default None
        A function for discretizing each input array. When this is `None`, a
        min/max scaler over the finite values of `T` is applied
        (see `_maamp_discretize`).
    n_bit : int, default 8
        The number of bits used for discretization and for computing the bit
        size. See Figure 4 in `DOI: 10.1109/ICDM.2016.0069` and Figure 2 in
        `DOI: 10.1109/ICDM.2011.54`.

    Returns
    -------
    bit_sizes : numpy.ndarray
        The total number of bits computed from MDL for representing each pair
        of multidimensional subsequences.
    S : list
        A list of numpy.ndarrays that contains the `k`th-dimensional subspaces
    """
    T = np.asarray(core.transpose_dataframe(T.copy()))
    core.check_dtype(T)
    core.check_window_size(m, max_size=T.shape[-1])

    if discretize_func is None:
        # Default discretizer: min/max scaling over the finite values of T
        finite_vals = T[np.isfinite(T)]
        discretize_func = partial(
            _maamp_discretize,
            a_min=finite_vals.min(),
            a_max=finite_vals.max(),
            n_bit=n_bit,
        )

    n_dims = T.shape[0]
    bit_sizes = np.empty(n_dims)
    S = [None] * n_dims
    for k in range(n_dims):
        disc_subseqs = discretize_func(
            core.preprocess_non_normalized(
                T[:, subseq_idx[k] : subseq_idx[k] + m], m
            )[0]
        )
        disc_neighbors = discretize_func(
            core.preprocess_non_normalized(T[:, nn_idx[k] : nn_idx[k] + m], m)[0]
        )

        # Per-dimension distance between the discretized pair drives the
        # k-dimensional subspace selection
        D = np.linalg.norm(disc_subseqs - disc_neighbors, axis=1)
        S[k] = mstump._subspace(D, k, include=include, discords=discords)
        bit_sizes[k] = mstump._mdl(disc_subseqs, disc_neighbors, S[k], n_bit=n_bit)

    return bit_sizes, S
def _maamp_multi_distance_profile(
    query_idx, T_A, T_B, m, excl_zone, T_B_subseq_isfinite, include=None, discords=False
):
    """
    Compute the multi-dimensional non-normalized (i.e., without
    z-normalization) distance profile for the query window at `query_idx`
    in `T_B` against `T_A`. Essentially a convenience wrapper around
    `_multi_mass_absolute`.

    Parameters
    ----------
    query_idx : int
        The window index to calculate the multi-dimensional distance profile
    T_A : numpy.ndarray
        The time series or sequence for which the multi-dimensional distance
        profile will be returned
    T_B : numpy.ndarray
        The time series or sequence that contains your query subsequences
    m : int
        Window size
    excl_zone : int
        The half width for the exclusion zone relative to the `query_idx`
    T_B_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_B` contains
        a `np.nan`/`np.inf` value (False)
    include : numpy.ndarray, default None
        Dimensions that must be included in the constrained multidimensional
        motif search. See Section IV D in `DOI: 10.1109/ICDM.2017.66`.
    discords : bool, default False
        When set to `True`, this reverses the distance profile to favor
        discords rather than motifs. Indices in `include` are still respected.

    Returns
    -------
    D : numpy.ndarray
        Multi-dimensional distance profile for the window at `query_idx`
    """
    n_dims, n = T_A.shape
    n_subseqs = n - m + 1

    D = _multi_mass_absolute(
        T_B[:, query_idx : query_idx + m],
        T_A,
        m,
        T_B_subseq_isfinite[:, query_idx],
        T_B_subseq_isfinite,
    )

    # Dimensions listed in `include` are pinned to the top rows before sorting
    start_row_idx = 0
    if include is not None:
        mstump._apply_include(D, include)
        start_row_idx = include.shape[0]

    # Stable column-wise sort of the remaining rows (descending for discords,
    # achieved by sorting a reversed view in place)
    if discords:
        D[start_row_idx:][::-1].sort(axis=0, kind="mergesort")
    else:
        D[start_row_idx:].sort(axis=0, kind="mergesort")

    # Replace row i with the running mean of the first i+1 (sorted) rows
    running_total = np.zeros(n_subseqs, dtype=np.float64)
    for row in range(n_dims):
        running_total += D[row]
        D[row] = running_total / (row + 1)

    core.apply_exclusion_zone(D, query_idx, excl_zone, np.inf)

    return D
def maamp_multi_distance_profile(query_idx, T, m, include=None, discords=False):
    """
    Compute the multi-dimensional non-normalized (i.e., without
    z-normalization) distance profile for the query window at `query_idx`
    within the time series `T` (self-join).

    Parameters
    ----------
    query_idx : int
        The window index to calculate the multi-dimensional distance profile
    T : numpy.ndarray
        The time series or sequence for which the multi-dimensional distance
        profile will be returned
    m : int
        Window size
    include : numpy.ndarray, default None
        Dimensions that must be included in the constrained multidimensional
        motif search. See Section IV D in `DOI: 10.1109/ICDM.2017.66`.
    discords : bool, default False
        When set to `True`, this reverses the distance profile to favor
        discords rather than motifs. Indices in `include` are still respected.

    Returns
    -------
    D : numpy.ndarray
        Multi-dimensional distance profile for the window at `query_idx`
    """
    T, T_subseq_isfinite = core.preprocess_non_normalized(T, m)
    if T.ndim <= 1:  # pragma: no cover
        raise ValueError(
            f"T is {T.ndim}-dimensional and must be at least 1-dimensional"
        )
    core.check_window_size(m, max_size=T.shape[1])

    if include is not None:  # pragma: no cover
        include = mstump._preprocess_include(include)

    # See Definition 3 and Figure 3
    excl_zone = int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))

    return _maamp_multi_distance_profile(
        query_idx, T, T, m, excl_zone, T_subseq_isfinite, include, discords
    )
def _get_first_maamp_profile(
    start, T_A, T_B, m, excl_zone, T_B_subseq_isfinite, include=None, discords=False
):
    """
    Compute the non-normalized (i.e., without z-normalization)
    multi-dimensional matrix profile and matrix profile index for the single
    window at index `start`. This is a convenience wrapper for
    `_maamp_multi_distance_profile` that reduces the distance profile to its
    minimum (it does not return the matrix profile subspace).

    Parameters
    ----------
    start : int
        The window index for which to compute the first multi-dimensional
        matrix profile and indices
    T_A : numpy.ndarray
        The time series or sequence for which the results will be returned
    T_B : numpy.ndarray
        The time series or sequence that contains your query subsequences
    m : int
        Window size
    excl_zone : int
        The half width for the exclusion zone relative to `start`
    T_B_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_B` contains
        a `np.nan`/`np.inf` value (False)
    include : numpy.ndarray, default None
        Dimensions that must be included in the constrained multidimensional
        motif search. See Section IV D in `DOI: 10.1109/ICDM.2017.66`.
    discords : bool, default False
        When set to `True`, this reverses the distance profile to favor
        discords rather than motifs. Indices in `include` are still respected.

    Returns
    -------
    P : numpy.ndarray
        Multi-dimensional matrix profile for the window at `start`
    I : numpy.ndarray
        Multi-dimensional matrix profile indices for the window at `start`
        (-1 where no finite match exists)
    """
    D = _maamp_multi_distance_profile(
        start, T_A, T_B, m, excl_zone, T_B_subseq_isfinite, include, discords
    )

    n_dims = T_A.shape[0]
    P = np.full(n_dims, np.inf, dtype=np.float64)
    I = np.full(n_dims, -1, dtype=np.int64)

    for dim in range(n_dims):
        nn_idx = np.argmin(D[dim])
        P[dim] = D[dim, nn_idx]
        if np.isinf(P[dim]):  # pragma: no cover
            # No finite match in this dimension
            I[dim] = -1
        else:
            I[dim] = nn_idx

    return P, I
@njit(
    # "(i8, i8, i8, f8[:, :], f8[:, :], i8, i8, b1[:, :], b1[:, :], f8[:, :],"
    # "f8[:, :], f8[:, :], f8[:, :], f8[:, :])",
    parallel=True,
    fastmath=True,
)
def _compute_multi_D(
    d,
    k,
    idx,
    D,
    T,
    m,
    excl_zone,
    T_A_subseq_isfinite,
    T_B_subseq_isfinite,
    T_A_subseq_squared,
    T_B_subseq_squared,
    QT_even,
    QT_odd,
    QT_first,
):
    """
    A Numba JIT-compiled version of non-normalized (i.e., without
    z-normalization) mSTOMP for parallel computation of the multi-dimensional
    distance profile.

    Parameters
    ----------
    d : int
        The total number of dimensions in `T`

    k : int
        The total number of sliding windows to iterate over

    idx : int
        The row index in `T`

    D : numpy.ndarray
        The output distance profile (updated in place; holds squared
        distances, see the `T_B_subseq_squared + T_A_subseq_squared - 2*QT`
        identity below)

    T : numpy.ndarray
        The time series or sequence for which to compute the matrix profile

    m : int
        Window size

    excl_zone : int
        The half width for the exclusion zone relative to the current
        sliding window

    T_A_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_A` contains a
        `np.nan`/`np.inf` value (False)

    T_B_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_B` contains a
        `np.nan`/`np.inf` value (False)

    T_A_subseq_squared : numpy.ndarray
        The rolling sum for `T_A * T_A`

    T_B_subseq_squared : numpy.ndarray
        The rolling sum for `T_B * T_B`

    QT_even : numpy.ndarray
        Dot product between some query sequence, `Q`, and time series, `T`
        (updated in place when `idx` is even)

    QT_odd : numpy.ndarray
        Dot product between some query sequence, `Q`, and time series, `T`
        (updated in place when `idx` is odd)

    QT_first : numpy.ndarray
        QT for the first window relative to the current sliding window

    Returns
    -------
    None
        `D` and one of `QT_even`/`QT_odd` are mutated in place.

    Notes
    -----
    `DOI: 10.1109/ICDM.2017.66 \
    <https://www.cs.ucr.edu/~eamonn/Motif_Discovery_ICDM.pdf>`__

    See mSTAMP Algorithm
    """
    for i in range(d):
        # Numba's prange requires incrementing a range by 1 so replace
        # `for j in range(k-1,0,-1)` with its incrementing compliment
        for rev_j in prange(1, k):
            j = k - rev_j
            # GPU Stomp Parallel Implementation with Numba
            # DOI: 10.1109/ICDM.2016.0085
            # See Figure 5
            # Alternate between the "even" and "odd" QT buffers so each
            # iteration reads the previous iteration's results without
            # overwriting them mid-update (double buffering).
            if idx % 2 == 0:
                # Even
                QT_even[i, j] = (
                    QT_odd[i, j - 1]
                    - T[i, idx - 1] * T[i, j - 1]
                    + T[i, idx + m - 1] * T[i, j + m - 1]
                )
            else:
                # Odd
                QT_odd[i, j] = (
                    QT_even[i, j - 1]
                    - T[i, idx - 1] * T[i, j - 1]
                    + T[i, idx + m - 1] * T[i, j + m - 1]
                )
        # The j == 0 column has no left neighbor in the recurrence above and
        # is restored from the precomputed first-window dot products. Then
        # convert dot products to squared distances via
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b
        if idx % 2 == 0:
            QT_even[i, 0] = QT_first[i, idx]
            if not T_B_subseq_isfinite[i, idx]:
                D[i] = np.inf
            else:
                D[i] = (
                    T_B_subseq_squared[i, idx]
                    + T_A_subseq_squared[i]
                    - 2.0 * QT_even[i]
                )
        else:
            QT_odd[i, 0] = QT_first[i, idx]
            if not T_B_subseq_isfinite[i, idx]:
                D[i] = np.inf
            else:
                D[i] = (
                    T_B_subseq_squared[i, idx] + T_A_subseq_squared[i] - 2.0 * QT_odd[i]
                )
        # Mask non-finite subsequences of T_A and clamp tiny (possibly
        # negative) squared distances caused by floating-point cancellation
        D[i][~(T_A_subseq_isfinite[i])] = np.inf
        D[i][D[i] < config.STUMPY_D_SQUARED_THRESHOLD] = 0
    # njit-compatible private variant of the exclusion zone helper
    core._apply_exclusion_zone(D, idx, excl_zone, np.inf)
def _maamp(
    T,
    m,
    range_stop,
    excl_zone,
    T_A_subseq_isfinite,
    T_B_subseq_isfinite,
    T_A_subseq_squared,
    T_B_subseq_squared,
    QT,
    QT_first,
    k,
    range_start=1,
    include=None,
    discords=False,
):
    """
    A non-normalized (i.e., without z-normalization) variant of mSTOMP,
    itself a variant of mSTAMP, for computation of the multi-dimensional
    matrix profile and multi-dimensional matrix profile indices. The inner
    distance-profile computation is delegated to the Numba JIT-compiled
    `_compute_multi_D`. Note that only self-joins are supported.

    Parameters
    ----------
    T: numpy.ndarray
        The time series or sequence for which to compute the multi-dimensional
        matrix profile

    m : int
        Window size

    range_stop : int
        The index value along T for which to stop the matrix profile
        calculation. This parameter is here for consistency with the
        distributed `mstumped` algorithm.

    excl_zone : int
        The half width for the exclusion zone relative to the current
        sliding window

    T_A_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_A` contains a
        `np.nan`/`np.inf` value (False)

    T_B_subseq_isfinite : numpy.ndarray
        A boolean array that indicates whether a subsequence in `T_B` contains a
        `np.nan`/`np.inf` value (False)

    T_A_subseq_squared : numpy.ndarray
        The rolling sum for `T_A * T_A`

    T_B_subseq_squared : numpy.ndarray
        The rolling sum for `T_B * T_B`

    QT : numpy.ndarray
        Dot product between some query sequence, `Q`, and time series, `T`

    QT_first : numpy.ndarray
        QT for the first window relative to the current sliding window

    k : int
        The total number of sliding windows to iterate over

    range_start : int, default 1
        The starting index value along T_B for which to start the matrix
        profile calculation. Default is 1.

    include : numpy.ndarray, default None
        A list of (zero-based) indices corresponding to the dimensions in `T` that
        must be included in the constrained multidimensional motif search.
        For more information, see Section IV D in:

        `DOI: 10.1109/ICDM.2017.66 \
        <https://www.cs.ucr.edu/~eamonn/Motif_Discovery_ICDM.pdf>`__

    discords : bool, default False
        When set to `True`, this reverses the distance profile to favor discords rather
        than motifs. Note that indices in `include` are still maintained and respected.

    Returns
    -------
    P : numpy.ndarray
        The multi-dimensional matrix profile. Each row of the array corresponds
        to each matrix profile for a given dimension (i.e., the first row is the
        1-D matrix profile and the second row is the 2-D matrix profile).

    I : numpy.ndarray
        The multi-dimensional matrix profile index where each row of the array
        corresponds to each matrix profile index for a given dimension.

    Notes
    -----
    `DOI: 10.1109/ICDM.2017.66 \
    <https://www.cs.ucr.edu/~eamonn/Motif_Discovery_ICDM.pdf>`__

    See mSTAMP Algorithm
    """
    # Double-buffered dot products consumed/produced by _compute_multi_D
    QT_odd = QT.copy()
    QT_even = QT.copy()
    d = T.shape[0]
    P = np.empty((d, range_stop - range_start), dtype=np.float64)
    I = np.empty((d, range_stop - range_start), dtype=np.int64)
    D = np.empty((d, k), dtype=np.float64)
    D_prime = np.empty(k, dtype=np.float64)
    start_row_idx = 0
    if include is not None:
        # Scratch buffers for mstump._apply_include: the include dimensions
        # are partitioned into those already within the first
        # `include.shape[0]` rows ("restricted") and those that must be
        # swapped in ("unrestricted") -- presumably mirrors mstump's include
        # handling; confirm against mstump._apply_include
        tmp_swap = np.empty((include.shape[0], k), dtype=np.float64)
        restricted_indices = include[include < include.shape[0]]
        unrestricted_indices = include[include >= include.shape[0]]
        mask = np.ones(include.shape[0], dtype=bool)
        mask[restricted_indices] = False
    for idx in range(range_start, range_stop):
        _compute_multi_D(
            d,
            k,
            idx,
            D,
            T,
            m,
            excl_zone,
            T_A_subseq_isfinite,
            T_B_subseq_isfinite,
            T_A_subseq_squared,
            T_B_subseq_squared,
            QT_even,
            QT_odd,
            QT_first,
        )
        # `include` processing must occur here since we are dealing with distances
        if include is not None:
            mstump._apply_include(
                D,
                include,
                restricted_indices,
                unrestricted_indices,
                mask,
                tmp_swap,
            )
            start_row_idx = include.shape[0]
        # Column-wise sort of the non-pinned rows (descending for discords,
        # via sorting a reversed view in place)
        if discords:
            D[start_row_idx:][::-1].sort(axis=0)
        else:
            D[start_row_idx:].sort(axis=0)
        # Reduce the sorted distance profile into matrix profile values and
        # indices for window `idx`
        mstump._compute_PI(d, idx, D, D_prime, range_start, P, I)
    return P, I
def maamp(T, m, include=None, discords=False):
    """
    Compute the multi-dimensional non-normalized (i.e., without
    z-normalization) matrix profile.

    This is a convenience wrapper around the Numba JIT-compiled parallelized
    `_maamp` function which computes the multi-dimensional matrix profile and
    multi-dimensional matrix profile index according to mSTOMP, a variant of
    mSTAMP. Note that only self-joins are supported.

    Parameters
    ----------
    T : numpy.ndarray
        The time series or sequence for which to compute the multi-dimensional
        matrix profile. Each row in `T` represents data from a different
        dimension while each column in `T` represents data from the same
        dimension.
    m : int
        Window size
    include : list, numpy.ndarray, default None
        A list of (zero-based) indices corresponding to the dimensions in `T`
        that must be included in the constrained multidimensional motif search.
        See Section IV D in `DOI: 10.1109/ICDM.2017.66`.
    discords : bool, default False
        When set to `True`, this reverses the distance matrix which results in
        a multi-dimensional matrix profile that favors larger matrix profile
        values (i.e., discords) rather than smaller values (i.e., motifs).
        Indices in `include` are still maintained and respected.

    Returns
    -------
    P : numpy.ndarray
        The multi-dimensional matrix profile. Each row of the array corresponds
        to each matrix profile for a given dimension (i.e., the first row is
        the 1-D matrix profile and the second row is the 2-D matrix profile).
    I : numpy.ndarray
        The multi-dimensional matrix profile index where each row of the array
        corresponds to each matrix profile index for a given dimension.

    Notes
    -----
    `DOI: 10.1109/ICDM.2017.66 \
    <https://www.cs.ucr.edu/~eamonn/Motif_Discovery_ICDM.pdf>`__

    See mSTAMP Algorithm
    """
    # Self-join: both operands are independently preprocessed copies of T
    T_A, T_A_subseq_isfinite = core.preprocess_non_normalized(T, m)
    T_B, T_B_subseq_isfinite = core.preprocess_non_normalized(T, m)
    T_A_subseq_squared = np.sum(core.rolling_window(T_A * T_A, m), axis=2)
    T_B_subseq_squared = np.sum(core.rolling_window(T_B * T_B, m), axis=2)

    if T_A.ndim <= 1:  # pragma: no cover
        raise ValueError(
            f"T is {T_A.ndim}-dimensional and must be at least 1-dimensional"
        )

    core.check_window_size(m, max_size=min(T_A.shape[1], T_B.shape[1]))

    if include is not None:
        include = mstump._preprocess_include(include)

    d, n = T_B.shape
    k = n - m + 1
    # See Definition 3 and Figure 3
    excl_zone = int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))

    P = np.empty((d, k), dtype=np.float64)
    I = np.empty((d, k), dtype=np.int64)

    # The first window (index 0) is handled separately ...
    P[:, 0], I[:, 0] = _get_first_maamp_profile(
        0,
        T_A,
        T_B,
        m,
        excl_zone,
        T_B_subseq_isfinite,
        include,
        discords,
    )
    # ... and the remaining windows [1, k) are computed incrementally from the
    # first window's dot products
    QT, QT_first = mstump._get_multi_QT(0, T_A, m)
    P[:, 1:], I[:, 1:] = _maamp(
        T_A,
        m,
        k,
        excl_zone,
        T_A_subseq_isfinite,
        T_B_subseq_isfinite,
        T_A_subseq_squared,
        T_B_subseq_squared,
        QT,
        QT_first,
        k,
        1,
        include,
        discords,
    )

    return P, I
| 30.697674 | 88 | 0.628175 |
ace40c57af174a123137eba3ac097d14d667e2e5 | 3,889 | py | Python | python/decord/video_loader.py | TheTimmy/decord | 67bfd14e4ae50b2751e7b92b73c6d73df35cbdaf | [
"Apache-2.0"
] | 762 | 2020-01-16T02:44:50.000Z | 2022-03-30T10:03:36.000Z | python/decord/video_loader.py | TheTimmy/decord | 67bfd14e4ae50b2751e7b92b73c6d73df35cbdaf | [
"Apache-2.0"
] | 161 | 2020-01-20T07:47:38.000Z | 2022-03-11T15:19:10.000Z | python/decord/video_loader.py | TheTimmy/decord | 67bfd14e4ae50b2751e7b92b73c6d73df35cbdaf | [
"Apache-2.0"
] | 77 | 2020-01-23T17:47:20.000Z | 2022-03-28T10:12:19.000Z | """Video Loader."""
from __future__ import absolute_import
import ctypes
import numpy as np
from ._ffi.base import c_array, c_str
from ._ffi.function import _init_api
from .base import DECORDError
from . import ndarray as _nd
from .ndarray import DECORDContext
from .bridge import bridge_out
VideoLoaderHandle = ctypes.c_void_p
class VideoLoader(object):
    """Multiple video loader with advanced shuffling and batching methods.

    Thin Python wrapper around a native loader handle; all decoding work is
    performed by the `_CAPI_VideoLoader*` functions registered into this
    module by `_init_api`.

    Parameters
    ----------
    uris : list of str
        List of video paths.
    ctx : decord.Context or list of Context
        The context to decode the video file, can be decord.cpu() or decord.gpu().
        If ctx is a list, videos will be evenly split over many ctxs.
    shape : tuple
        Returned shape of the batch images, e.g., (2, 320, 240, 3) as (Batch, H, W, 3)
    interval : int
        Intra-batch frame interval.
    skip : int
        Inter-batch frame interval.
    shuffle : int
        Shuffling strategy. Can be
        `0`: all sequential, no seeking, following initial filename order
        `1`: random filename order, no random access for each video, very efficient
        `2`: random order
        `3`: random frame access in each video only.
    prefetch : int, default 0
        Forwarded to the native loader; presumably the number of batches to
        prefetch -- TODO confirm against the C API.
    """
    def __init__(self, uris, ctx, shape, interval, skip, shuffle, prefetch=0):
        # Set _handle first so __del__ is safe even if validation below fails
        self._handle = None
        assert isinstance(uris, (list, tuple))
        assert (len(uris) > 0)
        # The C API receives a single comma-separated string of paths
        uri = ','.join([x.strip() for x in uris])
        if isinstance(ctx, DECORDContext):
            ctx = [ctx]
        for _ctx in ctx:
            assert isinstance(_ctx, DECORDContext)
        device_types = _nd.array([x.device_type for x in ctx])
        device_ids = _nd.array([x.device_id for x in ctx])
        assert isinstance(shape, (list, tuple))
        assert len(shape) == 4, "expected shape: [bs, height, width, 3], given {}".format(shape)
        self._handle = _CAPI_VideoLoaderGetVideoLoader(
            uri, device_types, device_ids, shape[0], shape[1], shape[2], shape[3], interval, skip, shuffle, prefetch)
        assert self._handle is not None
        # Cache the epoch length and initialize the batch cursor
        self._len = _CAPI_VideoLoaderLength(self._handle)
        self._curr = 0
    def __del__(self):
        # Release the native handle; guard against a failed/partial __init__
        if self._handle:
            _CAPI_VideoLoaderFree(self._handle)
    def __len__(self):
        """Get number of batches in each epoch.

        Returns
        -------
        int
            number of batches in each epoch.
        """
        return self._len
    def reset(self):
        """Reset loader for next epoch."""
        assert self._handle is not None
        self._curr = 0
        _CAPI_VideoLoaderReset(self._handle)
    def __next__(self):
        """Get the next batch.

        Returns
        -------
        ndarray, ndarray
            Frame data and corresponding indices in videos.
            Indices are [(n0, k0), (n1, k1)...] where n0 is the index of video, k0 is the index
            of frame in video n0.

        Raises
        ------
        StopIteration
            When all batches of the current epoch have been consumed.
        """
        assert self._handle is not None
        # avoid calling CAPI HasNext
        if self._curr >= self._len:
            raise StopIteration
        # Advance the native loader first, then fetch data and indices for
        # the newly current batch (call order matters for the C API)
        _CAPI_VideoLoaderNext(self._handle)
        data = _CAPI_VideoLoaderNextData(self._handle)
        indices = _CAPI_VideoLoaderNextIndices(self._handle)
        self._curr += 1
        # bridge_out converts to the active bridge's array type (see
        # decord.bridge)
        return bridge_out(data), bridge_out(indices)
    def next(self):
        """Alias of __next__ for python2."""
        return self.__next__()
    def __iter__(self):
        assert self._handle is not None
        # if (self._curr >= self._len):
        #     self.reset()
        # else:
        #     err_msg = "Call __iter__ of VideoLoader during previous iteration is forbidden. \
        #         Consider using cached iterator by 'vl = iter(video_loader)' and reuse it."
        #     raise RuntimeError(err_msg)
        return self
# Inject the _CAPI_VideoLoader* functions used above into this module's
# namespace from the native library
_init_api("decord.video_loader")
| 31.877049 | 117 | 0.613782 |
ace40c8175a1d458fd42e8e12bf0f19dbddaafe6 | 7,982 | py | Python | neurokernel/tools/mpi_run.py | mreitm/neurokernel | 8195a500ba1127f719e963465af9f43d6019b884 | [
"BSD-3-Clause"
] | 1 | 2018-12-23T08:48:25.000Z | 2018-12-23T08:48:25.000Z | neurokernel/tools/mpi_run.py | mreitm/neurokernel | 8195a500ba1127f719e963465af9f43d6019b884 | [
"BSD-3-Clause"
] | null | null | null | neurokernel/tools/mpi_run.py | mreitm/neurokernel | 8195a500ba1127f719e963465af9f43d6019b884 | [
"BSD-3-Clause"
] | null | null | null | import tempfile
import os
import inspect
import subprocess
import dill
import re
from neurokernel.mixins import LoggerMixin
def mpi_run(func, targets=None, delete_tempfile=True, log=False,
            log_screen=False, log_file_name='neurokernel.log'):
    """
    Run a function with mpiexec.

    Implemented as a fix to 'import neurokernel.mpi_relaunch', which does not
    work within notebooks. Writes the source code for a function to a temporary
    file and then runs the temporary file using mpiexec. Returns the stdout of
    from the function along with a string indicating whether or not the function
    executed properly.

    Parameters
    ----------
    func : function or str
        Function to be executed with mpiexec. All imports and variables used
        must be imported or defined within the function. func can either be a callable
        function or code that represents a valid function.
    targets : list
        Dependencies of the manager, such as child classes of the Module class
        from neurokernel.core_gpu or neurokernel.core.
    delete_tempfile : bool
        Whether or not to delete temporary file once func is executed.
    log : boolean
        Whether or not to connect to logger for func if logger exists.
    log_screen : bool
        Whether or not to send log messages to the screen.
    log_file_name : str
        File to send log messages to.

    Returns
    -------
    output : str
        The stdout from the function run with mpiexec cast to a string.

    Raises
    ------
    RuntimeError
        If the wrapped function raised inside the mpiexec process (its output
        contains the MPI_RUN_FAILURE marker).

    Usage
    -----
    Does not seem to work with openmpi version 2
    func should not import neurokernel.mpi_relaunch
    All modules and variables used must be imported or defined within func
    Returns the stdout from the function run under 'mpiexec -np 1 python {tmp_file_name}'
    """
    l = LoggerMixin("mpi_run()", log_on=log)

    if callable(func):
        func_text = inspect.getsource(func)
        # Make a feeble attempt at fixing indentation. Will work for a nested function
        # that takes no args, not a member function that expects (self) or a class
        func_text = "\n" + re.sub(r"(^\s+)def ", "def ", func_text) + "\n"
        func_name = func.__name__
    else:
        func_text = "\n" + func + "\n"
        func_name = re.search(r'def *(.*)\(\):', func_text).group(1)

    # Source of any dependencies (e.g. Module subclasses) the function needs
    target_text = "\n"
    if targets:
        for t in targets:
            target_text += "\n" + inspect.getsource(t) + "\n"

    # Driver that runs the function inside the mpiexec process and prints a
    # success/failure marker that is parsed below
    main_code = "\n"
    main_code += "\nif __name__ == \"__main__\":"
    main_code += "\n    import neurokernel.mpi as mpi"
    main_code += "\n    from neurokernel.mixins import LoggerMixin"
    main_code += "\n    from mpi4py import MPI"
    if log:
        main_code += "\n    mpi.setup_logger(screen=%s, file_name=\"%s\"," % (log_screen, log_file_name)
        main_code += "\n                     mpi_comm=MPI.COMM_WORLD, multiline=True)"
    main_code += "\n    l = LoggerMixin(\"%s\",%s)" % (func_name, str(log))
    main_code += "\n    try:"
    main_code += "\n        %s()" % func_name
    main_code += "\n        print(\"MPI_RUN_SUCCESS: %s\")" % func_name
    main_code += "\n        l.log_info(\"MPI_RUN_SUCCESS: %s\")" % func_name
    main_code += "\n    except Exception as e:"
    main_code += "\n        print(\"MPI_RUN_FAILURE: %s\")" % func_name
    main_code += "\n        l.log_error(\"MPI_RUN_FAILURE: %s\")" % func_name
    main_code += "\n        print(e)"
    main_code += "\n"

    temp = None
    try:
        from mpi4py import MPI

        # Write code for the function to a temp file. Open in text mode so
        # that writing `str` works on Python 3 as well as Python 2.
        temp = tempfile.NamedTemporaryFile(mode='w', delete=delete_tempfile)
        temp.write(target_text)
        temp.write(func_text)
        temp.write(main_code)
        temp.flush()

        # Execute the code
        # There's a bug in Open MPI v2 that prevents running this with mpiexec. Running
        # 'from mpi4py import MPI' does a basic mpi_relaunch which will work for the
        # notebook code, but you give up some of the features of mpiexec.
        if MPI.Get_library_version().startswith("Open MPI v2"):
            command = ["python", temp.name]
        else:
            command = ["mpiexec", "-np", "1", "python", temp.name]

        # Prevent SLURM from preventing mpiexec from starting multiple processes.
        # Iterate over a snapshot of the keys since the dict is mutated inside
        # the loop (mutating during iteration raises on Python 3).
        env = os.environ.copy()
        for k in list(env.keys()):
            if k.startswith("SLURM"):
                del env[k]

        l.log_info("Calling: " + " ".join(command))
        # universal_newlines=True makes check_output return `str` (not bytes)
        # on Python 3, so the marker test below works on both 2 and 3
        out = subprocess.check_output(command, env=env, universal_newlines=True)
    except Exception as e:
        l.log_error(str(e))
        raise
    finally:
        # Closing the temp file also deletes it (when delete_tempfile is True).
        # Guard against the case where creating the temp file itself failed,
        # otherwise `temp` would be unbound here and mask the original error.
        if temp is not None:
            temp.close()

    # Return the output
    if "MPI_RUN_FAILURE" in out:
        raise RuntimeError(out)
    return str(out)
def mpi_run_manager(man, steps, targets=None, delete_tempfile=True, log=False,
                    log_screen=False, log_file_name='neurokernel.log'):
    """
    Run the manager with mpiexec.

    Implemented as a fix to 'import neurokernel.mpi_relaunch', which does not work
    in notebooks. Serializes the manager and sends it to a temporary file, then
    sends a function to mpi_run, which loads the manager in an mpiexec process and
    runs it using the common set of commands:
        man.spawn()
        man.start(steps = {Number of steps})
        man.wait()
    Returns the stdout of from the manager along with a string indicating whether
    or not the manager ran properly.

    Parameters
    ----------
    man : neurokernel.core_gpu.Manager or neurokernel.core.Manager
        The Neurokernel manager to be run.
    steps : int
        Number of steps to run the manager for.
    targets : list
        Dependencies of the manager, such as child classes of the Module class
        from neurokernel.core_gpu or neurokernel.core.
    delete_tempfile : bool
        Whether or not to delete temporary file once the manager is executed.
    log : boolean
        Whether or not to connect to logger for manager if logger exists.
    log_screen : bool
        Whether or not to send log messages to the screen.
    log_file_name : str
        File to send log messages to.

    Returns
    -------
    output : str
        The stdout from the manager run with mpiexec cast to a string.
    """
    l = LoggerMixin("mpi_run_manager()", log_on=log)

    # Template of a function that loads the pickled Manager from the temp
    # file (%s) and runs it for %i steps inside the mpiexec process
    func_code = "\ndef MPI_Function():"
    func_code += "\n    import dill"
    func_code += "\n    f = open(\"%s\",\"rb\")"
    func_code += "\n    man = dill.load(f)"
    func_code += "\n    man.spawn()"
    func_code += "\n    man.start(steps=%i)"
    func_code += "\n    man.wait()"

    temp = None
    try:
        # Store the Manager in a temporary file (binary mode, as dill requires)
        temp = tempfile.NamedTemporaryFile(delete=delete_tempfile)
        dill.dump(man, temp)
        temp.flush()

        # Run the function using mpiexec
        out = mpi_run(func_code % (temp.name, steps), targets,
                      delete_tempfile=delete_tempfile, log=log,
                      log_screen=log_screen, log_file_name=log_file_name)
    except Exception as e:
        l.log_error(str(e))
        raise
    finally:
        # Closing the temp file also deletes it. Guard against the case where
        # creating the temp file itself failed, otherwise `temp` would be
        # unbound here and a NameError would mask the original error.
        if temp is not None:
            temp.close()

    # Return the output
    return str(out)
#Basic sanity checks
if __name__ == "__main__":
    # Smoke tests for mpi_run: a function that succeeds, a code string,
    # and a function that raises (to verify failure propagation).
    from tools.logging import setup_logger
    setup_logger(screen=True, file_name='neurokernel.log', multiline=True)

    def _test_success():
        print("HELLO WORLD")

    def _test_fail():
        # Intentionally raises (file does not exist) inside the MPI process
        open("RANDOM_FILE", "r").read()

    print("This should succeed:")
    print(mpi_run(_test_success))
    print("This should also succeed:")
    # mpi_run also accepts raw source code as a string
    code = "\ndef func():"
    code += "\n    print(\"HELLO AGAIN\")"
    print(mpi_run(code))
    print("This should fail:")
    print(mpi_run(_test_fail))
| 34.257511 | 112 | 0.626911 |
ace40dfe84b2ea698885555eb795d66905f2dd01 | 38,246 | py | Python | build/lib/geoplotlib/layers.py | shuqiz/geoplotlibpython3 | 63346c32639cc8783911997e3c7dd748d9c0cdcc | [
"MIT"
] | null | null | null | build/lib/geoplotlib/layers.py | shuqiz/geoplotlibpython3 | 63346c32639cc8783911997e3c7dd748d9c0cdcc | [
"MIT"
] | null | null | null | build/lib/geoplotlib/layers.py | shuqiz/geoplotlibpython3 | 63346c32639cc8783911997e3c7dd748d9c0cdcc | [
"MIT"
] | null | null | null | from collections import defaultdict
from math import log10, log
from threading import Thread
import threading
import math
import pyglet
import numpy as np
import geoplotlib.colors as colors
from geoplotlib.core import BatchPainter,FONT_NAME
from geoplotlib.utils import BoundingBox
import queue
from inspect import isfunction
import json
class HotspotManager():
    """Keeps track of pickable screen regions (rectangles and polygons)
    and resolves the mouse position to the value attached to the first
    region that contains it (typically a tooltip string).
    """

    def __init__(self):
        self.rects = []
        self.poly = []

    # adapted from:
    # http://stackoverflow.com/questions/16625507/python-checking-if-point-is-inside-a-polygon
    @staticmethod
    def point_in_poly(x, y, bbox, poly):
        """Ray-casting point-in-polygon test with a bounding-box fast reject.

        :param bbox: (left, top, right, bottom) extent of the polygon
        :param poly: sequence of (x, y) vertices
        """
        left, top, right, bottom = bbox
        if x < left or x > right or y < top or y > bottom:
            return False

        inside = False
        ax, ay = poly[-1]
        for bx, by in poly:
            # does the horizontal ray at height y cross this edge?
            if min(ay, by) < y <= max(ay, by) and x <= max(ax, bx):
                if ay != by:
                    x_cross = (y - ay) * (bx - ax) / (by - ay) + ax
                if ax == bx or x <= x_cross:
                    inside = not inside
            ax, ay = bx, by
        return inside

    def add_rect(self, x, y, w, h, value):
        self.rects.append(((x, y, w, h), value))

    def add_poly(self, x, y, value):
        extent = (x.min(), y.min(), x.max(), y.max())
        self.poly.append((list(zip(x, y)), extent, value))

    def pick(self, mouse_x, mouse_y):
        """Return the value of the first hotspot under the mouse, or None."""
        for (rx, ry, rw, rh), value in self.rects:
            if rx <= mouse_x <= rx + rw and ry <= mouse_y <= ry + rh:
                return value
        for points, extent, value in self.poly:
            if HotspotManager.point_in_poly(mouse_x, mouse_y, extent, points):
                return value
        return None
class BaseLayer():
    """
    Base class for layers.

    Subclasses override invalidate() to (re)build their geometry and draw()
    to render it each frame; bbox() and on_key_release() are optional.
    """

    def invalidate(self, proj):
        """
        This method is called each time layers need to be redrawn, i.e. on zoom.
        Typically in this method a BatchPainter is instantiated and all the rendering is performed
        :param proj: the current Projector object
        """
        pass

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        """
        This method is called at every frame, and typically executes BatchPainter.batch_draw()
        :param proj: the current Projector object
        :param mouse_x: mouse x
        :param mouse_y: mouse y
        :param ui_manager: the current UiManager
        """
        pass

    def bbox(self):
        """
        Return the bounding box for this layer (default: the whole world)
        """
        return BoundingBox.WORLD

    def on_key_release(self, key, modifiers):
        """
        Override this method for custom handling of keystrokes
        :param key: the key that has been released
        :param modifiers: the key modifiers
        :return: True if the layer needs to call invalidate
        """
        return False
class DotDensityLayer(BaseLayer):

    def __init__(self, data, color=None, point_size=2, f_tooltip=None):
        """Create a dot density map

        :param data: data access object
        :param color: color (defaults to red)
        :param point_size: point size
        :param f_tooltip: function to return a tooltip string for a point
        """
        self.data = data
        self.color = [255, 0, 0] if color is None else color
        self.point_size = point_size
        self.f_tooltip = f_tooltip
        self.hotspots = HotspotManager()

    def invalidate(self, proj):
        self.painter = BatchPainter()
        x, y = proj.lonlat_to_screen(self.data['lon'], self.data['lat'])

        if self.f_tooltip:
            half = self.point_size
            for i in range(len(x)):
                # register a square hotspot around each dot
                record = {field: self.data[field][i] for field in list(self.data.keys())}
                self.hotspots.add_rect(x[i] - half, y[i] - half,
                                       2 * half, 2 * half,
                                       self.f_tooltip(record))

        self.painter.set_color(self.color)
        self.painter.points(x, y, 2 * self.point_size, False)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()
        tooltip = self.hotspots.pick(mouse_x, mouse_y)
        if tooltip:
            ui_manager.tooltip(tooltip)

    def bbox(self):
        return BoundingBox.from_points(lons=self.data['lon'], lats=self.data['lat'])
class HistogramLayer(BaseLayer):

    def __init__(self, data, cmap='hot', alpha=220, colorscale='sqrt', binsize=16,
                 show_tooltip=False, scalemin=0, scalemax=None, f_group=None, show_colorbar=True):
        """Create a 2D histogram
        :param data: data access object
        :param cmap: colormap name
        :param alpha: color alpha
        :param colorscale: scaling [lin, log, sqrt]
        :param binsize: size of the hist bins
        :param show_tooltip: if True, will show the value of bins on mouseover
        :param scalemin: min value for displaying a bin
        :param scalemax: max value for a bin
        :param f_group: function to apply to samples in the same bin. Default is to count
        :param show_colorbar: show colorbar
        :return:
        """
        self.data = data
        self.cmap = colors.ColorMap(cmap, alpha=alpha)
        self.binsize = binsize
        self.show_tooltip = show_tooltip
        self.scalemin = scalemin
        self.scalemax = scalemax
        self.colorscale = colorscale
        self.f_group = f_group
        if self.f_group is None:
            # default aggregation: count of samples in the bin
            self.f_group = lambda grp: len(grp)
        self.show_colorbar = show_colorbar

    def invalidate(self, proj):
        self.painter = BatchPainter()
        x, y = proj.lonlat_to_screen(self.data['lon'], self.data['lat'])
        # Temporarily attach bin indices to the data object so that
        # data.where() can group samples per bin; removed again below.
        self.data['_xbin'] = (x / self.binsize).astype(int)
        self.data['_ybin'] = (y / self.binsize).astype(int)
        # unique (xbin, ybin) pairs that contain at least one sample
        uniquevalues = set([tuple(row) for row in np.vstack([self.data['_xbin'],self.data['_ybin']]).T])
        # aggregated value per occupied bin (count by default, see f_group)
        results = {(v1,v2): self.f_group(self.data.where((self.data['_xbin'] == v1) & (self.data['_ybin'] == v2))) \
                   for v1, v2 in uniquevalues}
        del self.data['_xbin']
        del self.data['_ybin']
        self.hotspot = HotspotManager()
        # colormap upper bound: explicit scalemax, or the max observed bin value
        if self.scalemax:
            self.vmax = self.scalemax
        else:
            self.vmax = max(results.values()) if len(results) > 0 else 0
        if self.vmax >= 1:
            # one filled rectangle per bin above the display threshold
            for (ix, iy), value in list(results.items()):
                if value > self.scalemin:
                    self.painter.set_color(self.cmap.to_color(value, self.vmax, self.colorscale))
                    l = self.binsize
                    rx = ix * self.binsize
                    ry = iy * self.binsize
                    self.painter.rect(rx, ry, rx+l, ry+l)
                    if self.show_tooltip:
                        self.hotspot.add_rect(rx, ry, l, l, 'Value: %d' % value)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()
        picked = self.hotspot.pick(mouse_x, mouse_y)
        if picked:
            ui_manager.tooltip(picked)
        if self.show_colorbar:
            ui_manager.add_colorbar(self.cmap, self.vmax, self.colorscale)

    def bbox(self):
        return BoundingBox.from_points(lons=self.data['lon'], lats=self.data['lat'])
class GraphLayer(BaseLayer):

    def __init__(self, data, src_lat, src_lon, dest_lat, dest_lon,
                 linewidth=1, alpha=220, color='hot', levels=10,
                 color_by=None, seg_scale='log'):
        """Create a graph drawing a line between each pair of (src_lat, src_lon) and (dest_lat, dest_lon)

        :param data: data access object
        :param src_lat: field name of source latitude
        :param src_lon: field name of source longitude
        :param dest_lat: field name of destination latitude
        :param dest_lon: field name of destination longitude
        :param linewidth: line width
        :param alpha: color alpha
        :param color: a flat color (list), or a colormap name (str)
        :param levels: coloring levels
        :param color_by: attribute name used for coloring; defaults to the
            screen-space distance between the two endpoints
        :param seg_scale: coloring data segmentation scale, 'log' or 'lin';
            'lin' is only used when coloring by an attribute (not by distance)
        """
        self.data = data
        self.src_lon = src_lon
        self.src_lat = src_lat
        self.dest_lon = dest_lon
        self.dest_lat = dest_lat
        self.linewidth = linewidth
        self.color = color
        # when a colormap name is given, pre-build the ColorMap
        if type(self.color) == str:
            self.cmap = colors.ColorMap(self.color, alpha, levels=levels)
        # BUG FIX: the original only assigned self.color_by when color_by was
        # None, so passing an attribute name raised AttributeError inside
        # invalidate(). Also removed the dead 'alpha = alpha' statement.
        self.color_by = 'distance' if color_by is None else color_by
        self.seg_scale = seg_scale

    def invalidate(self, proj):
        self.painter = BatchPainter()
        x0, y0 = proj.lonlat_to_screen(self.data[self.src_lon], self.data[self.src_lat])
        x1, y1 = proj.lonlat_to_screen(self.data[self.dest_lon], self.data[self.dest_lat])

        if type(self.color) == list:
            # flat color: draw every edge in a single batch
            self.painter.set_color(self.color)
            self.painter.lines(x0, y0, x1, y1, width=self.linewidth)
        else:
            if self.color_by == 'distance':
                # color by Manhattan screen distance, always log-scaled
                manhattan = np.abs(x0-x1) + np.abs(y0-y1)
                vmax = manhattan.max()
                segmentations = np.logspace(0, log10(vmax), 20)
                self.seg_scale = 'log'
            else:
                # color by a data attribute, with the configured scale
                manhattan = self.data[self.color_by]
                vmax = manhattan.max()
                if self.seg_scale == 'log':
                    segmentations = np.logspace(0, log10(vmax), 20)
                else:
                    # linear
                    segmentations = np.linspace(0, vmax, 20)

            # draw one batch of edges per value bucket, largest values first
            for i in range(len(segmentations)-1, 1, -1):
                mask = (manhattan > segmentations[i-1]) & (manhattan <= segmentations[i])
                self.painter.set_color(self.cmap.to_color(segmentations[i], vmax, self.seg_scale))
                self.painter.lines(x0[mask], y0[mask], x1[mask], y1[mask], width=self.linewidth)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()

    def bbox(self):
        return BoundingBox.from_points(lons=np.hstack([self.data[self.src_lon], self.data[self.dest_lon]]),
                                       lats=np.hstack([self.data[self.src_lat], self.data[self.dest_lat]]))
class ShapefileLayer(BaseLayer):

    def __init__(self, fname, f_tooltip=None, color=None, linewidth=3, shape_type='full', encoding='utf-8', encodingErrors='strict'):
        """
        Loads and draws shapefiles

        :param fname: full path to the shapefile
        :param f_tooltip: function to generate a tooltip on mouseover
        :param color: color (defaults to red)
        :param linewidth: line width
        :param shape_type: either full or bbox
        :param encoding: character encoding of the shapefile
        :param encodingErrors: how decoding errors are handled ('strict', 'replace', ...)
        """
        if color is None:
            color = [255, 0, 0]
        self.color = color
        self.linewidth = linewidth
        self.f_tooltip = f_tooltip
        self.shape_type = shape_type

        try:
            import shapefile
        except ImportError:
            raise Exception('ShapefileLayer requires pyshp')

        self.reader = shapefile.Reader(fname, encoding=encoding, encodingErrors=encodingErrors)
        self.worker = None
        # BUG FIX: this file imports the Python 3 'queue' module, but the
        # original code referenced the Python 2 name 'Queue', which raised
        # NameError at runtime.
        self.queue = queue.Queue()

    def invalidate(self, proj):
        self.painter = BatchPainter()
        self.hotspots = HotspotManager()
        self.painter.set_color(self.color)

        # restart the background shape loading for the new projection
        if self.worker:
            self.worker.stop()
            self.worker.join()
        self.queue = queue.Queue()
        self.worker = ShapeLoadingThread(self.queue, self.reader, self.shape_type, proj)
        self.worker.start()

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()
        picked = self.hotspots.pick(mouse_x, mouse_y)
        if picked:
            ui_manager.tooltip(picked)

        # drain everything the worker produced since the last frame
        while True:
            try:
                x, y, record = self.queue.get_nowait()
                self.painter.linestrip(x, y, self.linewidth, closed=True)
                if self.f_tooltip:
                    attr = {t[0][0]: t[1] for t in zip(self.reader.fields[1:], record)}
                    value = self.f_tooltip(attr)
                    if self.shape_type == 'bbox':
                        self.hotspots.add_rect(x.min(), y.min(), x.max()-x.min(), y.max()-y.min(), value)
                    else:
                        self.hotspots.add_poly(x, y, value)
            except queue.Empty:
                break
class ShapeLoadingThread(Thread):
    """Background worker that reads records from a shapefile reader one by
    one, projects their coordinates to screen space, and pushes
    (x, y, record) tuples onto a queue for the owning layer to consume.
    """

    def __init__(self, queue, reader, shape_type, proj):
        Thread.__init__(self)

        self.queue = queue
        self.reader = reader
        self.shape_type = shape_type
        self.proj = proj
        self.stop_flag = threading.Event()
        self.counter = 0

        self.daemon = True

    def stop(self):
        """Ask the worker to terminate after the current record."""
        self.stop_flag.set()

    def run(self):
        while (self.counter < self.reader.numRecords) and (not self.stop_flag.is_set()):
            shape_record = self.reader.shapeRecord(self.counter)
            if self.shape_type == 'bbox':
                # only the four corners of the record's bounding box
                top, left, bottom, right = shape_record.shape.bbox
                vertices = np.array([[top, left], [top, right],
                                     [bottom, right], [bottom, left]])
            else:
                vertices = np.array(shape_record.shape.points)
            x, y = self.proj.lonlat_to_screen(vertices[:, 0], vertices[:, 1])
            self.queue.put((x, y, shape_record.record))
            self.counter += 1
class DelaunayLayer(BaseLayer):

    def __init__(self, data, line_color=None, line_width=2, cmap=None, max_lenght=100):
        """
        Draw a delaunay triangulation of the points

        :param data: data access object
        :param line_color: line color
        :param line_width: line width
        :param cmap: color map
        :param max_lenght: scaling constant for coloring the edges
            (keeps the historical typo in its name for backward compatibility)
        """
        self.data = data
        if cmap is None and line_color is None:
            raise Exception('need either cmap or line_color')

        if cmap is not None:
            cmap = colors.ColorMap(cmap, alpha=196)
        self.cmap = cmap
        self.line_color = line_color
        self.line_width = line_width
        self.max_lenght = max_lenght

    @staticmethod
    def _get_area(p):
        # signed area of the triangle (x1,y1),(x2,y2),(x3,y3)
        x1, y1, x2, y2, x3, y3 = p
        return 0.5*(x1*(y2-y3)+x2*(y3-y1)+x3*(y1-y2))

    def invalidate(self, proj):
        try:
            # FIX: import from the public scipy.spatial namespace; the private
            # scipy.spatial.qhull module was removed in modern SciPy
            from scipy.spatial import Delaunay
        except ImportError:
            print('DelaunayLayer needs scipy >= 0.12')
            raise

        self.painter = BatchPainter()
        x, y = proj.lonlat_to_screen(self.data['lon'], self.data['lat'])
        points = list(set(zip(x, y)))
        dela = Delaunay(points)

        # collect the (directed) edges of every triangle
        edges = set()
        # FIX: Delaunay.vertices was a deprecated alias of .simplices and has
        # been removed from modern SciPy
        for tria in dela.simplices:
            edges.add((tria[0], tria[1]))
            edges.add((tria[1], tria[2]))
            edges.add((tria[2], tria[0]))

        allx0 = []
        ally0 = []
        allx1 = []
        ally1 = []
        # renamed from 'colors', which shadowed the geoplotlib.colors module
        edge_colors = []
        for a, b in edges:
            x0, y0 = dela.points[a]
            x1, y1 = dela.points[b]
            allx0.append(x0)
            ally0.append(y0)
            allx1.append(x1)
            ally1.append(y1)

            if self.line_color:
                edge_colors.append(self.line_color)
                edge_colors.append(self.line_color)
            elif self.cmap:
                # color by edge length, log-scaled against max_lenght
                l = math.sqrt((x0 - x1)**2 + (y0 - y1)**2)
                c = self.cmap.to_color(l, self.max_lenght, 'log')
                edge_colors.append(c)
                edge_colors.append(c)

        self.painter.lines(allx0, ally0, allx1, ally1, edge_colors, width=self.line_width)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()

    def bbox(self):
        return BoundingBox.from_points(lons=self.data['lon'], lats=self.data['lat'])
class VoronoiLayer(BaseLayer):

    def __init__(self, data, line_color=None, line_width=2, f_tooltip=None, cmap=None, max_area=1e4, alpha=220):
        """
        Draw the voronoi tesselation of the points from data

        :param data: data access object
        :param line_color: line color
        :param line_width: line width
        :param f_tooltip: function to generate a tooltip on mouseover
        :param cmap: color map
        :param max_area: scaling constant to determine the color of the voronoi areas
        :param alpha: color alpha
        """
        self.data = data
        if cmap is None and line_color is None:
            raise Exception('need either cmap or line_color')

        if cmap is not None:
            cmap = colors.ColorMap(cmap, alpha=alpha, levels=10)
        self.cmap = cmap
        self.line_color = line_color
        self.line_width = line_width
        self.f_tooltip = f_tooltip
        self.max_area = max_area

    # source: https://gist.github.com/pv/8036995
    @staticmethod
    def __voronoi_finite_polygons_2d(vor, radius=None):
        """
        Reconstruct infinite voronoi regions in a 2D diagram to finite
        regions.

        Parameters
        ----------
        vor : Voronoi
            Input diagram
        radius : float, optional
            Distance to 'points at infinity'.

        Returns
        -------
        regions : list of tuples
            Indices of vertices in each revised Voronoi regions.
        vertices : list of tuples
            Coordinates for revised Voronoi vertices. Same as coordinates
            of input vertices, with 'points at infinity' appended to the
            end.
        """
        if vor.points.shape[1] != 2:
            raise ValueError("Requires 2D input")

        new_regions = []
        new_vertices = vor.vertices.tolist()

        center = vor.points.mean(axis=0)
        if radius is None:
            # FIX: ndarray.ptp() was removed in NumPy 2.0; np.ptp over the
            # flattened array equals the original vor.points.ptp().max()
            radius = np.ptp(vor.points)

        # Construct a map containing all ridges for a given point
        all_ridges = {}
        for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
            all_ridges.setdefault(p1, []).append((p2, v1, v2))
            all_ridges.setdefault(p2, []).append((p1, v1, v2))

        # Reconstruct infinite regions
        for p1, region in enumerate(vor.point_region):
            vertices = vor.regions[region]

            if all(v >= 0 for v in vertices):
                # finite region
                new_regions.append(vertices)
                continue

            # reconstruct a non-finite region
            if p1 not in all_ridges:
                continue
            ridges = all_ridges[p1]
            new_region = [v for v in vertices if v >= 0]

            for p2, v1, v2 in ridges:
                if v2 < 0:
                    v1, v2 = v2, v1
                if v1 >= 0:
                    # finite ridge: already in the region
                    continue

                # Compute the missing endpoint of an infinite ridge
                t = vor.points[p2] - vor.points[p1]  # tangent
                t /= np.linalg.norm(t)
                n = np.array([-t[1], t[0]])  # normal

                midpoint = vor.points[[p1, p2]].mean(axis=0)
                direction = np.sign(np.dot(midpoint - center, n)) * n
                far_point = vor.vertices[v2] + direction * radius

                new_region.append(len(new_vertices))
                new_vertices.append(far_point.tolist())

            # sort region counterclockwise
            vs = np.asarray([new_vertices[v] for v in new_region])
            c = vs.mean(axis=0)
            angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])
            new_region = np.array(new_region)[np.argsort(angles)]

            # finish
            new_regions.append(new_region.tolist())

        return new_regions, np.asarray(new_vertices)

    # Area of a polygon: http://www.mathopenref.com/coordpolygonarea.html
    @staticmethod
    def _get_area(p):
        return 0.5 * abs(sum(x0*y1 - x1*y0
                             for ((x0, y0), (x1, y1)) in zip(p, p[1:] + [p[0]])))

    def invalidate(self, proj):
        try:
            # FIX: import from the public scipy.spatial namespace; the private
            # scipy.spatial.qhull module was removed in modern SciPy
            from scipy.spatial import Voronoi
        except ImportError:
            print('VoronoiLayer needs scipy >= 0.12')
            raise

        x, y = proj.lonlat_to_screen(self.data['lon'], self.data['lat'])
        points = list(zip(x, y))
        vor = Voronoi(points)

        regions, vertices = VoronoiLayer.__voronoi_finite_polygons_2d(vor)

        self.hotspots = HotspotManager()
        self.painter = BatchPainter()

        for idx, region in enumerate(regions):
            polygon = vertices[region]

            if self.line_color:
                self.painter.set_color(self.line_color)
                self.painter.linestrip(polygon[:,0], polygon[:,1], width=self.line_width, closed=True)
            if self.cmap:
                # cell color from its area, log-scaled against max_area
                area = VoronoiLayer._get_area(polygon.tolist())
                area = max(area, 1)
                self.painter.set_color(self.cmap.to_color(area, self.max_area, 'log'))
                self.painter.poly(polygon[:,0], polygon[:,1])

            if self.f_tooltip:
                record = {k: self.data[k][idx] for k in list(self.data.keys())}
                self.hotspots.add_poly(polygon[:,0], polygon[:,1], self.f_tooltip(record))

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()
        picked = self.hotspots.pick(mouse_x, mouse_y)
        if picked:
            ui_manager.tooltip(picked)

    def bbox(self):
        return BoundingBox.from_points(lons=self.data['lon'], lats=self.data['lat'])
class MarkersLayer(BaseLayer):

    def __init__(self, data, marker, f_tooltip=None, marker_preferred_size=32):
        """
        Draw markers

        :param data: data access object
        :param marker: full filename of the marker image
        :param f_tooltip: function to generate a tooltip on mouseover
        :param marker_preferred_size: size in pixel for the marker images
        """
        self.data = data
        self.f_tooltip = f_tooltip
        self.marker_preferred_size = float(marker_preferred_size)
        self.marker = pyglet.image.load(marker)
        # BUG FIX (py2->py3 port): anchor the sprite at the image center with
        # integer division; under Python 3 '/' produced float anchors, whereas
        # pyglet image anchors are pixel offsets (ints in the original code)
        self.marker.anchor_x = self.marker.width // 2
        self.marker.anchor_y = self.marker.height // 2
        # scale factor so the larger image dimension matches the preferred size
        self.scale = self.marker_preferred_size / max(self.marker.width, self.marker.height)

        self.hotspots = HotspotManager()

    def invalidate(self, proj):
        self.painter = BatchPainter()
        x, y = proj.lonlat_to_screen(self.data['lon'], self.data['lat'])

        if self.f_tooltip:
            for i in range(0, len(x)):
                record = {k: self.data[k][i] for k in list(self.data.keys())}
                # hotspot centered on the marker, sized like the sprite
                self.hotspots.add_rect(x[i] - self.marker_preferred_size/2,
                                       y[i] - self.marker_preferred_size/2,
                                       self.marker_preferred_size,
                                       self.marker_preferred_size,
                                       self.f_tooltip(record))

        self.painter.sprites(self.marker, x, y, self.scale)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()
        picked = self.hotspots.pick(mouse_x, mouse_y)
        if picked:
            ui_manager.tooltip(picked)

    def bbox(self):
        return BoundingBox.from_points(lons=self.data['lon'], lats=self.data['lat'])
class KDELayer(BaseLayer):

    def __init__(self, values, bw, cmap='hot', method='hist', scaling='sqrt', alpha=220,
                 cut_below=None, clip_above=None, binsize=1, cmap_levels=10, show_colorbar=False):
        """
        Kernel density estimation visualization

        :param values: data access object
        :param bw: kernel bandwidth (in screen coordinates)
        :param cmap: colormap
        :param method: if 'kde', use KDEMultivariate from statsmodels, which provides a more accurate
            but much slower estimation. If 'hist', estimates density by applying gaussian smoothing
            on a 2D histogram, which is much faster but less accurate
        :param scaling: colorscale, lin log or sqrt
        :param alpha: color alpha
        :param cut_below: densities below cut_below are not drawn
        :param clip_above: defines the max value for the colorscale
        :param binsize: size of the bins for hist estimator
        :param cmap_levels: discretize colors into cmap_levels
        :param show_colorbar: show colorbar
        """
        self.values = values
        self.bw = bw
        self.cmap = colors.ColorMap(cmap, alpha=alpha, levels=cmap_levels)
        self.method = method
        self.scaling = scaling
        self.cut_below = cut_below
        self.clip_above = clip_above
        self.binsize = binsize
        self.show_colorbar = show_colorbar

    def _get_grid(self, proj):
        # screen-space grid of bin edges covering the current viewport
        west, north = proj.lonlat_to_screen([proj.bbox().west], [proj.bbox().north])
        east, south = proj.lonlat_to_screen([proj.bbox().east], [proj.bbox().south])
        xgrid = np.arange(west, east, self.binsize)
        ygrid = np.arange(south, north, self.binsize)
        return xgrid, ygrid

    def invalidate(self, proj):
        self.painter = BatchPainter()
        xv, yv = proj.lonlat_to_screen(self.values['lon'], self.values['lat'])

        rects_vertices = []
        rects_colors = []

        if self.method == 'kde':
            try:
                import statsmodels.api as sm
            except ImportError:
                raise Exception('KDE requires statsmodel')

            kde_res = sm.nonparametric.KDEMultivariate(data=[xv, yv], var_type='cc', bw=self.bw)
            xgrid, ygrid = self._get_grid(proj)
            xmesh, ymesh = np.meshgrid(xgrid, ygrid)
            grid_coords = np.append(xmesh.reshape(-1,1), ymesh.reshape(-1,1), axis=1)
            z = kde_res.pdf(grid_coords.T)
            z = z.reshape(len(ygrid), len(xgrid))

            # BUG FIX: z[z > 0][0] is the *first* non-zero value in scan order,
            # not the smallest one as the message claims; use .min()
            print('smallest non-zero density:', z[z > 0].min())
            print('max density:', z.max())

            if self.cut_below is None:
                zmin = z[z > 0].min()
            else:
                zmin = self.cut_below

            if self.clip_above is None:
                zmax = z.max()
            else:
                zmax = self.clip_above
            # BUG FIX: draw() needs self.Hmax for the colorbar; the original
            # only defined it in the 'hist' branch
            self.Hmax = zmax

            for ix in range(len(xgrid)-1):
                for iy in range(len(ygrid)-1):
                    if z[iy, ix] > zmin:
                        rects_vertices.append((xgrid[ix], ygrid[iy], xgrid[ix+1], ygrid[iy+1]))
                        rects_colors.append(self.cmap.to_color(z[iy, ix], zmax, self.scaling))
        elif self.method == 'hist':
            try:
                from scipy.ndimage import gaussian_filter
            except ImportError:
                raise Exception('KDE requires scipy')

            xgrid, ygrid = self._get_grid(proj)
            H, _, _ = np.histogram2d(yv, xv, bins=(ygrid, xgrid))

            if H.sum() == 0:
                print('no data in current view')
                return

            H = gaussian_filter(H, sigma=self.bw)
            # BUG FIX: use .min() for the smallest non-zero count (see above)
            print('smallest non-zero count', H[H > 0].min())
            print('max count:', H.max())

            if self.cut_below is None:
                Hmin = H[H > 0].min()
            else:
                Hmin = self.cut_below

            if self.clip_above is None:
                self.Hmax = H.max()
            else:
                self.Hmax = self.clip_above

            if self.scaling == 'ranking':
                from statsmodels.distributions.empirical_distribution import ECDF
                ecdf = ECDF(H.flatten())

            # BUG FIX: H has (len(ygrid)-1) x (len(xgrid)-1) cells; the original
            # 'range(len(xgrid)-2)' silently skipped the last row and column
            for ix in range(len(xgrid)-1):
                for iy in range(len(ygrid)-1):
                    if H[iy, ix] > Hmin:
                        rects_vertices.append((xgrid[ix], ygrid[iy], xgrid[ix+1], ygrid[iy+1]))
                        if self.scaling == 'ranking':
                            rects_colors.append(self.cmap.to_color(ecdf(H[iy, ix]) - ecdf(Hmin), 1 - ecdf(Hmin), 'lin'))
                        else:
                            rects_colors.append(self.cmap.to_color(H[iy, ix], self.Hmax, self.scaling))
        else:
            raise Exception('method not supported')

        self.painter.batch_rects(rects_vertices, rects_colors)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()
        if self.show_colorbar:
            ui_manager.add_colorbar(self.cmap, self.Hmax, self.scaling)
class ConvexHullLayer(BaseLayer):

    def __init__(self, data, col, fill=True, point_size=4):
        """
        Convex hull for a set of points

        :param data: points
        :param col: color
        :param fill: whether to fill the convexhull polygon or not
        :param point_size: size of the points on the convexhull. Points are not rendered if None
        """
        self.data = data
        self.col = col
        self.fill = fill
        self.point_size = point_size

    def invalidate(self, proj):
        self.painter = BatchPainter()
        self.painter.set_color(self.col)
        x, y = proj.lonlat_to_screen(self.data['lon'], self.data['lat'])
        if len(x) >= 3:
            self.painter.convexhull(x, y, self.fill)
        else:
            # fewer than 3 points: no hull exists, draw a line instead
            self.painter.linestrip(x, y)
        # BUG FIX: the docstring allows point_size=None to skip point
        # rendering, but 'None > 0' raises TypeError on Python 3
        if self.point_size is not None and self.point_size > 0:
            self.painter.points(x, y, self.point_size)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()
class GridLayer(BaseLayer):

    def __init__(self, lon_edges, lat_edges, values, cmap, alpha=255, vmin=None, vmax=None, levels=10,
                 colormap_scale='lin', show_colorbar=True):
        """
        Values over a uniform grid

        :param lon_edges: longitude edges
        :param lat_edges: latitude edges
        :param values: matrix of values, one per grid cell
        :param cmap: colormap name
        :param alpha: color alpha
        :param vmin: minimum value for the colormap (default 0)
        :param vmax: maximum value for the colormap (default: max non-NaN value)
        :param levels: number of levels for the colormap
        :param colormap_scale: colormap scale
        :param show_colorbar: show the colorbar in the UI
        """
        self.lon_edges = lon_edges
        self.lat_edges = lat_edges
        self.values = values
        self.cmap = colors.ColorMap(cmap, alpha=alpha, levels=levels)
        self.colormap_scale = colormap_scale
        self.show_colorbar = show_colorbar
        self.vmin = vmin if vmin else 0
        self.vmax = vmax if vmax else self.values[~np.isnan(self.values)].max()

    def invalidate(self, proj):
        self.painter = BatchPainter()
        xv, yv = proj.lonlat_to_screen(self.lon_edges, self.lat_edges)

        cells = []
        cell_colors = []
        for col in range(len(xv) - 1):
            for row in range(len(yv) - 1):
                value = self.values[row, col]
                # NaN compares False here, so NaN cells are skipped too
                if value > self.vmin:
                    cells.append((xv[col], yv[row], xv[col + 1], yv[row + 1]))
                    cell_colors.append(self.cmap.to_color(value, self.vmax, self.colormap_scale))
        self.painter.batch_rects(cells, cell_colors)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()
        if self.show_colorbar:
            ui_manager.add_colorbar(self.cmap, self.vmax, self.colormap_scale)

    def bbox(self):
        return BoundingBox(north=self.lat_edges[-1], south=self.lat_edges[0],
                           west=self.lon_edges[0], east=self.lon_edges[-1])
class GeoJSONLayer(BaseLayer):

    def __init__(self, geojson_or_fname, color='b', linewidth=1, fill=False, f_tooltip=None):
        """Draw features from a GeoJSON document.

        :param geojson_or_fname: a GeoJSON dict, or the filename of a GeoJSON file
        :param color: color for the features; invalidate() also accepts a
            function mapping a feature's properties dict to a color
        :param linewidth: line width for outlines
        :param fill: if True, fill polygons instead of drawing their outline
        :param f_tooltip: function mapping a feature's properties dict to a
            tooltip string shown on mouseover
        """
        self.color = color
        self.linewidth = linewidth
        self.fill = fill
        self.f_tooltip = f_tooltip

        if type(geojson_or_fname) == str:
            with open(geojson_or_fname) as fin:
                self.data = json.load(fin)
        elif type(geojson_or_fname) == dict:
            self.data = geojson_or_fname
        else:
            raise Exception('must provide either dict or filename')

        # Compute the overall bounding box by scanning every geometry once.
        self.boundingbox = None
        for feature in self.data['features']:
            if feature['geometry'] is None:
                # NOTE(review): assumes features carry a 'NAME' property;
                # a geometry-less feature without one raises KeyError here
                print(('feature without geometry data: %s' % feature['properties']['NAME']))
                continue
            if feature['geometry']['type'] == 'Polygon':
                for poly in feature['geometry']['coordinates']:
                    poly = np.array(poly)
                    self.__update_bbox(poly[:,0], poly[:,1])
            elif feature['geometry']['type'] == 'MultiPolygon':
                for multipoly in feature['geometry']['coordinates']:
                    for poly in multipoly:
                        poly = np.array(poly)
                        self.__update_bbox(poly[:,0], poly[:,1])
            elif feature['geometry']['type'] == 'Point':
                lon,lat = feature['geometry']['coordinates']
                self.__update_bbox(np.array([lon]), np.array([lat]))
            elif feature['geometry']['type'] == 'LineString':
                line = np.array(feature['geometry']['coordinates'])
                self.__update_bbox(line[:,0], line[:,1])

    def __update_bbox(self, lon, lat):
        # Grow self.boundingbox to include the given coordinate arrays.
        if self.boundingbox is None:
            self.boundingbox = BoundingBox(north=lat.max(), south=lat.min(), west=lon.min(), east=lon.max())
        else:
            self.boundingbox = BoundingBox(
                                north=max(self.boundingbox.north, lat.max()),
                                south=min(self.boundingbox.south, lat.min()),
                                west=min(self.boundingbox.west, lon.min()),
                                east=max(self.boundingbox.east, lon.max()))

    def invalidate(self, proj):
        self.painter = BatchPainter()
        self.hotspots = HotspotManager()

        for feature in self.data['features']:
            # per-feature color: self.color may be a function of the properties
            if isfunction(self.color):
                self.painter.set_color(self.color(feature['properties']))
            else:
                self.painter.set_color(self.color)

            if feature['geometry']['type'] == 'Polygon':
                for poly in feature['geometry']['coordinates']:
                    poly = np.array(poly)
                    x, y = proj.lonlat_to_screen(poly[:,0], poly[:,1])
                    if self.fill:
                        self.painter.poly(x, y)
                    else:
                        self.painter.linestrip(x, y, self.linewidth, closed=True)
                    if self.f_tooltip:
                        self.hotspots.add_poly(x, y, self.f_tooltip(feature['properties']))
            elif feature['geometry']['type'] == 'MultiPolygon':
                for multipoly in feature['geometry']['coordinates']:
                    for poly in multipoly:
                        poly = np.array(poly)
                        x, y = proj.lonlat_to_screen(poly[:,0], poly[:,1])
                        if self.fill:
                            self.painter.poly(x, y)
                        else:
                            self.painter.linestrip(x, y, self.linewidth, closed=True)
                        if self.f_tooltip:
                            self.hotspots.add_poly(x, y, self.f_tooltip(feature['properties']))
            elif feature['geometry']['type'] == 'Point':
                lon,lat = feature['geometry']['coordinates']
                x, y = proj.lonlat_to_screen(np.array([lon]), np.array([lat]))
                self.painter.points(x, y)
            elif feature['geometry']['type'] == 'LineString':
                line = np.array(feature['geometry']['coordinates'])
                x, y = proj.lonlat_to_screen(line[:,0], line[:,1])
                self.painter.linestrip(x, y, self.linewidth, closed=False)
            else:
                # unsupported geometry types are skipped with a warning
                print(('unknow geometry %s' % feature['geometry']['type']))

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()
        picked = self.hotspots.pick(mouse_x, mouse_y)
        if picked:
            ui_manager.tooltip(picked)

    def bbox(self):
        if self.boundingbox:
            return self.boundingbox
        else:
            return BoundingBox.WORLD
class LabelsLayer(BaseLayer):

    def __init__(self, data, label_column, color=None, font_name=FONT_NAME, font_size=14, anchor_x='left', anchor_y='top'):
        """Create a layer with a text label for each sample

        :param data: data access object
        :param label_column: column in the data access object holding the label text
        :param color: color (defaults to red)
        :param font_name: font name
        :param font_size: font size
        :param anchor_x: horizontal label anchor
        :param anchor_y: vertical label anchor
        """
        self.data = data
        self.label_column = label_column
        self.color = [255, 0, 0] if color is None else color
        self.font_name = font_name
        self.font_size = font_size
        self.anchor_x = anchor_x
        self.anchor_y = anchor_y

    def invalidate(self, proj):
        self.painter = BatchPainter()
        x, y = proj.lonlat_to_screen(self.data['lon'], self.data['lat'])
        self.painter.set_color(self.color)
        # one label per sample, taken from the configured column
        self.painter.labels(x, y, self.data[self.label_column],
                            font_name=self.font_name,
                            font_size=self.font_size,
                            anchor_x=self.anchor_x,
                            anchor_y=self.anchor_y)

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter.batch_draw()

    def bbox(self):
        return BoundingBox.from_points(lons=self.data['lon'], lats=self.data['lat'])
| 34.864175 | 133 | 0.56249 |
ace40e4e53b28b35db3903fba2c352603bff2294 | 2,371 | py | Python | src/crawl/unicrawl/spiders/heaj_programs.py | DeLeb86/unicrawl | 3ef96c3095203a1e714f7f837c4d79a2f3836eb9 | [
"MIT"
] | 5 | 2020-10-28T15:22:40.000Z | 2021-06-08T18:50:24.000Z | src/crawl/unicrawl/spiders/heaj_programs.py | DeLeb86/unicrawl | 3ef96c3095203a1e714f7f837c4d79a2f3836eb9 | [
"MIT"
] | 1 | 2020-12-02T21:06:16.000Z | 2020-12-02T21:06:16.000Z | src/crawl/unicrawl/spiders/heaj_programs.py | DeLeb86/unicrawl | 3ef96c3095203a1e714f7f837c4d79a2f3836eb9 | [
"MIT"
] | 2 | 2020-12-13T17:55:03.000Z | 2021-03-16T20:07:08.000Z | # -*- coding: utf-8 -*-
from abc import ABC
from pathlib import Path
import scrapy
from settings import YEAR, CRAWLING_OUTPUT_FOLDER
BASE_URL = "http://progcours.heaj.be/cocoon/fac/fac{}"
DEPARTMENTS_CODES = {"E": "Département Economique",
"P": "Département Pédagogique",
"T": "Département Technique"}
class HEAJProgramSpider(scrapy.Spider, ABC):
    """
    Program crawler for Haute Ecole Albert Jacquard
    """
    name = "heaj-programs"
    custom_settings = {
        'FEED_URI': Path(__file__).parent.absolute().joinpath(
            f'../../../../{CRAWLING_OUTPUT_FOLDER}heaj_programs_{YEAR}.json').as_uri()
    }

    def start_requests(self):
        # Idiom: iterate (code, faculty) pairs directly instead of indexing
        # the dict for every key
        for code, faculty in DEPARTMENTS_CODES.items():
            yield scrapy.Request(BASE_URL.format(code), self.parse_main,
                                 cb_kwargs={'faculty': faculty})

    def parse_main(self, response, faculty):
        """Parse a department page and follow one link per program.

        :param response: department page response
        :param faculty: human-readable department name (passed through)
        """
        # Program name and link per anchor; the program code is the first
        # token of the link's filename, the cycle the first word of the name
        programs_names = response.xpath("//a[@class='LienProg']/text()").getall()
        programs_links = response.xpath("//a[@class='LienProg']/@href").getall()
        programs_codes = [link.split("/")[-1].split("_")[0] for link in programs_links]
        programs_cycles = [name.split(" ")[0].lower() for name in programs_names]

        for program_name, code, link, cycle in zip(programs_names, programs_codes, programs_links, programs_cycles):
            # normalize the cycle to 'bac' / 'master' / 'other'
            if 'bachelier' in cycle:
                cycle = 'bac'
            elif 'master' in cycle:
                cycle = 'master'
            else:
                cycle = 'other'

            base_dict = {'id': code,
                         'name': program_name,
                         'cycle': cycle,
                         'faculty': faculty,
                         'campus': ''}
            yield response.follow(link, self.parse_program, cb_kwargs={'base_dict': base_dict})

    @staticmethod
    def parse_program(response, base_dict):
        """Parse one program page: collect course ids and ECTS values."""
        ects = response.xpath("//td[contains(@class, 'ContColG')]/text()").getall()
        # '\xa0' cells are empty placeholders, not ECTS values
        ects = [int(e) for e in ects if e != '\xa0']
        courses_ids = response.xpath("//nobr/text()").getall()
        cur_dict = {"url": response.url,
                    "courses": courses_ids,
                    "ects": ects
                    }
        yield {**base_dict, **cur_dict}
| 35.38806 | 116 | 0.564741 |
ace40e9f2dc2e823464ed1c9242b91b0ea9e6d6d | 4,473 | py | Python | desktop/core/src/desktop/lib/apputil.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 2 | 2021-04-27T03:57:00.000Z | 2021-06-18T09:39:58.000Z | desktop/core/src/desktop/lib/apputil.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | null | null | null | desktop/core/src/desktop/lib/apputil.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 2 | 2021-09-06T18:44:45.000Z | 2022-02-24T04:10:10.000Z | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import sys
from django.conf import settings
def has_hadoop():
  """Return True when the configured Hadoop binary exists on disk."""
  # Lazy import: desktop.lib shouldn't depend on an sdk library
  # (i.e. hadoop.conf) in general.
  import hadoop.conf
  hadoop_bin = hadoop.conf.HADOOP_BIN.get()
  return os.path.isfile(hadoop_bin)
def get_current_app(frame=None):
  """
  Return the name of the app from INSTALLED_APPS that is most recently
  present on the call stack.

  Starts from `frame` (by default the caller's frame) and walks outwards
  through f_back until a frame whose module belongs to an installed app
  is found. Returns None when no frame maps to an app.

  Raises Exception when a frame's code object cannot be mapped to any
  module (e.g. a stale .pyc file).
  """
  # Fix: use identity comparison with None (`is None`), not `== None`,
  # which is both idiomatic and safe against objects overriding __eq__.
  if frame is None:
    frame = inspect.currentframe().f_back
  while frame:
    module = getmodule_wrapper(frame.f_code)
    if not module:
      raise Exception(("No module for code %s (frame %s). Perhaps you have an old " +
        ".pyc file hanging around?") % (repr(frame.f_code), repr(frame)))
    app = get_app_for_module(module)
    if app:
      return app
    frame = frame.f_back
  # did not find any app
  return None
def get_app_for_module(module):
  """Return the installed app that `module` belongs to, or None."""
  module_name = module.__name__
  # TODO(philip): This is quite hacky. If desktop becomes a more
  # full application, we'll want to separate this out more cleanly.
  if module_name.startswith("desktop.lib"):
    return None
  for app in settings.INSTALLED_APPS:
    if module_name.startswith(app):
      return app
  return None
def getmodule_wrapper(obj):
  """
  Symlink-safe lookup of the module an object was defined in.

  inspect.getmodule() does not work well with symlinks before Python 2.5
  (it uses realpath() to determine the locations of sys.modules), so on
  older interpreters fall back to a vendored copy of the 2.5 code.
  """
  recent_enough = sys.version_info >= (2, 5)
  return inspect.getmodule(obj) if recent_enough else getmodule_2_5(obj)
#
# The following is taken from Python-2.5.4's inspect.py.
#

# Module-level caches used by getmodule_2_5() below:
#   modulesbyfile   maps absolute (and realpath'd) file names -> module names
#   _filesbymodname maps module names -> their raw __file__ value
modulesbyfile = {}
_filesbymodname = {}
def getmodule_2_5(object, _filename=None):
  """Return the module an object was defined in, or None if not found."""
  # NOTE: vendored verbatim from Python 2.5.4's inspect.py; the parameter
  # deliberately shadows the builtin name `object` to match the original.
  global modulesbyfile
  global _filesbymodname

  # A module is its own module; anything carrying __module__ resolves directly.
  if inspect.ismodule(object):
    return object
  if hasattr(object, '__module__'):
    return sys.modules.get(object.__module__)
  # Try the filename to modulename cache
  if _filename is not None and _filename in modulesbyfile:
    return sys.modules.get(modulesbyfile[_filename])
  # Try the cache again with the absolute file name
  try:
    file = inspect.getabsfile(object)
  except TypeError:
    # Built-in object with no associated source file.
    return None
  if file in modulesbyfile:
    return sys.modules.get(modulesbyfile[file])
  # Update the filename to module name cache and check yet again
  # Copy sys.modules in order to cope with changes while iterating
  for modname, module in sys.modules.items():
    if inspect.ismodule(module) and hasattr(module, '__file__'):
      f = module.__file__
      if f == _filesbymodname.get(modname, None):
        # Have already mapped this module, so skip it
        continue
      _filesbymodname[modname] = f
      f = inspect.getabsfile(module)
      # Always map to the name the module knows itself by
      modulesbyfile[f] = modulesbyfile[
        os.path.realpath(f)] = module.__name__
  if file in modulesbyfile:
    return sys.modules.get(modulesbyfile[file])
  # Check the main module
  main = sys.modules['__main__']
  if not hasattr(object, '__name__'):
    return None
  if hasattr(main, object.__name__):
    mainobject = getattr(main, object.__name__)
    if mainobject is object:
      return main
  # Check builtins
  # NOTE(review): '__builtin__' is the Python 2 name (renamed 'builtins' in
  # Python 3); this lookup raises KeyError on Python 3 if ever reached.
  builtin = sys.modules['__builtin__']
  if hasattr(builtin, object.__name__):
    builtinobject = getattr(builtin, object.__name__)
    if builtinobject is object:
      return builtin
ace40f33c2d52631a400c02594604900fe20f45d | 968 | py | Python | tests/core/staking/data/delegation_test.py | abduramann/secret-sdk-python | a6b0103569052c9526fc802e8d6ffc22f0a3c001 | [
"MIT"
] | 26 | 2022-01-28T21:19:42.000Z | 2022-03-28T01:56:11.000Z | tests/core/staking/data/delegation_test.py | abduramann/secret-sdk-python | a6b0103569052c9526fc802e8d6ffc22f0a3c001 | [
"MIT"
] | 6 | 2022-01-29T10:02:18.000Z | 2022-03-17T03:01:40.000Z | tests/core/staking/data/delegation_test.py | abduramann/secret-sdk-python | a6b0103569052c9526fc802e8d6ffc22f0a3c001 | [
"MIT"
] | 7 | 2022-01-28T19:46:02.000Z | 2022-03-19T15:18:18.000Z | from secret_sdk.core.staking import Delegation, Redelegation, UnbondingDelegation
# /staking/delegators/secret1axk8d8hmpradn7k33x95r8dvq77tajg8v6hn5e/unbonding_delegations
def test_deserialize_unbonding_delegation_examples(load_json_examples):
examples = load_json_examples("./UnbondingDelegation.data.json")
for example in examples:
assert UnbondingDelegation.from_data(example).to_data() == example
# /staking/delegators/secret1axk8d8hmpradn7k33x95r8dvq77tajg8v6hn5e/delegations
def test_deserialize_delegation_examples(load_json_examples):
examples = load_json_examples("./Delegation.data.json")
for example in examples:
assert Delegation.from_data(example).to_data() == example
# /staking/redelegations
def test_deserialize_redelegation_examples(load_json_examples):
examples = load_json_examples("./Redelegation.data.json")
for example in examples:
assert Redelegation.from_data(example).to_data() == example
| 42.086957 | 89 | 0.808884 |
ace410b387a85807a62f1857b88092f8be3285fe | 4,426 | py | Python | cscs-checks/prgenv/environ_check.py | toxa81/reframe | 81357405c0c53ba9def4048c29774c867c69adc2 | [
"BSD-3-Clause"
] | null | null | null | cscs-checks/prgenv/environ_check.py | toxa81/reframe | 81357405c0c53ba9def4048c29774c867c69adc2 | [
"BSD-3-Clause"
] | null | null | null | cscs-checks/prgenv/environ_check.py | toxa81/reframe | 81357405c0c53ba9def4048c29774c867c69adc2 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.osext as osext
import reframe.utility.sanity as sn
@rfm.simple_test
class DefaultPrgEnvCheck(rfm.RunOnlyRegressionTest):
    """Check that PrgEnv-cray is the programming environment loaded by default."""

    def __init__(self):
        self.descr = 'Ensure PrgEnv-cray is loaded by default'
        self.valid_prog_environs = ['builtin']
        self.valid_systems = ['daint:login', 'dom:login', 'eiger:login']
        self.executable = 'module'
        self.maintainers = ['TM', 'CB']
        self.tags = {'production', 'craype'}
        # On eiger the Cray PE shows up as a 'cpe-cray' module in plain
        # `module list` output; elsewhere look for 'PrgEnv-cray' in terse mode.
        if self.current_system.name == 'eiger':
            self.executable_opts = ['list']
            prgenv_patt = r'1\) cpe-cray'
        else:
            self.executable_opts = ['list', '-t']
            prgenv_patt = r'^PrgEnv-cray'
        # Fix: removed the earlier unconditional
        # `self.sanity_patterns = sn.assert_found(r'^PrgEnv-cray', ...)`
        # assignment — it was dead code, always overwritten here.
        self.sanity_patterns = sn.assert_found(prgenv_patt, self.stderr)
@rfm.simple_test
class EnvironmentCheck(rfm.RunOnlyRegressionTest):
    """Verify that selecting a programming environment actually loads its module."""

    def __init__(self):
        self.descr = 'Ensure programming environment is loaded correctly'
        self.valid_systems = ['daint:login', 'dom:login', 'eiger:login']
        self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu', 'PrgEnv-pgi',
                                    'PrgEnv-intel', 'PrgEnv-aocc']
        self.executable = 'module'
        on_eiger = self.current_system.name == 'eiger'
        self.executable_opts = ['list'] if on_eiger else ['list', '-t']
        self.maintainers = ['TM', 'CB']
        self.tags = {'production', 'craype'}

    @rfm.run_before('sanity')
    def set_sanity(self):
        # NOTE: On eiger, the first module of each programming environment,
        # follows the 'cpe-<name>' pattern where <name> corresponds to the
        # 'PrgEnv-<name>' used.
        env_name = self.current_environ.name
        if self.current_system.name == 'eiger':
            module_patt = rf'1\) cpe-{env_name[7:]}'
        else:
            module_patt = rf'^{env_name}'
        self.sanity_patterns = sn.assert_found(module_patt, self.stderr)
class CrayVariablesCheck(rfm.RunOnlyRegressionTest):
    """`module show <mod>` must expose both <MOD>_PREFIX and <MOD>_VERSION."""
    cray_module = parameter()

    def __init__(self):
        self.descr = 'Check for standard Cray variables'
        self.valid_prog_environs = ['builtin']
        self.executable = 'module'
        self.executable_opts = ['show', self.cray_module]
        envvar_prefix = self.cray_module.upper().replace('-', '_')
        self.sanity_patterns = sn.all([
            sn.assert_found(f'{envvar_prefix}_{suffix}', self.stderr)
            for suffix in ('PREFIX', 'VERSION')
        ])
        self.tags = {'production', 'craype'}
        self.maintainers = ['EK', 'TM']
@rfm.simple_test
class CrayVariablesCheckDaint(CrayVariablesCheck):
    """Daint/Dom variant; disables modules known broken for old CDT releases."""
    cray_module = parameter([
        'cray-fftw', 'cray-hdf5', 'cray-hdf5-parallel', 'cray-libsci',
        'cray-mpich', 'cray-netcdf', 'cray-netcdf-hdf5parallel', 'cray-petsc',
        'cray-petsc-complex-64', 'cray-python', 'cray-R', 'cray-tpsl',
        'cray-tpsl-64', 'cudatoolkit', 'gcc', 'papi', 'pmi'
    ])

    def __init__(self):
        super().__init__()
        self.valid_systems = ['daint:login', 'dom:login']

        # FIXME: These modules should be fixed in later releases,
        # while gcc was fixed in 20.11
        # NOTE(review): 'cray-petsc-complex' is not among the parameter values
        # above, so that entry can never match — confirm intent.
        cdt = osext.cray_cdt_version()
        if ((cdt and cdt <= '20.11' and
             self.cray_module in ['cray-petsc-complex',
                                  'cray-petsc-complex-64',
                                  'cudatoolkit']) or
                # Fix: `module_name` was an undefined name (NameError whenever
                # cdt < '20.11'); the parameter is `self.cray_module`.
                (cdt and cdt < '20.11' and self.cray_module == 'gcc')):
            self.valid_systems = []
@rfm.simple_test
class CrayVariablesCheckEiger(CrayVariablesCheck):
    """Eiger variant; disables modules known broken in the current release."""
    cray_module = parameter([
        'cray-fftw', 'cray-hdf5', 'cray-hdf5-parallel', 'cray-libsci',
        'cray-mpich', 'cray-openshmemx', 'cray-parallel-netcdf', 'cray-pmi',
        'cray-python', 'cray-R', 'gcc', 'papi'
    ])

    def __init__(self):
        super().__init__()
        # FIXME: These modules should be fixed in later releases
        broken_modules = {'cray-fftw', 'cray-python', 'cray-mpich'}
        self.valid_systems = \
            [] if self.cray_module in broken_modules else ['eiger:login']
| 37.193277 | 78 | 0.612743 |
ace4134c4ac8dee05685d9bdcc1223d9ba906721 | 3,620 | py | Python | mloncode/data/fields/label_field.py | m09/mloncode | 0c28e6007d5e3cf0e4e0963428f15ec488cb49eb | [
"Apache-2.0"
] | 1 | 2020-03-12T12:58:42.000Z | 2020-03-12T12:58:42.000Z | mloncode/data/fields/label_field.py | m09/mloncode | 0c28e6007d5e3cf0e4e0963428f15ec488cb49eb | [
"Apache-2.0"
] | 2 | 2019-11-07T14:52:30.000Z | 2019-11-08T14:40:06.000Z | mloncode/data/fields/label_field.py | m09/mloncode | 0c28e6007d5e3cf0e4e0963428f15ec488cb49eb | [
"Apache-2.0"
] | null | null | null | from typing import Iterable, List, NamedTuple, Tuple
from torch import device as torch_device, long as torch_long, Tensor, tensor
from torch.nn.utils.rnn import pack_sequence, PackedSequence
from mloncode.data.fields.field import Field
from mloncode.data.vocabulary import Vocabulary
from mloncode.parsing.parser import FORMATTING_INTERNAL_TYPE, Nodes
from mloncode.utils.torch_helpers import unpack_packed_sequence
class LabelFieldOutput(NamedTuple):
    """Output of the label field."""

    # Positions of the formatting nodes whose tokens are predicted.
    indexes: Tensor
    # <GO>-prefixed character-index sequences fed to the decoder (packed).
    decoder_inputs: PackedSequence
    # <STOP>-suffixed character-index target sequences (packed).
    labels: PackedSequence
    # Total number of nodes in the sample; used as an offset when collating.
    n_nodes: int
class LabelField(Field[Nodes, LabelFieldOutput]):
    """Field producing per-character label sequences for formatting nodes."""

    def __init__(self, name: str, type: str) -> None:
        """Create the field with a character vocabulary seeded with specials."""
        super().__init__(name, type)
        # Special tokens: <UNK> handles unseen characters, <PAD>/<GO>/<STOP>
        # are reserved for padding, decoder start and sequence end.
        self.vocabulary = Vocabulary(unknown="<UNK>")
        self.vocabulary.add_item("<PAD>")
        self.vocabulary.add_item("<GO>")
        self.vocabulary.add_item("<STOP>")

    def index(self, sample: Nodes) -> None:
        """Add every character of each formatting node's token to the vocabulary."""
        for node in sample.nodes:
            if node.internal_type == FORMATTING_INTERNAL_TYPE:
                self.vocabulary.add_items(
                    list(node.token if node.token is not None else "")
                )

    def tensorize(self, sample: Nodes) -> LabelFieldOutput:
        """Turn one sample's formatting nodes into packed decoder inputs/labels."""
        node_sequences = []
        for i, node in enumerate(sample.nodes):
            if node.internal_type == FORMATTING_INTERNAL_TYPE:
                mapped = self.vocabulary.get_indexes(
                    list(node.token if node.token else "")
                )
                # Targets end with <STOP>; decoder inputs start with <GO>.
                labels = tensor(
                    mapped + [self.vocabulary.get_index("<STOP>")], dtype=torch_long
                )
                decoder_inputs = tensor(
                    [self.vocabulary.get_index("<GO>")] + mapped, dtype=torch_long
                )
                node_sequences.append((i, decoder_inputs, labels))
        # pack_sequence requires sequences sorted by decreasing length.
        node_sequences.sort(reverse=True, key=lambda s: s[1].shape[0])
        indexes, decoder_inputs_tensor, labels_tensor = map(list, zip(*node_sequences))
        assert len(indexes) == len(decoder_inputs_tensor) and len(indexes) == len(
            labels_tensor
        )
        return LabelFieldOutput(
            indexes=tensor(indexes, dtype=torch_long),
            decoder_inputs=pack_sequence(decoder_inputs_tensor),
            labels=pack_sequence(labels_tensor),
            n_nodes=len(sample.nodes),
        )

    def collate(self, tensors: Iterable[LabelFieldOutput]) -> LabelFieldOutput:
        """Merge several samples, shifting node indexes by a running offset."""
        inputs_list: List[Tuple[int, Tensor, Tensor]] = []
        offset = 0
        for t in tensors:
            for indexes, decoder_inputs, labels in zip(
                (t.indexes + offset).tolist(),
                unpack_packed_sequence(t.decoder_inputs),
                unpack_packed_sequence(t.labels),
            ):
                inputs_list.append((indexes, decoder_inputs, labels))
            offset += t.n_nodes
        # Re-sort the merged sequences so they can be packed again.
        inputs_list.sort(reverse=True, key=lambda t: t[1].shape[0])
        indexes, decoder_inputs_tensor, labels_tensor = map(list, zip(*inputs_list))
        return LabelFieldOutput(
            indexes=tensor(indexes, dtype=torch_long),
            decoder_inputs=pack_sequence(decoder_inputs_tensor),
            labels=pack_sequence(labels_tensor),
            n_nodes=offset,
        )

    def to(self, tensor: LabelFieldOutput, device: torch_device) -> LabelFieldOutput:
        """Move every tensor of the output onto `device` (n_nodes stays a plain int)."""
        return LabelFieldOutput(
            indexes=tensor.indexes.to(device),
            decoder_inputs=tensor.decoder_inputs.to(device),
            labels=tensor.labels.to(device),
            n_nodes=tensor.n_nodes,
        )
| 40.674157 | 87 | 0.630387 |
ace4135eb0037db7946a3738de41bba2981f49be | 20,087 | py | Python | fairnr/data/shape_dataset.py | lingjie0206/NSVF | 1f3eefc4408c8673bc7b3c0208e861e0de90deb0 | [
"MIT"
] | 1 | 2021-07-29T15:13:56.000Z | 2021-07-29T15:13:56.000Z | fairnr/data/shape_dataset.py | lingjie0206/NSVF | 1f3eefc4408c8673bc7b3c0208e861e0de90deb0 | [
"MIT"
] | null | null | null | fairnr/data/shape_dataset.py | lingjie0206/NSVF | 1f3eefc4408c8673bc7b3c0208e861e0de90deb0 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os, glob
import copy
import numpy as np
import torch
import logging
from collections import defaultdict
from fairseq.data import FairseqDataset, BaseWrapperDataset
from . import data_utils, geometry, trajectory
# Module-level logger for dataset loading/caching messages.
logger = logging.getLogger(__name__)
class ShapeDataset(FairseqDataset):
    """
    A dataset that only returns data per shape

    `paths` is either a directory (single shape) or a text file listing one
    shape directory per line. Per-shape data currently consists of the shape
    id and, when present on disk, the camera intrinsics file.
    """
    def __init__(self,
                paths,
                preload=True,
                repeat=1,
                subsample_valid=-1,
                ids=None):

        if os.path.isdir(paths):
            self.paths = [paths]
        else:
            self.paths = [line.strip() for line in open(paths)]

        self.subsample_valid = subsample_valid
        self.total_num_shape = len(self.paths)
        self.cache = None
        self.repeat = repeat

        # -- load per-shape data
        _data_per_shape = {}
        _data_per_shape['shape'] = list(range(len(self.paths)))
        _ixts = self.find_intrinsics()
        if len(_ixts) > 0:
            _data_per_shape['ixt'] = _ixts

        # Keep every subsample_valid-th shape only (validation thinning).
        if self.subsample_valid > -1:
            for key in _data_per_shape:
                _data_per_shape[key] = _data_per_shape[key][::self.subsample_valid]
            self.paths = self.paths[::self.subsample_valid]
            self.total_num_shape = len(self.paths)

        # group the data..
        data_list = []
        for r in range(repeat):
            # HACK: making several copies to enable multi-GPU usage.
            if r == 0 and preload:
                self.cache = []
                logger.info('pre-load the dataset into memory.')

            for id in range(self.total_num_shape):
                element = {}
                for key in _data_per_shape:
                    element[key] = _data_per_shape[key][id]
                data_list.append(element)

                # Only the first copy is actually loaded into the cache.
                if r == 0 and preload:
                    self.cache += [self._load_batch(data_list, id)]

        # group the data together
        self.data = data_list

    def find_intrinsics(self):
        """Collect the per-shape intrinsics file paths (either spelling)."""
        ixt_list = []
        for path in self.paths:
            if os.path.exists(path + '/intrinsic.txt'):
                ixt_list.append(path + '/intrinsic.txt')
            elif os.path.exists(path + '/intrinsics.txt'):
                ixt_list.append(path + '/intrinsics.txt')
        return ixt_list

    def _load_shape(self, packed_data):
        """Load one shape's intrinsics (if any) and id into a dict."""
        intrinsics = data_utils.load_intrinsics(packed_data['ixt']).astype('float32') \
            if packed_data.get('ixt', None) is not None else None
        shape_id = packed_data['shape']
        return {'intrinsics': intrinsics, 'id': shape_id}

    def _load_batch(self, data, index):
        return index, self._load_shape(data[index])

    def __getitem__(self, index):
        # Cached entries are shared across the `repeat` copies, hence the modulo.
        if self.cache is not None:
            return self.cache[index % self.total_num_shape][0], \
                   self.cache[index % self.total_num_shape][1]
        return self._load_batch(self.data, index)

    def __len__(self):
        return len(self.data)

    def num_tokens(self, index):
        return 1

    def _collater(self, samples):
        """Stack per-shape entries into tensors; None fields stay None."""
        results = {}
        results['shape'] = torch.from_numpy(np.array([s[0] for s in samples]))
        for key in samples[0][1]:
            if samples[0][1][key] is not None:
                results[key] = torch.from_numpy(
                    np.array([s[1][key] for s in samples]))
            else:
                results[key] = None
        return results

    def collater(self, samples):
        # An empty/ragged batch triggers IndexError inside _collater; return
        # None so callers can skip the batch.
        try:
            results = self._collater(samples)
        except IndexError:
            results = None
        return results
class ShapeViewDataset(ShapeDataset):
    """
    A dataset contains a series of images renderred offline for an object.

    On top of the per-shape data loaded by ShapeDataset, each shape gets a
    set of rendered views (RGB, extrinsics, and optionally per-view
    intrinsics, depth and mask), found under fixed subdirectories of the
    shape folder.
    """
    def __init__(self,
                paths,
                views,
                num_view,
                subsample_valid=-1,
                resolution=None,
                load_depth=False,
                load_mask=False,
                train=True,
                preload=True,
                repeat=1,
                binarize=True,
                bg_color="1,1,1",
                min_color=-1,
                ids=None):
        super().__init__(paths, False, repeat, subsample_valid, ids)

        self.train = train
        self.load_depth = load_depth
        self.load_mask = load_mask
        self.views = views
        self.num_view = num_view

        # Resolution is either "WxH" or a single int used for both axes.
        if isinstance(resolution, str):
            self.resolution = [int(r) for r in resolution.split('x')]
        else:
            self.resolution = [resolution, resolution]
        self.world2camera = True
        self.cache_view = None

        # Background color: "r,g,b" string or a single scalar; when colors
        # live in [-1, 1] (min_color == -1) rescale it accordingly.
        bg_color = [float(b) for b in bg_color.split(',')] \
            if isinstance(bg_color, str) else [bg_color]
        if min_color == -1:
            bg_color = [b * 2 - 1 for b in bg_color]
        if len(bg_color) == 1:
            bg_color = bg_color + bg_color + bg_color
        self.bg_color = bg_color
        self.min_color = min_color
        self.apply_mask_color = (self.bg_color[0] >= -1) & (self.bg_color[0] <= 1)  # if need to apply

        # -- load per-view data
        _data_per_view = {}
        _data_per_view['rgb'] = self.find_rgb()
        _data_per_view['ext'] = self.find_extrinsics()
        if self.find_intrinsics_per_view() is not None:
            _data_per_view['ixt_v'] = self.find_intrinsics_per_view()
        if self.load_depth:
            _data_per_view['dep'] = self.find_depth()
        if self.load_mask:
            _data_per_view['mask'] = self.find_mask()
        _data_per_view['view'] = self.summary_view_data(_data_per_view)

        # group the data.
        _index = 0
        for r in range(repeat):
            # HACK: making several copies to enable multi-GPU usage.
            if r == 0 and preload:
                self.cache = []
                logger.info('pre-load the dataset into memory.')

            for id in range(self.total_num_shape):
                element = {}
                total_num_view = len(_data_per_view['rgb'][id])
                # Training shuffles the view order once; validation keeps it.
                perm_ids = np.random.permutation(total_num_view) if train else np.arange(total_num_view)
                for key in _data_per_view:
                    element[key] = [_data_per_view[key][id][i] for i in perm_ids]
                self.data[_index].update(element)

                if r == 0 and preload:
                    # Cache file name encodes phase, resolution and flags.
                    phase_name = f"{'train' if self.train else 'valid'}" + \
                        f".{self.resolution[0]}x{self.resolution[1]}" + \
                        f"{'.d' if load_depth else ''}" + \
                        f"{'.m' if load_mask else ''}" + \
                        f"{'b' if not self.apply_mask_color else ''}" + \
                        "_full"
                    logger.info("preload {}-{}".format(id, phase_name))
                    if binarize:
                        cache = self._load_binary(id, np.arange(total_num_view), phase_name)
                    else:
                        cache = self._load_batch(self.data, id, np.arange(total_num_view))
                    self.cache += [cache]
                _index += 1

        # group the data together
        self.data_index = []
        for i, d in enumerate(self.data):
            if self.train:
                index_list = list(range(len(d['rgb'])))
                self.data_index.append(
                    data_utils.InfIndex(index_list, shuffle=True)
                )
            else:
                # Each repeat-copy walks a disjoint window of num_view views.
                copy_id = i // self.total_num_shape
                index_list = []
                for j in range(copy_id * num_view, copy_id * num_view + num_view):
                    index_list.append(j % len(d['rgb']))
                self.data_index.append(
                    data_utils.InfIndex(index_list, shuffle=False)
                )

    def _load_binary(self, id, views, phase='train'):
        """Load the shape's npz cache, building and saving it when missing."""
        root = os.path.dirname(self.data[id]['shape'])
        npzfile = os.path.join(root, '{}.npz'.format(phase))
        try:
            with np.load(npzfile, allow_pickle=True) as f:
                return f['cache']
        except Exception:
            cache = self._load_batch(self.data, id, views)
            # Only rank 0 writes the cache file in distributed runs.
            if data_utils.get_rank() == 0:
                np.savez(npzfile, cache=cache)
            return cache

    def select(self, file_list):
        """Keep only the files for the requested `self.views` of each shape."""
        if len(file_list[0]) == 0:
            raise FileNotFoundError
        return [[files[i] for i in self.views] for files in file_list]

    def find_rgb(self):
        # Rendered images live in 'rgb/' or, as a fallback, 'color/'.
        try:
            return self.select([sorted(glob.glob(path + '/rgb/*.*g')) for path in self.paths])
        except FileNotFoundError:
            try:
                return self.select([sorted(glob.glob(path + '/color/*.*g')) for path in self.paths])
            except FileNotFoundError:
                raise FileNotFoundError("CANNOT find rendered images.")

    def find_depth(self):
        try:
            return self.select([sorted(glob.glob(path + '/depth/*.exr')) for path in self.paths])
        except FileNotFoundError:
            raise FileNotFoundError("CANNOT find estimated depths images")

    def find_mask(self):
        try:
            return self.select([sorted(glob.glob(path + '/mask/*')) for path in self.paths])
        except FileNotFoundError:
            raise FileNotFoundError("CANNOT find precomputed mask images")

    def find_extrinsics(self):
        # 'extrinsic/' holds world->camera matrices; 'pose/' camera->world.
        try:
            return self.select([sorted(glob.glob(path + '/extrinsic/*.txt')) for path in self.paths])
        except FileNotFoundError:
            try:
                self.world2camera = False
                return self.select([sorted(glob.glob(path + '/pose/*.txt')) for path in self.paths])
            except FileNotFoundError:
                raise FileNotFoundError('world2camera or camera2world matrices not found.')

    def find_intrinsics_per_view(self):
        # Per-view intrinsics are optional; None means "use per-shape ones".
        try:
            return self.select([sorted(glob.glob(path + '/intrinsic/*.txt')) for path in self.paths])
        except FileNotFoundError:
            return None

    def summary_view_data(self, _data_per_view):
        """Check all data types have the same view count; return view-id lists."""
        keys = [k for k in _data_per_view if _data_per_view[k] is not None]
        num_of_objects = len(_data_per_view[keys[0]])
        for k in range(num_of_objects):
            assert len(set([len(_data_per_view[key][k]) for key in keys])) == 1, "numer of views must be consistent."
        return [list(range(len(_data_per_view[keys[0]][k]))) for k in range(num_of_objects)]

    def num_tokens(self, index):
        return self.num_view

    def _load_view(self, packed_data, view_idx):
        """Load one view's image, camera and optional depth/mask as flat arrays."""
        image, uv, ratio = data_utils.load_rgb(
            packed_data['rgb'][view_idx],
            resolution=self.resolution,
            bg_color=self.bg_color,
            min_rgb=self.min_color)
        rgb, alpha = image[:3], image[3]  # C x H x W for RGB
        extrinsics = data_utils.load_matrix(packed_data['ext'][view_idx])
        extrinsics = geometry.parse_extrinsics(extrinsics, self.world2camera).astype('float32')  # this is C2W
        intrinsics = data_utils.load_intrinsics(packed_data['ixt_v'][view_idx]).astype('float32') \
            if packed_data.get('ixt_v', None) is not None else None

        z, mask = None, None
        if packed_data.get('dep', None) is not None:
            z = data_utils.load_depth(packed_data['dep'][view_idx], resolution=self.resolution)
        if packed_data.get('mask', None) is not None:
            mask = data_utils.load_mask(packed_data['mask'][view_idx], resolution=self.resolution)
            if self.apply_mask_color:   # we can also not apply mask
                # Composite the background color behind masked-out pixels.
                rgb = rgb * mask[None, :, :] + (1 - mask[None, :, :]) * np.asarray(self.bg_color)[:, None, None]

        # All image-like data is flattened to (C, H*W) / (H*W,) arrays.
        return {
            'path': packed_data['rgb'][view_idx],
            'view': view_idx,
            'uv': uv.reshape(2, -1),
            'colors': rgb.reshape(3, -1),
            'alpha': alpha.reshape(-1),
            'extrinsics': extrinsics,
            'intrinsics': intrinsics,
            'depths': z.reshape(-1) if z is not None else None,
            'mask': mask.reshape(-1) if mask is not None else None,
            'size': np.array([rgb.shape[1], rgb.shape[2]] + ratio, dtype=np.float32)
        }

    def _load_batch(self, data, index, view_ids=None):
        # Default: draw num_view fresh view ids from the shape's index stream.
        if view_ids is None:
            view_ids = [next(self.data_index[index]) for _ in range(self.num_view)]
        return index, self._load_shape(data[index]), [self._load_view(data[index], view_id) for view_id in view_ids]

    def __getitem__(self, index):
        if self.cache is not None:
            view_ids = [next(self.data_index[index]) for _ in range(self.num_view)]
            # deepcopy so downstream in-place edits don't corrupt the cache.
            return copy.deepcopy(self.cache[index % self.total_num_shape][0]), \
                   copy.deepcopy(self.cache[index % self.total_num_shape][1]), \
                   [copy.deepcopy(self.cache[index % self.total_num_shape][2][i]) for i in view_ids]
        return self._load_batch(self.data, index)

    def collater(self, samples):
        """Stack per-view fields as (batch, view, ...) tensors on top of shape data."""
        results = super().collater(samples)
        if results is None:
            return results
        for key in samples[0][2][0]:
            if key == 'path':
                results[key] = [[d[key] for d in s[2]] for s in samples]
            elif samples[0][2][0][key] is not None:
                results[key] = torch.from_numpy(
                    np.array([[d[key] for d in s[2]] for s in samples])
                )

        # Color data is moved to channel-last layout for downstream use.
        results['colors'] = results['colors'].transpose(2, 3)
        if results.get('full_rgb', None) is not None:
            results['full_rgb'] = results['full_rgb'].transpose(2, 3)
        return results
class ShapeViewStreamDataset(ShapeViewDataset):
    """
    Different from ShapeViewDataset.
    We merge all the views together into one dataset regardless of the shapes.

    ** HACK **: an alternative of the ShapeViewDataset

    Every dataset element is one (shape, view) pair, so repeat > 1 and
    num_view > 1 are not supported.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        assert self.repeat == 1, "Comboned dataset does not support repeating"
        assert self.num_view == 1, "StreamDataset only supports one view per shape at a time."

        # reset the data_index: one flat entry per (shape, view) pair.
        self.data_index = []
        for i, d in enumerate(self.data):
            for j, _ in enumerate(d['rgb']):
                self.data_index.append((i, j))  # shape i, view j

    def __len__(self):
        return len(self.data_index)

    def _load_batch(self, data, shape_id, view_ids):
        """Load the shape-level data plus the requested views for one shape."""
        return shape_id, self._load_shape(data[shape_id]), [self._load_view(data[shape_id], view_id) for view_id in view_ids]

    def __getitem__(self, index):
        shape_id, view_id = self.data_index[index]
        if self.cache is not None:
            # deepcopy so downstream in-place edits don't corrupt the cache.
            return copy.deepcopy(self.cache[shape_id % self.total_num_shape][0]), \
                   copy.deepcopy(self.cache[shape_id % self.total_num_shape][1]), \
                   [copy.deepcopy(self.cache[shape_id % self.total_num_shape][2][view_id])]
        return self._load_batch(self.data, shape_id, [view_id])

    def _load_binary(self, id, views, phase='train'):
        """Load (or build and persist) the preprocessed npz cache for shape `id`."""
        # NOTE(review): keys off 'ixt', which ShapeDataset only sets when an
        # intrinsics file exists — confirm every shape has one in this mode.
        root = os.path.dirname(self.data[id]['ixt'])
        npzfile = os.path.join(root, '{}.npz'.format(phase))
        try:
            with np.load(npzfile, allow_pickle=True) as f:
                return f['cache']
        except Exception:
            # Fix: _load_batch expects a *list* of view ids; passing the bare
            # numpy scalar made the inner comprehension try to iterate an
            # np.int64 and raised TypeError when building the cache.
            caches = [self._load_batch(self.data, id, [view_id]) for view_id in views]
            cache = [caches[0][0], caches[0][1], [caches[i][2][0] for i in range(len(views))]]
            if data_utils.get_rank() == 0:
                np.savez(npzfile, cache=cache)
            return cache
class SampledPixelDataset(BaseWrapperDataset):
    """
    A wrapper dataset, which split rendered images into pixels

    Each view's flattened image fields are subsampled down to num_sample
    pixels (per patch_size), picked by data_utils.sample_pixel_from_image.
    """
    def __init__(self,
                dataset,
                num_sample=None,
                sampling_on_mask=1.0,
                sampling_on_bbox=False,
                sampling_at_center=1.0,
                resolution=512,
                patch_size=1):
        super().__init__(dataset)
        self.num_sample = num_sample
        self.sampling_on_mask = sampling_on_mask
        self.sampling_on_bbox = sampling_on_bbox
        self.sampling_at_center = sampling_at_center
        self.patch_size = patch_size
        self.res = resolution

    def __getitem__(self, index):
        index, data_per_shape, data_per_view = self.dataset[index]

        # sample pixels from the original images
        # (the mask biases sampling; falls back to the alpha channel)
        sample_index = [
            data_utils.sample_pixel_from_image(
                data['alpha'].shape[-1],
                self.num_sample,
                data.get('mask', None)
                    if data.get('mask', None) is not None
                    else data.get('alpha', None),
                self.sampling_on_mask,
                self.sampling_on_bbox,
                self.sampling_at_center,
                width=int(data['size'][1]),
                patch_size=self.patch_size)
            for data in data_per_view
        ]

        for i, data in enumerate(data_per_view):
            # Keep the full image around under 'full_rgb' before subsampling.
            data_per_view[i]['full_rgb'] = copy.deepcopy(data['colors'])
            for key in data:
                # Subsample every flattened per-pixel field; camera data,
                # the view id and the preserved full image are left intact.
                if data[key] is not None \
                    and (key != 'extrinsics' and key != 'view' and key != 'full_rgb') \
                    and data[key].shape[-1] > self.num_sample:

                    if len(data[key].shape) == 2:
                        data_per_view[i][key] = data[key][:, sample_index[i]]
                    else:
                        data_per_view[i][key] = data[key][sample_index[i]]
            data_per_view[i]['index'] = sample_index[i]
        return index, data_per_shape, data_per_view

    def num_tokens(self, index):
        return self.dataset.num_view * self.num_sample
class WorldCoordDataset(BaseWrapperDataset):
    """
    Wrapper that augments each view with world-space rays (origin plus
    unit-depth direction) computed from its UV coordinates and cameras.
    """
    def __getitem__(self, index):
        index, data_per_shape, data_per_view = self.dataset[index]

        def _attach_rays(view):
            inv_RT = view['extrinsics']
            # camera center in world coordinates (XYZ)
            ray_start = inv_RT[:3, 3]
            # ray directions through the sampled pixels at depth 1
            ray_dir = geometry.get_ray_direction(
                ray_start, view['uv'], data_per_shape['intrinsics'], inv_RT, 1
            )
            # the original entries are kept alongside for tracking purposes
            view.update({'ray_start': ray_start, 'ray_dir': ray_dir})
            return view

        return index, data_per_shape, [_attach_rays(v) for v in data_per_view]

    def collater(self, samples):
        results = self.dataset.collater(samples)
        if results is None:
            return results

        results['ray_start'] = results['ray_start'].unsqueeze(-2)
        results['ray_dir'] = results['ray_dir'].transpose(2, 3)
        results['colors'] = results['colors'].transpose(2, 3)
        if results.get('full_rgb', None) is not None:
            results['full_rgb'] = results['full_rgb'].transpose(2, 3)
        return results
class InfiniteDataset(BaseWrapperDataset):
    """
    Wrapper exposing a fixed, very large virtual length so sampling never
    hits an epoch boundary; indices simply wrap around the real dataset.
    """
    def __init__(self, dataset, max_len=1000000):
        super().__init__(dataset)
        self.MAXLEN = max_len

    def __len__(self):
        return self.MAXLEN

    def ordered_indices(self):
        return np.arange(self.MAXLEN)

    def __getitem__(self, index):
        return self.dataset[index % len(self.dataset)]
ace4149b679748d7cf78c01401edab781f3118de | 309 | py | Python | setup.py | antoinealb/ghg | 50c744edf319614a8cb1d4554731b74650509462 | [
"BSD-2-Clause"
] | 5 | 2015-10-22T12:04:50.000Z | 2015-10-22T19:27:34.000Z | setup.py | antoinealb/ghg | 50c744edf319614a8cb1d4554731b74650509462 | [
"BSD-2-Clause"
] | null | null | null | setup.py | antoinealb/ghg | 50c744edf319614a8cb1d4554731b74650509462 | [
"BSD-2-Clause"
] | null | null | null | from setuptools import setup
setup(
name='ghg',
version = '0.2.0',
description = 'Simple script to quickly go on a Github page',
author = 'antoinealb',
url = 'http://github.com/antoinealb/ghg',
py_modules = ['ghg.ghg'],
scripts = ['bin/ghg', 'bin/github-get-stars'])
| 30.9 | 67 | 0.598706 |
ace416cc52b200e3d69e9d114b48d6c5ca411d18 | 5,899 | py | Python | zeeguu/model/user_article.py | alinbalutoiu/Zeeguu-Core | 348f0aa05603fb9d2b06e1f38dbf6bb9fdcaac6d | [
"MIT"
] | null | null | null | zeeguu/model/user_article.py | alinbalutoiu/Zeeguu-Core | 348f0aa05603fb9d2b06e1f38dbf6bb9fdcaac6d | [
"MIT"
] | null | null | null | zeeguu/model/user_article.py | alinbalutoiu/Zeeguu-Core | 348f0aa05603fb9d2b06e1f38dbf6bb9fdcaac6d | [
"MIT"
] | null | null | null | from datetime import datetime
from sqlalchemy.orm.exc import NoResultFound
import zeeguu
from sqlalchemy import Column, UniqueConstraint, Integer, ForeignKey, DateTime, Boolean, or_
from sqlalchemy.orm import relationship
from zeeguu.constants import JSON_TIME_FORMAT
from zeeguu.model import Article, User
class UserArticle(zeeguu.db.Model):
    """
    Association between a user and an article.

    Tracks three interactions:
    - ``opened``  : when the user first opened the article (DateTime or None)
    - ``starred`` : when the user starred it (DateTime or None)
    - ``liked``   : whether the like button is pressed (Boolean)
    """
    __table_args__ = {'mysql_collate': 'utf8_bin'}
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey(User.id))
    user = relationship(User)
    article_id = Column(Integer, ForeignKey(Article.id))
    article = relationship(Article)
    # Together an url_id and user_id are UNIQUE :)
    UniqueConstraint(article_id, user_id)
    # once an article has been opened, we display it
    # in a different way in the article list; we might
    # also, just as well not even show it anymore
    # we don't keep only the boolean here, since it is
    # more informative to have the time when opened;
    # could turn out to be useful for showing the
    # user reading history for example
    opened = Column(DateTime)
    # There's a star icon at the top of an article;
    # Reader can use it to mark the article in any way
    # they like.
    starred = Column(DateTime)
    # There's a button at the bottom of every article
    # this tracks the state of that button
    liked = Column(Boolean)
    def __init__(self, user, article, opened=None, starred=None, liked=False):
        self.user = user
        self.article = article
        self.opened = opened
        self.starred = starred
        self.liked = liked
    def __repr__(self):
        # Debug-friendly representation combining both entities and all flags.
        return f'{self.user} and {self.article}: Opened: {self.opened}, Starred: {self.starred}, Liked: {self.liked}'
    def user_info_as_string(self):
        # Like __repr__ but without the article part.
        return f'{self.user} Opened: {self.opened}, Starred: {self.starred}, Liked: {self.liked}'
    def set_starred(self, state=True):
        # Starring stores the timestamp; un-starring clears it.
        if state:
            self.starred = datetime.now()
        else:
            self.starred = None
    def set_liked(self, new_state=True):
        self.liked = new_state
    def last_interaction(self):
        """
        Most recent known interaction time, used for ordering articles.
        Prefers ``opened`` over ``starred``; None when neither is set.
        :return: datetime or None
        """
        if self.opened:
            return self.opened
        if self.starred:
            return self.starred
        return None
    @classmethod
    def find_by_article(cls, article: Article):
        # All user interactions recorded for a given article.
        try:
            return cls.query.filter_by(article=article).all()
        except NoResultFound:
            return None
    @classmethod
    def find(cls, user: User, article: Article):
        """
        Look up the (user, article) pair; returns None when it does
        not exist yet (does NOT create it -- see find_or_create).
        """
        try:
            return cls.query.filter_by(
                user=user,
                article=article
            ).one()
        except NoResultFound:
            return None
    @classmethod
    def find_or_create(cls, session, user: User, article: Article, opened=None, liked=False, starred=None):
        """
        Return the existing (user, article) row, or create and commit a
        new one.  If the insert fails (e.g. a concurrent request created
        the row first, violating the unique constraint), roll back and
        return the row inserted by the other writer.
        """
        try:
            return cls.query.filter_by(
                user=user,
                article=article
            ).one()
        except NoResultFound:
            try:
                new = cls(user, article, opened=opened, liked=liked, starred=starred)
                session.add(new)
                session.commit()
                return new
            # NOTE(review): broad `except Exception` assumes any failure here
            # is the duplicate-insert race; other DB errors are masked too.
            except Exception as e:
                print("seems we avoided a race condition")
                session.rollback()
                return cls.query.filter_by(
                    user=user,
                    article=article
                ).one()
    @classmethod
    def all_starred_articles_of_user(cls, user):
        # Rows for this user where a starred timestamp is present.
        return cls.query.filter_by(user=user).filter(UserArticle.starred.isnot(None)).all()
    @classmethod
    def all_starred_or_liked_articles_of_user(cls, user):
        # Rows for this user that are starred OR liked.
        return cls.query.filter_by(user=user).filter(
            or_(UserArticle.starred.isnot(None), UserArticle.liked.isnot(False))).all()
    @classmethod
    def all_starred_articles_of_user_info(cls, user):
        """
        prepares info as it is promised by /get_starred_articles
        :param user:
        :return: list of plain dicts, one per starred article
        """
        user_articles = cls.all_starred_articles_of_user(user)
        dicts = [dict(
            user_id=each.user.id,
            url=each.article.url.as_string(),
            title=each.article.title,
            language=each.article.language.code,
            starred_date=each.starred.strftime(JSON_TIME_FORMAT),
            # always True here: the query above filters on starred IS NOT NULL
            starred=(each.starred is not None),
            liked=each.liked
        ) for each in user_articles]
        return dicts
    @classmethod
    def all_starred_and_liked_articles_of_user_info(cls, user):
        """
        prepares info as it is promised by /get_starred_articles
        :param user:
        :return: list of article-info dicts (see user_article_info)
        """
        # local import avoids a circular import with the recommender module
        from zeeguu.content_recommender.mixed_recommender import user_article_info
        user_articles = cls.all_starred_or_liked_articles_of_user(user)
        return [
            user_article_info(user, each.article, with_translations=False)
            for each in user_articles
            if each.last_interaction() is not None
        ]
    @classmethod
    def exists(cls, obj):
        # True iff a row with this object's primary key is in the DB.
        try:
            cls.query.filter(
                cls.id == obj.id
            ).one()
            return True
        except NoResultFound:
            return False
| 28.635922 | 117 | 0.605018 |
ace41888475c46118f691ba4133996b187bca276 | 5,985 | py | Python | SolveRR.py | NickRoss/SolveRagingRapids | 70250a790ee1e52482038007ddb98e618b3ac4bf | [
"MIT"
] | null | null | null | SolveRR.py | NickRoss/SolveRagingRapids | 70250a790ee1e52482038007ddb98e618b3ac4bf | [
"MIT"
] | null | null | null | SolveRR.py | NickRoss/SolveRagingRapids | 70250a790ee1e52482038007ddb98e618b3ac4bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
import itertools
import string
# Map 0..25 -> 'a'..'z', used to print the final solution as letters.
num2alpha = dict(zip(range(0, 26), string.ascii_lowercase))
### piece_list conains a list of the pieces. Starting from their knees (piece looking at spot 0)
### and going clockwise, a 1 means "out" and a 0 means "in", refering to the type of connection.
### On the bottom of each piece is a letter (A-L). The zeroth element is "A", 1st element is "B", etc.
### Note that the basic solution is to solve the corners first, and then itterate over every possibly within
### that search space. Since the number of tiles which fit into a corner is quite small, this reduces the
### search space from 12! to something much more manageable. Note that the code has two overriding issues I'm
### too lazy to fix: shitty globals/naming and the main loop doesn't stop when it finds a solution.
piece_list = [
    [0,1,0,1,1,0] #A
    ,[0,0,1,1,0,1] #B
    ,[1,0,1,0,0,1] #C
    ,[0,1,0,0,1,0] #D
    ,[1,1,0,1,1,0] #E
    ,[0,0,1,0,1,0] #F
    ,[1,1,0,0,1,0] #G
    ,[0,0,1,1,1,0] #H
    ,[1,1,0,1,0,1] #I
    ,[1,0,1,1,1,0] #J
    ,[1,1,0,0,0,1] #K
    ,[0,1,0,0,0,1] #L
]
### The variables "backropedir" and "faceropedir" contain information about
### what the pieces have to be. We call each element a "frame filter"
### There are three different values 1,0 and 2
### A 1 means that there is an outer tab in that position, 0 means there is an
### inner tab in the position and 2 means that anything can be in the position.
### The order of the list is as follows -- for the back_rope_dir, hold the frame with
### The rope in the back and then read as
### 0,1,2
### 3,4,5
### 6,7,8
### 9,10,11
### For face_rope_dir, hold the frame with the rope at the front and then:
### 0,1,2
### 3,4,5
### 6,7,8
### 9,10,11
back_rope_dir = [
    [1,2,2,2,1,0] #0
    , [1,2,2,2,2,2] #1
    , [1,0,1,2,2,2] #2
    , [2,2,2,2,1,0] #3
    , [2,2,2,2,2,2] #4
    , [2,1,0,2,2,2] #5
    , [2,2,2,2,1,0] #6
    , [2,2,2,2,2,2] #7
    , [2,1,0,2,2,2] #8
    , [2,2,2,0,0,1] #9
    , [2,2,2,0,2,2] #10
    , [2,1,0,0,2,2] #11
]
face_rope_dir = [
    [0,2,2,2,1,0] #0
    , [0,2,2,2,2,2] #1
    , [0,0,1,2,2,2] #2
    # NOTE(review): the 9 below looks like a typo -- valid filter values are
    # 0/1/2 per the comment block above. Presumably it should be 0; confirm.
    , [2,2,2,2,1,9] #3
    , [2,2,2,2,2,2] #4
    , [2,1,0,2,2,2] #5
    , [2,2,2,2,1,0] #6
    , [2,2,2,2,2,2] #7
    , [2,1,0,2,2,2] #8
    , [2,2,2,1,0,1] #9
    , [2,2,2,1,2,2] #10
    , [2,1,0,1,2,2] #11
]
### all_set is the set of all numbers and is used to determine which pieces are left
### to go in the puzzle
all_set = set(range(12))
### Set the following to 1/0 depending on which direction you wish to solve
if 1 == 0:
    ### This does back_rope_dir
    np_filters = [np.array(x) for x in back_rope_dir]
else:
    ### This does face_rope_dir
    np_filters = [np.array(x) for x in face_rope_dir]
np_piece_list = [np.array(x) for x in piece_list]
### All of the above are converted to np arrays so that some functions below
### (which I no longer remember) can be used.
def piece_fit(dlst, flt):
    """Return True if a piece passes a frame filter.

    Works element-wise on the difference ``dlst - flt``: a zero anywhere
    means the piece's tab type collides with the frame's requirement at
    that position (1 - 1 or 0 - 0), so the piece fails.  A filter value
    of 2 ("anything goes") can never yield a zero difference against a
    0/1 piece value, so it always matches.

    Bug fix: the original had ``else: return True`` inside the loop, so
    it returned after inspecting only the FIRST element; now every
    position is checked before declaring a fit.
    """
    for sval in dlst - flt:
        if sval == 0:
            return False
    return True
def genvalst(nplst, npfltrs):
    """For every piece, compute which frame positions it can occupy.

    Returns a pair ``(val, vallst)``:
    - ``vallst[x]`` is the list of booleans "piece x fits filter y";
    - ``val[x]`` is ``[x, vallst[x]]`` (indexed variant kept for callers
      that also want the piece id).

    Fixes over the original: it ignored its own parameters and read the
    module-level ``np_piece_list`` / ``np_filters`` instead, and it
    evaluated every ``piece_fit`` call twice.
    """
    val = []
    vallst = []
    for x in range(len(nplst)):
        fits = [piece_fit(nplst[x], y) for y in npfltrs]
        val.append([x, fits])
        vallst.append(list(fits))
    return val, vallst
def setcorners(vallst):
    """Enumerate all ways to place four distinct pieces in the corners.

    ``vallst[x][spt]`` tells whether piece ``x`` fits frame position
    ``spt``; the corner positions of the 4x3 frame are 0, 2, 9 and 11.
    Returns every 4-tuple of piece indices (one per corner, in that
    order) in which all four pieces are distinct.

    Fix: the original read the module-level ``val`` instead of its own
    ``vallst`` parameter (``val[x][1][spt]`` is ``vallst[x][spt]``).
    """
    # Candidate pieces for each corner position.
    candidates = []
    for spt in [0, 2, 9, 11]:
        candidates.append([x for x in range(len(vallst)) if vallst[x][spt]])
    # Keep only assignments using four distinct pieces.
    return [t for t in itertools.product(*candidates) if len(set(t)) == 4]
def used(sol):
    # Tiles that have actually been placed: every entry of the partial
    # solution except the empty-slot marker ``None``.
    return set(sol) - {None}
def unused(allS, sol):
    # Tiles from the full set that do not yet appear in the solution.
    placed = set(sol)
    return allS - placed
def ltor(lst, ltile, rtile):
    """True when tile ``ltile`` can sit immediately left of ``rtile``.

    The left tile's edges 1 and 2 must mate with the right tile's edges
    5 and 4 respectively, i.e. the tab types must differ at both
    touching positions (an equal pair would be out-vs-out or in-vs-in).
    """
    left, right = lst[ltile], lst[rtile]
    return left[1] != right[5] and left[2] != right[4]
def ttob(lst, ttile, btile):
    """True when tile ``ttile`` can sit immediately above ``btile``:
    the top tile's bottom edge (index 3) must have a different tab type
    than the bottom tile's top edge (index 0)."""
    return lst[ttile][3] != lst[btile][0]
def checkint(possol):
    """Check internal consistency of a complete 12-tile layout.

    ``possol`` maps frame position (0..11, four rows of three) to a tile
    index into the module-level ``piece_list``.  Every horizontal
    neighbour pair is verified with ``ltor`` and every vertical pair
    with ``ttob``.  Returns 0 when the layout is consistent, 1 otherwise
    (original error-code convention preserved).
    """
    # Positions that have a neighbour to the right (skip the last column)
    # and positions that have a neighbour below (skip the last row).
    horizontal = [0, 1, 3, 4, 6, 7, 9, 10]
    vertical = [0, 1, 2, 3, 4, 5, 6, 7, 8]
    for pos in horizontal:
        if not ltor(piece_list, possol[pos], possol[pos + 1]):
            return 1
    for pos in vertical:
        if not ttob(piece_list, possol[pos], possol[pos + 3]):
            return 1
    return 0
# Precompute which pieces fit which frame positions, then solve the
# corners first and brute-force permutations of the remaining pieces.
[val, vallst] = genvalst(np_piece_list, np_filters)
cornersols = setcorners(vallst)
finalsol = []
for csol in cornersols:
    # Seed a 12-slot board with the four corner pieces.
    sol = [None] * 12
    sol[0] = csol[0]
    sol[2] = csol[1]
    sol[9] = csol[2]
    sol[11] = csol[3]
    # NOTE(review): this rebinds the name `used`, shadowing the helper
    # function `used()` defined above; the variable itself is never read.
    used = set(sol) - set([None] )
    # Try every ordering of the remaining 8 pieces in the open slots.
    for x in itertools.permutations(list(unused(all_set, sol))):
        vlst = list(x)
        newsol = []
        for pos in range(len(sol)):
            if sol[pos] == None:
                # Fill the next empty slot; bail early if the piece
                # cannot occupy this frame position at all.
                possval = vlst.pop()
                if vallst[possval][pos] == False:
                    break
                else:
                    newsol.append(possval)
            else:
                newsol.append( sol[pos] )
        # A full 12-piece candidate: accept it if all neighbours mate.
        if len(newsol) == 12:
            if checkint(newsol) == 0:
                finalsol = newsol
                break
        # NOTE(review): only the inner loop breaks -- as the header
        # comment admits, the outer corner loop keeps running after a
        # solution is found (finalsol keeps the last one found).
print('finished computation')
### Final Solution
print( list( map(lambda x: num2alpha[x], finalsol) ))
## EOF ##
| 26.6 | 110 | 0.562573 |
ace41a2d694b15b973a015c75706cf4ab52666b3 | 2,200 | py | Python | pico/micropython/mini-oled-i2c-ssd1306-image/main.py | leon-anavi/rpi-examples | c729eaa3e03f915df1c1d5eca711c865f607cd50 | [
"MIT"
] | 126 | 2016-03-18T09:05:35.000Z | 2022-03-30T21:52:52.000Z | pico/micropython/mini-oled-i2c-ssd1306-image/main.py | leon-anavi/rpi-examples | c729eaa3e03f915df1c1d5eca711c865f607cd50 | [
"MIT"
] | 4 | 2016-05-22T10:40:42.000Z | 2018-05-09T11:13:27.000Z | pico/micropython/mini-oled-i2c-ssd1306-image/main.py | leon-anavi/rpi-examples | c729eaa3e03f915df1c1d5eca711c865f607cd50 | [
"MIT"
] | 81 | 2016-03-18T09:05:41.000Z | 2022-03-08T11:30:24.000Z | from machine import Pin, I2C
from ssd1306 import SSD1306_I2C
import framebuf
i2c=I2C(0,sda=Pin(4), scl=Pin(5), freq=400000)
oled = SSD1306_I2C(128, 64, i2c)
TH = bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\x80\x01\xfe\x00\x00\x00\x03\xf5\xf8\x1f\xaf\xc0\x00\x00\x0f@\\>\x02p\x00\x00\x0c\x00\x06`\x000\x00\x00\x0c\x00\x06`\x000\x00\x00\x0c\x10\x03\xc0\x08\x10\x00\x00\x0e\x0c\x03\xc000\x00\x00\x0c\x03\x01\x80\xc00\x00\x00\x06\x00\xc1\x83\x00`\x00\x00\x06\x00c\xc6\x00`\x00\x00\x03\x003\xec\x00\xc0\x00\x00\x03\x00\x0f\xf0\x00\xc0\x00\x00\x01\x80\x0f\xf0\x01\x80\x00\x00\x00\xe0\x1f\xf8\x07\x00\x00\x00\x00|\x7f\xfe\x1e\x00\x00\x00\x00?\xff\xff\xfc\x00\x00\x00\x00?\xf0\x0f\xfc\x00\x00\x00\x00p`\x04\x1e\x00\x00\x00\x00\xc0\xc0\x02\x07\x00\x00\x00\x01\x81\xc0\x03\x03\x80\x00\x00\x01\x03\xe0\x07\x81\x80\x00\x00\x01\x07\xf0\x0f\xc1\x80\x00\x00\x03\x0f\xff\xff\xf1\xc0\x00\x00\x03>\x0f\xf08\xc0\x00\x00\x03\xf8\x07\xc0\x1e\xc0\x00\x00\x03\xf0\x03\xc0\x0f\xc0\x00\x00\x0f\xf0\x03\xc0\x0f\xf0\x00\x00\x1c\xe0\x01\x80\x068\x00\x00\x18\xe0\x01\x80\x06\x18\x00\x00\x18\xe0\x01\xc0\x02\x18\x00\x000\xc0\x03\xc0\x02\x1c\x00\x000\xe0\x03\xc0\x06\x0c\x00\x000\xe0\x07\xe0\x06\x0c\x00\x000\xe0\x07\xf0\x0f\x0c\x00\x000\xf0\x0f\xf8\x0f\x1c\x00\x001\xf88\x1e?\x1c\x00\x00\x19\xff\xf0\x07\xff\xb8\x00\x00\x1f\xff\xe0\x07\xf8\xf8\x00\x00\x0f\x1f\xc0\x03\xf0\xf0\x00\x00\x0e\x07\xc0\x03\xc0p\x00\x00\x06\x03\xc0\x03\x80`\x00\x00\x06\x01\xc0\x03\x80`\x00\x00\x06\x01\xc0\x03\x00`\x00\x00\x06\x00\xc0\x03\x00`\x00\x00\x03\x00\xe0\x07\x00\xc0\x00\x00\x03\x00\xf0\x0e\x00\xc0\x00\x00\x01\x80\xfc>\x01\x80\x00\x00\x00\xc0\xff\xfe\x03\x00\x00\x00\x00q\xff\xff\x0e\x00\x00\x00\x00?\xe0\x07\xfc\x00\x00\x00\x00\x0f\xc0\x03\xf0\x00\x00\x00\x00\x03\xc0\x03\xc0\x00\x00\x00\x00\x01\xe0\x07\x80\x00\x00\x00\x00\x00p\x1e\x00\x00\x00\x00\x00\x00>x\x00\x00\x00\x00\x00\x00\x0f\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
fb = framebuf.FrameBuffer(TH,64,64, framebuf.MONO_HLSB)
oled.fill(0)
oled.blit(fb,32,0)
oled.show() | 200 | 1,942 | 0.744091 |
ace41aa7f7ea72d53cc01c2bc1ed849a610da57b | 3,006 | py | Python | ethereum2etl/jobs/export_beacon_validators_job.py | blockchain-etl/ethereum2-etl | d8f5c5bfa7d9645608b0526bcf1b632d2651a262 | [
"MIT"
] | 30 | 2020-10-06T14:50:50.000Z | 2022-03-23T16:09:46.000Z | ethereum2etl/jobs/export_beacon_validators_job.py | blockchain-etl/ethereum2-etl | d8f5c5bfa7d9645608b0526bcf1b632d2651a262 | [
"MIT"
] | 6 | 2020-10-13T12:41:34.000Z | 2021-03-06T10:40:41.000Z | ethereum2etl/jobs/export_beacon_validators_job.py | blockchain-etl/ethereum2-etl | d8f5c5bfa7d9645608b0526bcf1b632d2651a262 | [
"MIT"
] | 5 | 2020-10-07T18:34:10.000Z | 2021-11-20T04:59:21.000Z | # MIT License
#
# Copyright (c) 2020 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from blockchainetl_common.executors.batch_work_executor import BatchWorkExecutor
from blockchainetl_common.jobs.base_job import BaseJob
from ethereum2etl.mappers.validator_mapper import ValidatorMapper
class ExportBeaconValidatorsJob(BaseJob):
    """Export beacon-chain validators for every epoch in the inclusive
    range [start_epoch, end_epoch].

    Epochs are fanned out in batches through a BatchWorkExecutor; each
    epoch is resolved to a slot, the validators at that slot are fetched
    from the eth2 service, mapped to plain dicts and handed to the item
    exporter.
    """

    def __init__(self, start_epoch, end_epoch, ethereum2_service,
                 max_workers, item_exporter, batch_size=1):
        self.start_epoch = start_epoch
        self.end_epoch = end_epoch
        self.batch_size = batch_size
        self.ethereum2_service = ethereum2_service
        self.item_exporter = item_exporter
        self.validator_mapper = ValidatorMapper()
        self.batch_work_executor = BatchWorkExecutor(batch_size, max_workers)

    def _start(self):
        # Open the exporter before any work items are produced.
        self.item_exporter.open()

    def _export(self):
        # Hand the inclusive epoch range to the executor; it slices the
        # range into batches and calls _export_batch on workers.
        self.batch_work_executor.execute(
            range(self.start_epoch, self.end_epoch + 1),
            self._export_batch,
            total_items=self.end_epoch - self.start_epoch + 1
        )

    def _export_batch(self, epoch_batch):
        # The executor hands over an iterable of epochs; process each one.
        for current_epoch in epoch_batch:
            self._export_epoch(current_epoch)

    def _export_epoch(self, epoch):
        service = self.ethereum2_service
        mapper = self.validator_mapper
        slot = service.compute_slot_at_epoch(epoch)
        logging.info(f'Slot for epoch {epoch} is {slot}')
        validators_response = service.get_beacon_validators(slot)
        timestamp = service.compute_time_at_slot(slot)
        for entry in validators_response['data']:
            validator = mapper.json_dict_to_validator(entry, timestamp, epoch)
            self.item_exporter.export_item(mapper.validator_to_dict(validator))

    def _end(self):
        # Shut down workers first so no export races the exporter close.
        self.batch_work_executor.shutdown()
        self.item_exporter.close()
| 38.050633 | 106 | 0.725549 |
ace41c5fbcf4bf6910edb206be4f4d0860b64224 | 4,806 | py | Python | backend/database/events.py | manibhushan05/flask-boilerplate | 26f26691c57dd984ded0e5357b757c7c0542cd1c | [
"MIT"
] | null | null | null | backend/database/events.py | manibhushan05/flask-boilerplate | 26f26691c57dd984ded0e5357b757c7c0542cd1c | [
"MIT"
] | 1 | 2021-03-31T19:24:53.000Z | 2021-03-31T19:24:53.000Z | backend/database/events.py | karajrish/flask-boilerplate | 584da59e3eb3c39261b5b435ef7eaf92bf127aeb | [
"MIT"
] | null | null | null | from functools import partial
from sqlalchemy import event, inspect
from backend.utils import slugify as _slugify, was_decorated_without_parenthesis
# EVENTS DOCS
# http://docs.sqlalchemy.org/en/rel_1_1/core/event.html
# ORM EVENTS DOCS
# http://docs.sqlalchemy.org/en/rel_1_1/orm/events.html
class _SQLAlchemyEvent(object):
"""Private helper class for the @attach_events and @on decorators"""
ATTR = '_sqlalchemy_event'
def __init__(self, field_name, event_name, listen_kwargs=None):
self.field_name = field_name
self.event_name = event_name
self.listen_kwargs = listen_kwargs or {}
def attach_events(*args):
    """Class decorator for SQLAlchemy models: registers every method
    marked by :func:`.on` as a listener via ``sqlalchemy.event.listen``.

    Usage::

        @attach_events
        class User(Model):
            email = Column(String(50))

            @on('email', 'set')
            def lowercase_email(self, new_value, old_value, initiating_event):
                self.email = new_value.lower()

    Works both bare (``@attach_events``) and called
    (``@attach_events()``).
    """
    def decorate(cls):
        for attr_name, member in cls.__dict__.items():
            # Skip dunders and anything not marked by @on.
            if attr_name.startswith('__'):
                continue
            if not hasattr(member, _SQLAlchemyEvent.ATTR):
                continue
            marker = getattr(member, _SQLAlchemyEvent.ATTR)
            if marker.field_name:
                # Attribute-level event: listen on the mapped column.
                event.listen(getattr(cls, marker.field_name),
                             marker.event_name, member,
                             **marker.listen_kwargs)
            else:
                # Instance-level event: listen on the class itself.
                event.listen(cls, marker.event_name, member,
                             **marker.listen_kwargs)
        return cls
    if was_decorated_without_parenthesis(args):
        return decorate(args[0])
    return decorate
# pylint: disable=invalid-name
def on(*args, **listen_kwargs):
    """Method decorator for SQLAlchemy models; must be combined with the
    :func:`.attach_events` class decorator, which performs the actual
    registration.

    One positional argument means an instance event, two mean an
    attribute event::

        @attach_events
        class Post(Model):
            uuid = Column(String(36))
            post_tags = relationship('PostTag', back_populates='post')  # m2m

            # instance event (only the event name); extra kwargs are
            # forwarded to sqlalchemy.event.listen
            @on('init', once=True)
            def generate_uuid(self, args, kwargs):
                self.uuid = str(uuid.uuid4())

            # attribute event (field name + event name)
            @on('post_tags', 'append')
            def set_tag_order(self, post_tag, initiating_event):
                if not post_tag.order:
                    post_tag.order = len(self.post_tags) + 1
    """
    if len(args) == 1:
        field_name = None
        event_name = args[0]
    elif len(args) == 2:
        field_name, event_name = args
    else:
        raise NotImplementedError(
            '@on accepts only one or two positional arguments')
    def decorator(fn):
        # Stash the marker on the function; attach_events picks it up.
        setattr(fn, _SQLAlchemyEvent.ATTR,
                _SQLAlchemyEvent(field_name, event_name, listen_kwargs))
        return fn
    return decorator
def slugify(field_name, slug_field_name=None, mutable=False):
    """Class decorator to specify a field to slugify. Slugs are immutable by
    default unless mutable=True is passed.
    Usage::
        @slugify('title')
        class Post(Model):
            title = Column(String(100))
            slug = Column(String(100))
        # pass a second argument to specify the slug attribute field:
        @slugify('title', 'title_slug')
        class Post(Model):
            title = Column(String(100))
            title_slug = Column(String(100))
        # optionally set mutable to True for a slug that changes every time
        # the slugified field changes:
        @slugify('title', mutable=True)
        class Post(Model):
            title = Column(String(100))
            slug = Column(String(100))
    """
    slug_field_name = slug_field_name or 'slug'
    # Listener for SQLAlchemy's attribute 'set' event; the fourth,
    # unused parameter is the initiating event object.
    def _set_slug(target, value, old_value, _, mutable=False):
        existing_slug = getattr(target, slug_field_name)
        # Immutable slugs: never overwrite one that is already set.
        if existing_slug and not mutable:
            return
        if value and (not existing_slug or value != old_value):
            slug_value_base = _slugify(value)
            slug_value = slug_value_base
            count = 0
            # De-duplicate against existing rows by appending -1, -2, ...
            # until the slug is unique.  One DB query per attempt.
            while True:
                if target.query.filter_by(**{slug_field_name: slug_value}).first() is None:
                    setattr(target, slug_field_name, slug_value)
                    break
                else:
                    count += 1
                    slug_value = '{}-{}'.format(slug_value_base, count)
    def wrapper(cls):
        # Recompute the slug whenever the source field is assigned.
        event.listen(getattr(cls, field_name), 'set',
                     partial(_set_slug, mutable=mutable))
        return cls
    return wrapper
# Public API of this module (re-exports `event` and `inspect` from
# SQLAlchemy alongside the local decorators).
__all__ = [
    'event',
    'inspect',
    'attach_events',
    'on',
    'slugify'
]
| 32.693878 | 91 | 0.603204 |
ace41d040353021c8e96ef47caafae4b2fa9054e | 3,646 | py | Python | tests/python/unittest/test_target_codegen_arm.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 4 | 2019-05-08T04:46:07.000Z | 2019-11-11T19:43:04.000Z | tests/python/unittest/test_target_codegen_arm.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 2 | 2020-09-14T09:18:25.000Z | 2020-09-24T03:28:18.000Z | tests/python/unittest/test_target_codegen_arm.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 2 | 2019-08-08T01:48:03.000Z | 2019-09-27T06:49:16.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import re
import os
import ctypes
def test_popcount():
    """Check that vectorized tir.popcount lowers to the expected NEON
    instruction mix (vcnt + vpaddl pairwise widening adds) when
    cross-compiled for an ARMv7/Cortex-A53 target."""
    target = 'llvm -mtriple=armv7l-none-linux-gnueabihf -mcpu=cortex-a53 -mattr=+neon'
    # `counts` is the expected number of vpaddl instructions; wider
    # element types need more widening steps.
    # NOTE(review): the parameter `type` shadows the builtin of the same name.
    def check_correct_assembly(type, elements, counts):
        n = tvm.runtime.convert(elements)
        A = te.placeholder(n, dtype=type, name='A')
        B = te.compute(A.shape, lambda i: tvm.tir.popcount(A[i]), name='B')
        s = te.create_schedule(B.op)
        # Vectorize over the single axis so NEON instructions are emitted.
        s[B].vectorize(s[B].op.axis[0])
        f = tvm.build(s, [A, B], target)
        # Verify we see the correct number of vpaddl and vcnt instructions in the assembly
        assembly = f.get_source('asm')
        matches = re.findall("vpaddl", assembly)
        assert (len(matches) == counts)
        matches = re.findall("vcnt", assembly)
        assert (len(matches) == 1)
    check_correct_assembly('uint16', 8, 1)
    check_correct_assembly('uint16', 4, 1)
    check_correct_assembly('uint32', 4, 2)
    check_correct_assembly('uint32', 2, 2)
    check_correct_assembly('uint64', 2, 3)
def test_vmlal_s16():
    """Check that an int8 x int8 -> int32 vectorized reduction lowers to
    NEON vmlal.s16 (widening multiply-accumulate) instructions — one per
    4 output lanes — on an ARMv7/Cortex-A53 target, both for elementwise
    and broadcast second operands."""
    target = 'llvm -mtriple=armv7l-none-linux-gnueabihf -mcpu=cortex-a53 -mattr=+neon'
    def check_correct_assembly(N):
        K = te.size_var("K")
        A = te.placeholder((K, N), dtype="int8", name='A')
        B = te.placeholder((K, N), dtype="int8", name='B')
        k = te.reduce_axis((0, K))
        # Widen both operands to int32 and reduce over K.
        C = te.compute((N, ), lambda n: te.sum(
            A[k, n].astype("int32") * B[k, n].astype("int32"), axis=[k]), name='C')
        s = te.create_schedule(C.op)
        s[C].vectorize(s[C].op.axis[0])
        f = tvm.build(s, [A, B, C], target)
        # Verify we see the correct number of vmlal.s16 instructions
        assembly = f.get_source('asm')
        matches = re.findall("vmlal.s16", assembly)
        assert (len(matches) == N // 4)
    check_correct_assembly(8)
    check_correct_assembly(16)
    check_correct_assembly(32)
    check_correct_assembly(64)
    # Same check but with B broadcast along the reduction axis only.
    def check_broadcast_correct_assembly(N):
        K = te.size_var("K")
        A = te.placeholder((K, N), dtype="int8", name='A')
        B = te.placeholder((K,), dtype="int8", name='B')
        k = te.reduce_axis((0, K))
        C = te.compute((N, ), lambda n: te.sum(
            A[k, n].astype("int32") * B[k].astype("int32"),
            axis=[k]), name='C')
        s = te.create_schedule(C.op)
        s[C].vectorize(s[C].op.axis[0])
        f = tvm.build(s, [A, B, C], target)
        # Verify we see the correct number of vmlal.s16 instructions
        assembly = f.get_source('asm')
        matches = re.findall("vmlal.s16", assembly)
        assert len(matches) == N // 4
    check_broadcast_correct_assembly(8)
    check_broadcast_correct_assembly(16)
    check_broadcast_correct_assembly(32)
    check_broadcast_correct_assembly(64)
# Run both assembly-inspection tests when executed directly.
if __name__ == "__main__":
    test_popcount()
    test_vmlal_s16()
| 38.378947 | 90 | 0.644542 |
ace41dcafb0f208277fdaea6ad42c0a666a1891d | 12,161 | py | Python | json_py.py | bycym/python_json_parser | ace18bee8df13b63e04d8bc9417af88436760e4c | [
"Apache-2.0"
] | null | null | null | json_py.py | bycym/python_json_parser | ace18bee8df13b63e04d8bc9417af88436760e4c | [
"Apache-2.0"
] | null | null | null | json_py.py | bycym/python_json_parser | ace18bee8df13b63e04d8bc9417af88436760e4c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
######################################################################
# Author : Aaron Benkoczy
# Date : 2018.01.04.
######################################################################
# https://stackoverflow.com/questions/28132055/not-able-to-display-multiple-json-keys-in-tkinter-gui
# for search
# https://stackoverflow.com/questions/17225920/python-tkinter-treeview-searchable
import fnmatch
import os
import re
import xml.etree.ElementTree as ET
import sys
from Tkinter import *
import tkFont
import ttk
import uuid
import json
class App:
  """Tkinter GUI that parses a JSON (or Perl-style hash) string from a
  text box and shows it as an expandable key/value tree.

  Python 2 code (Tkinter/`unicode`/`str.decode`).  Several methods below
  have latent defects; they are flagged with NOTE(review) comments
  rather than changed.
  """
  # Module/class-shared parsed document; also rebound as a module global
  # (`global JSON_dictionary`) by several methods below.
  JSON_dictionary = {}
  # NOTE(review): missing `self`; only works because Tkinter passes the
  # event as first argument.  `e.get()` references an undefined name `e`
  # and will raise NameError if this handler fires.
  def selectAll(event):
    print('e.get():', e.get())
    # or more universal
    print('event.widget.get():', event.widget.get())
    # select text
    event.widget.select_range(0, 'end')
    # move cursor to the end
    event.widget.icursor('end')
  # NOTE(review): missing `self` parameter, so `self.root` is undefined
  # here; calling Killme() would raise NameError.
  def Killme():
    self.root.quit()
    self.root.destroy()
  # refresh menu
  def RefreshMenu(self):
    # Clear the tree, re-parse the text box, and rebuild the view.
    self._tree.delete(*self._tree.get_children())
    self.retrieve_input()
    global JSON_dictionary
    if(len(JSON_dictionary) > 0):
      self.ReReadFile()
  def ClearText(self):
    # Empty the input text box.
    self._inputEntry.delete('1.0', END)
  def CopyText(self):
    # Copy the raw input text to the system clipboard.
    input_string = self._inputEntry.get("1.0",END)
    self.root.withdraw()
    self.root.clipboard_clear()
    self.root.clipboard_append(input_string)
    self.root.update() # now it stays on the clipboard after the window is closed
  # double click on a node
  # NOTE(review): missing `self`; also never called anywhere in this class.
  def is_json(myjson):
    try:
      json_object = json.loads(myjson)
    except ValueError as e:
      return False
    return True
  def retrieve_input(self):
    # Parse the text box content into the shared JSON_dictionary.
    # Falls back to translating Perl-hash syntax (`=>`, single quotes,
    # `undef`) into JSON before a second parse attempt.
    input_string = self._inputEntry.get("1.0",END)
    input_string = input_string.strip()
    global JSON_dictionary
    data = JSON_dictionary
    if(len(input_string) > 0 and input_string != ""):
      # parse json string
      try:
        print("json string convert")
        data = json.loads(input_string)
      except ValueError as e:
        print(e)
        print("cannot read json string");
        # paerse perl json string
        try:
          print("try perl json string convert")
          input_string = input_string.replace(" =>",":")
          input_string = input_string.replace("\'","\"")
          input_string = input_string.replace("undef","0")
          input_string = " ".join(input_string.split())
          print(input_string)
          data = json.loads(input_string)
        except ValueError as e:
          print(e)
          print("cannot read perl json string");
    JSON_dictionary = data
  def JSONTree(self, Tree, Parent, Dictionary):
    # Recursively populate the Treeview: dicts become branches, lists
    # become "key[]" branches keyed by index, everything else is a leaf.
    for key in Dictionary :
      uid = uuid.uuid4()
      if isinstance(Dictionary[key], dict):
        Tree.insert(Parent, 'end', uid, text=key)
        self.JSONTree(Tree, uid, Dictionary[key])
      elif isinstance(Dictionary[key], list):
        Tree.insert(Parent, 'end', uid, text=key + '[]')
        self.JSONTree(Tree,
                      uid,
                      dict([(i, x) for i, x in enumerate(Dictionary[key])]))
      else:
        value = Dictionary[key]
        # NOTE(review): `unicode` exists only in Python 2.
        if isinstance(value, str) or isinstance(value, unicode):
          value = value.replace(' ', '_')
        else:
          value = str(value)
        Tree.insert(Parent, 'end', uid, text=key, value=value)
  def OnDoubleClick(self, event):
    # Copy the focused node's "key : value" to the clipboard.
    selected_item = self._tree.focus()
    value = self._tree.item(selected_item, "values")
    key = self._tree.item(selected_item, "text")
    # NOTE(review): `value[0]` raises IndexError for nodes without a
    # value column (branch nodes) when only `key` is truthy.
    if (value or key):
      to_clipboard = str(key) + " : " + str(value[0])
      # lineArray = string_line.split(",")
      # line = str(lineArray)
      # line = line.replace("'", " ")
      # line = line.strip()
      # to_clipboard = line[line.find("/home"):line.find(": ")]
      #print(to_clipboard)
      self.root.clipboard_clear()
      # NOTE(review): str.decode is Python-2-only.
      self.root.clipboard_append(to_clipboard.decode('utf-8'))
      print ("===============================================================")
      print (to_clipboard.decode('utf-8'))
      print ("===============================================================")
  def __init__(self):
    # Build the whole UI: menu, buttons, input box, tree and scrollbars,
    # then render the default sample document.
    self.root=Tk()
    self.root.title("Json Parser")
    self.root.geometry('10x10+0+0')
    self.dFont=tkFont.Font(family="Arial", size=14)
    # Menu elements
    self.menu = Menu(self.root)
    self.root.config(menu=self.menu)
    self.fileMenu = Menu(self.menu)
    self.menu.add_cascade(label="File", menu=self.fileMenu)
    self.fileMenu.add_command(label="Refresh", command=self.RefreshMenu)
    self._loadButton = Button(self.root, height=1, width=10, text="Load",
              command=lambda: self.RefreshMenu())
    self._loadButton.grid(row=0, column=1)
    self._clearButton = Button(self.root, height=1, width=10, text="Clear",
              command=lambda: self.ClearText())
    self._clearButton.grid(row=1, column=1)
    self._copyButton = Button(self.root, height=1, width=10, text="Copy",
              command=lambda: self.CopyText())
    self._copyButton.grid(row=0, column=3)
    self._inputEntry = Text(self.root, height=2, width=10, bg="grey")
    self._inputEntry.grid(row=0, column=4)
    self._inputEntry.insert(INSERT, '{"submit":"finish","responses":{"762":{"3483":["10591"],"yolo":["10594"],"3485":[10595,10596,10597],"3486":["sdfghjkl harom"]}},"comments":{"762":{}}}')
    # init tree
    self._tree = ttk.Treeview(self.root)
    #_tree.LabelEdit = TRUE
    # self._tree["columns"]=("one","two")
    # NOTE(review): ("one") is a plain string, not a 1-tuple; Tk accepts
    # it here but ("one",) would be the explicit form.
    self._tree["columns"]=("one")
    self._tree.heading("#0", text="Key")
    self._tree.heading("one", text="Value")
    # self._tree.heading("two", text="Place")
    self._tree.column("#0", minwidth=35, stretch=FALSE)
    self._tree.column("one", minwidth=60, stretch=TRUE)
    # self._tree.column("two", minwidth=45, stretch=FALSE)
    self._tree.grid(row=1, column=1, stic="nsew")
    # event listener double click
    self._tree.bind("<Double-1>", self.OnDoubleClick)
    self._tree.bind('<Control-KeyRelease-a>', self.selectAll)
    #ttk.Style().configure('Treeview', rowheight=50)
    # scroll bar to root
    self.yscrollbar=Scrollbar(self.root, orient=VERTICAL, command=self._tree.yview)
    self.yscrollbar.pack(side=RIGHT, fill=Y)
    self.xscrollbar=Scrollbar(self.root, orient=HORIZONTAL, command=self._tree.xview)
    self.xscrollbar.pack(side=BOTTOM, fill=X)
    self._tree.configure(yscrollcommand=self.yscrollbar.set, xscrollcommand=self.xscrollbar.set)
    self.root.geometry('600x600+0+0')
    # self._loadButton.pack(side=LEFT, fill=BOTH, expand = YES)
    # self._clearButton.pack(side=LEFT, fill=BOTH, expand = YES)
    # self._copyButton.pack(side=LEFT, fill=BOTH, expand = YES)
    # self._inputEntry.pack(side=LEFT, fill=BOTH, expand = YES)
    # self._tree.pack(side=RIGHT, fill=BOTH, expand = YES)
    self._loadButton.pack(fill=X)
    self._clearButton.pack(fill=X)
    self._copyButton.pack(fill=X)
    self._inputEntry.pack(fill=X)
    self._tree.pack(fill=BOTH, expand = YES)
    global JSON_dictionary
    # Default sample document shown at startup.
    JSON_dictionary = {"submit":"finish","responses":{"762":{"3483":["10591"],"yolo":["10594"],"3485":[10595,10596,10597],"3486":["sdfghjkl harom"]}},"comments":{"762":{}}}
    self.ReReadFile()
    ###############################
    # lets read the content: ######
    # file name
    # if(len(JSON_dictionary) > 0 ):
    #   self.ReReadFile()
    # else:
    #   print(JSON_dictionary + " is not a valid file")
    #   sys.exit(-1)
  # content reader
  def ReReadFile(self):
    # Render JSON_dictionary into the tree and enter the Tk main loop.
    # The large `if` block below is legacy compiler-warning parsing and
    # is dead code: SSstring is always "" so the condition never holds.
    global JSON_dictionary
    allWarnings = 0
    SSstring =""
    # open the file
    if (len(SSstring) > 0):
      # content = f.read()
      content = JSON_dictionary.strip()
      # split the content by the pointer arrow ^
      contentList = content.split("^")
      tagTypes = set()
      # the first root tag what shows "All Warnings"
      tagMap = {"[root]": 0}
      tagMap["[root]"] = self._tree.insert("", 0, "[root]", text="[All Warnings: 0]")
      tagIndex = 1
      # iterate throu the splitted elements
      for i, line in enumerate(contentList):
        #line = line.strip()
        #get the tag, like: [-Wsomething]
        if re.search("\[\-W.*\]", line):
          #tag = "["+line[line.find("[")+1:line.find("]")]+"]"
          tag = re.search("\[\-W.*\]", line).group()
          # insert a tag if it is not exsist
          if(tag not in tagTypes):
            tagTypes.add(tag)
            tagMap[tag] = self._tree.insert("", tagIndex, tag, text=tag + " [1]")
            # NOTE(review): `++tagIndex` is a no-op in Python (double
            # unary plus), so tagIndex is never incremented.
            ++tagIndex
          # update the tags child counter
          if(len(self._tree.get_children(tagMap[tag])) > 0):
            self._tree.item(tag, text=tag + " ["+ str(len(self._tree.get_children(tagMap[tag]))+1) +"]")
          # Tags - column
          tagColumn = line[line.find(": warning: ")+1:line.find("[")]
          # Place - column
          placeColumn = line[line.find("/home"):line.find(": ")]
          #placeColumn = line.search("/:]", line).group()
          # Problem - column
          lineArray = line.splitlines()
          problemColumn = lineArray[len(lineArray)-2]
          #problemColumn = line[line.find("]\n"):]
          #insert an element under the tag
          self._tree.insert(tagMap[tag], "end", i,
                  text=tagColumn, values=(problemColumn, placeColumn));
        # if can't find a tag then add it to the "root" "All warnings"
        else:
          # Tags - column
          tagColumn = line[line.find(": warning: ")+1:line.find("[")]
          # Place - column
          placeColumn = line[line.find("/home"):line.find(": ")]
          # Problem - column
          lineArray = line.splitlines()
          problemColumn = lineArray[len(lineArray)-2]
          #problemColumn = line[line.find("]\n"):]
          #insert an element under the tag
          self._tree.insert(tagMap["[root]"], "end", i,
                  text=tagColumn, values=(problemColumn, placeColumn));
          allWarnings = i;
      # count all of the warnings
      # get the elements under the all warnings to the second counter
      # self._tree.item("[root]", text="[All Warnings: " + str(allWarnings) +"]"
      #    " ["+ str(len(self._tree.get_children(tagMap["[root]"]))+1) +"]")
    # data = json.load(JSON_dictionary)
    self.JSONTree(self._tree, '', JSON_dictionary)
    #self._tree.pack()
    self.root.mainloop();
if __name__ == '__main__':
    # Former CLI handling, kept for reference: the tool originally took a
    # make-log path argument and printed usage/features when it was missing.
    # if len(sys.argv) < 2:
    #     print("usage: " + os.path.basename(sys.argv[0]) + " [JSON_dictionary]")
    #     print("example: ")
    #     print("python " + os.path.basename(sys.argv[0]) + " \"Make.log\"")
    #     print("")
    #     print("Features:")
    #     print("- You can double click on a node to copy the path!")
    #     print("- There is a \"File > Refresh\" menu where you can reread the makelog file.")
    #     sys.exit(0)
    # Constructing App builds the GUI; its init eventually enters the Tk main loop.
    app = App()
| 38.362776 | 193 | 0.528328 |
ace41e3f5c44cd3781dfe7d7210e6b555094ebe3 | 161 | py | Python | src/test/pythonFiles/datascience/simple_note_book.py | ChaseKnowlden/vscode-jupyter | 9bdaf87f0b6dcd717c508e9023350499a6093f97 | [
"MIT"
] | 2,461 | 2016-01-21T16:40:43.000Z | 2022-03-31T12:01:55.000Z | src/test/pythonFiles/datascience/simple_note_book.py | ChaseKnowlden/vscode-jupyter | 9bdaf87f0b6dcd717c508e9023350499a6093f97 | [
"MIT"
] | 12,536 | 2019-05-06T21:26:14.000Z | 2022-03-31T23:06:48.000Z | src/test/pythonFiles/datascience/simple_note_book.py | vasili8m/vscode-python | 846eee870e8b7bab38172600836faedb5fb80166 | [
"MIT"
] | 871 | 2019-05-15T13:43:55.000Z | 2022-03-31T03:04:35.000Z | # %%
import os.path
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'ds.log'), 'a') as fp:
fp.write('Hello World')
| 20.125 | 55 | 0.670807 |
ace41e5a610c8121480e97eb925e9eda3dcea174 | 326 | py | Python | sdk/python/pulumi_azure_nextgen/migrate/__init__.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/migrate/__init__.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/migrate/__init__.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Make subpackages available:
from . import (
latest,
v20171111preview,
v20180202,
v20180901preview,
v20191001,
v20191001preview,
)
| 21.733333 | 80 | 0.677914 |
ace41fb0478b6bce5f980cc3fa33eb27dcd8e22e | 1,004 | py | Python | microsoftgraph/resources/groups.py | wpajunior/microsoftgraph | bae830a567bba9fd5eb644e530b98fb78f5c3a13 | [
"MIT"
] | null | null | null | microsoftgraph/resources/groups.py | wpajunior/microsoftgraph | bae830a567bba9fd5eb644e530b98fb78f5c3a13 | [
"MIT"
] | 1 | 2021-06-01T23:09:07.000Z | 2021-06-01T23:09:07.000Z | microsoftgraph/resources/groups.py | wpajunior/microsoftgraph | bae830a567bba9fd5eb644e530b98fb78f5c3a13 | [
"MIT"
] | null | null | null | class Groups:
def __init__(self, client):
self.client = client
def create(self, group):
""" Creates a new group
:param group: Group data. Refer to: https://docs.microsoft.com/en-us/graph/api/group-post-groups?view=graph-rest-1.0
:type group: dict
:returns: The data of the new group, including its new id
:rtype: dict
:raises: Exception
"""
path = '/groups'
return self.client.post(path, group)
def find_all(self, params={}, **options):
""" Fetches all groups
:returns: A list containing all groups
:rtype: list of dict
:raises: Exception
"""
path = '/groups'
return self.client.get_collection(path)
def find_by_id(self, id, params={}, **options):
""" Fetches a group by id
:returns: Group data
:rtype: dict
:raises: Exception
"""
path = '/groups/' + id
return self.client.get(path) | 28.685714 | 124 | 0.561753 |
ace421a3e44c3825ed3bdb2cd94a78b3c3a8aeed | 5,331 | py | Python | Scripts_Model/scripts_pytorch/VGG16_pytorch.py | zhangziyezzy/DeepLearningMugenKnock | e306f436fb41b5549d0adf9ad331d638e5906e29 | [
"MIT"
] | 10 | 2021-12-17T06:07:25.000Z | 2022-03-25T13:50:05.000Z | Scripts_Model/scripts_pytorch/VGG16_pytorch.py | karaage0703/DeepLearningMugenKnock | 26830fe049c7da8001977ca0df12e946c0f030eb | [
"MIT"
] | null | null | null | Scripts_Model/scripts_pytorch/VGG16_pytorch.py | karaage0703/DeepLearningMugenKnock | 26830fe049c7da8001977ca0df12e946c0f030eb | [
"MIT"
] | 2 | 2022-03-15T02:42:09.000Z | 2022-03-30T23:19:55.000Z | import torch
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from easydict import EasyDict
from _main_base import main
import os
#---
# config
#---
cfg = EasyDict()
# class
cfg.CLASS_LABEL = ['akahara', 'madara']
cfg.CLASS_NUM = len(cfg.CLASS_LABEL)
# model
cfg.INPUT_HEIGHT = 64
cfg.INPUT_WIDTH = 64
cfg.INPUT_CHANNEL = 3
cfg.GPU = False
cfg.DEVICE = torch.device("cuda" if cfg.GPU and torch.cuda.is_available() else "cpu")
# '{}' is filled with the iteration number or 'final' when saving
cfg.MODEL_SAVE_PATH = 'models/VGG16_{}.pt'
cfg.MODEL_SAVE_INTERVAL = 200
cfg.ITERATION = 1000
cfg.MINIBATCH = 8
cfg.OPTIMIZER = torch.optim.SGD
cfg.LEARNING_RATE = 0.1
cfg.MOMENTUM = 0.9
# NOTE(review): NLLLoss expects *log*-probabilities, but VGG16.forward below
# ends with F.softmax (plain probabilities) -- confirm whether log_softmax
# was intended.
cfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()

cfg.TRAIN = EasyDict()
cfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50
cfg.TRAIN.DATA_PATH = '../Dataset/train/images/'
cfg.TRAIN.DATA_HORIZONTAL_FLIP = True
cfg.TRAIN.DATA_VERTICAL_FLIP = True
cfg.TRAIN.DATA_ROTATION = False

cfg.TEST = EasyDict()
cfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')
cfg.TEST.DATA_PATH = '../Dataset/test/images/'
cfg.TEST.MINIBATCH = 2

# random seed
torch.manual_seed(0)
class VGG16(torch.nn.Module):
    """VGG-16 style classifier (with BatchNorm after each ReLU) for
    ``cfg.INPUT_HEIGHT`` x ``cfg.INPUT_WIDTH`` RGB inputs.

    NOTE(review): ``forward`` ends with ``F.softmax`` while ``cfg.LOSS_FUNCTION``
    is ``torch.nn.NLLLoss``, which expects *log*-probabilities -- this pairing
    trains against a mismatched objective; confirm whether ``log_softmax`` was
    intended before changing it, since inference code may rely on probabilities.
    """

    def __init__(self):
        super(VGG16, self).__init__()

        # block 1: two 3x3 convs at 64 channels, each ReLU + BatchNorm
        self.conv1 = torch.nn.Sequential(OrderedDict({
            'conv1_1' : torch.nn.Conv2d(cfg.INPUT_CHANNEL, 64, kernel_size=3, padding=1, stride=1),
            'conv1_1_relu' : torch.nn.ReLU(),
            'conv1_1_bn' : torch.nn.BatchNorm2d(64),
            'conv1_2' : torch.nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1),
            'conv1_2_relu' : torch.nn.ReLU(),
            'conv1_2_bn' : torch.nn.BatchNorm2d(64),
        }))

        # block 2: two 3x3 convs at 128 channels
        self.conv2 = torch.nn.Sequential(OrderedDict({
            'conv2_1' : torch.nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1),
            'conv2_1_relu' : torch.nn.ReLU(),
            'conv2_1_bn' : torch.nn.BatchNorm2d(128),
            'conv2_2' : torch.nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1),
            'conv2_2_relu' : torch.nn.ReLU(),
            'conv2_2_bn' : torch.nn.BatchNorm2d(128),
        }))

        # block 3: three 3x3 convs at 256 channels
        self.conv3 = torch.nn.Sequential(OrderedDict({
            'conv3_1' : torch.nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1),
            'conv3_1_relu' : torch.nn.ReLU(),
            'conv3_1_bn' : torch.nn.BatchNorm2d(256),
            'conv3_2' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
            'conv3_2_relu' : torch.nn.ReLU(),
            'conv3_2_bn' : torch.nn.BatchNorm2d(256),
            'conv3_3' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
            'conv3_3_relu' : torch.nn.ReLU(),
            'conv3_3_bn' : torch.nn.BatchNorm2d(256),
        }))

        # block 4: three 3x3 convs at 512 channels
        self.conv4 = torch.nn.Sequential(OrderedDict({
            'conv4_1' : torch.nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1),
            'conv4_1_relu' : torch.nn.ReLU(),
            'conv4_1_bn' : torch.nn.BatchNorm2d(512),
            'conv4_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv4_2_relu' : torch.nn.ReLU(),
            'conv4_2_bn' : torch.nn.BatchNorm2d(512),
            'conv4_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv4_3_relu' : torch.nn.ReLU(),
            'conv4_3_bn' : torch.nn.BatchNorm2d(512),
        }))

        # block 5: three 3x3 convs at 512 channels
        self.conv5 = torch.nn.Sequential(OrderedDict({
            'conv5_1' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_1_relu' : torch.nn.ReLU(),
            'conv5_1_bn' : torch.nn.BatchNorm2d(512),
            'conv5_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_2_relu' : torch.nn.ReLU(),
            'conv5_2_bn' : torch.nn.BatchNorm2d(512),
            'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_3_relu' : torch.nn.ReLU(),
            'conv5_3_bn' : torch.nn.BatchNorm2d(512),
        }))

        # classifier head: input is the flattened 512-channel feature map
        # after 5 halvings (hence // 32 in each spatial dimension)
        self.top = torch.nn.Sequential(OrderedDict({
            'Dense1' : torch.nn.Linear(512 * (cfg.INPUT_HEIGHT // 32) * (cfg.INPUT_WIDTH // 32), 256),
            'Dense1_relu' : torch.nn.ReLU(),
            'Dense1_dropout' : torch.nn.Dropout(p=0.5),
            'Dense2' : torch.nn.Linear(256, 256),
            'Dense2_relu' : torch.nn.ReLU(),
            'Dense2_dropout' : torch.nn.Dropout(p=0.5),
        }))

        self.fc_out = torch.nn.Linear(256, cfg.CLASS_NUM)

    def forward(self, x):
        # block conv1
        x = self.conv1(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)
        # block conv2
        x = self.conv2(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)
        # block conv3
        x = self.conv3(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)
        # block conv4
        x = self.conv4(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)
        # block conv5
        x = self.conv5(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)

        # flatten to (batch, 512 * H/32 * W/32) for the dense head
        x = x.view(x.shape[0], -1)
        x = self.top(x)
        x = self.fc_out(x)
        # NOTE(review): see class docstring -- NLLLoss expects log_softmax here
        x = F.softmax(x, dim=1)
        return x
# main
if __name__ == '__main__':
    # ensure the model output directory exists before training starts
    model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])
    os.makedirs(model_save_dir, exist_ok=True)
    main(cfg, VGG16())
ace421e4166c1cb1bd741ed34a94c08e70c64e69 | 344 | py | Python | utilslibrary/models/menu_model.py | mantou22/SC_system | 0c048c1ba678e378e62bb046b39c1a0f7792adee | [
"MulanPSL-1.0"
] | null | null | null | utilslibrary/models/menu_model.py | mantou22/SC_system | 0c048c1ba678e378e62bb046b39c1a0f7792adee | [
"MulanPSL-1.0"
] | 1 | 2021-09-01T03:28:39.000Z | 2021-09-01T03:28:39.000Z | utilslibrary/models/menu_model.py | mantou22/SC_system | 0c048c1ba678e378e62bb046b39c1a0f7792adee | [
"MulanPSL-1.0"
] | null | null | null | # coding:utf-8
"""
tree select model
"""
class MenuInfo(object):
    """Flat record describing one node of the tree-select menu model."""

    # All fields default to the empty string; callers assign real values
    # on instances before serializing.
    id = ''
    parentIds = ''
    name = ''
    href = ''
    icon = ''
    sort = ''
    isShow = ''
    type = ''
    hasChildren = ''
    parentId = ''

    def conver_to_dict(self, obj):
        # Shallow copy of the given object's instance attributes.
        return dict(obj.__dict__)
| 13.230769 | 33 | 0.436047 |
ace4222c7bbb221c167ed965a781e69381eae67b | 7,853 | py | Python | gym_gazebo2/envs/Turtlebot3/turtlebot3.py | Jens22/gym-gazebo2 | 9e2bfaf21fc7ac191a8c0b0130c0086433d6024f | [
"Apache-2.0"
] | null | null | null | gym_gazebo2/envs/Turtlebot3/turtlebot3.py | Jens22/gym-gazebo2 | 9e2bfaf21fc7ac191a8c0b0130c0086433d6024f | [
"Apache-2.0"
] | null | null | null | gym_gazebo2/envs/Turtlebot3/turtlebot3.py | Jens22/gym-gazebo2 | 9e2bfaf21fc7ac191a8c0b0130c0086433d6024f | [
"Apache-2.0"
] | null | null | null | import gym
gym.logger.set_level(40) # hide warnings
import time
import numpy as np
import copy
import math
import os
import psutil
import signal
import sys
from scipy.stats import skew
from gym import utils, spaces
from gym_gazebo2.utils import ut_generic, ut_launch, ut_gazebo, general_utils
from gym.utils import seeding
from gazebo_msgs.srv import SpawnEntity
import subprocess
import argparse
import transforms3d as tf3d
# ROS 2
import rclpy
from rclpy.qos import qos_profile_sensor_data
from rclpy.qos import QoSProfile
from rclpy.qos import QoSReliabilityPolicy
# from gazebo_msgs.srv import SetEntityState, DeleteEntity
from gazebo_msgs.msg import ContactState, ModelState#, GetModelList
from std_msgs.msg import String
from std_srvs.srv import Empty
from geometry_msgs.msg import Pose, Twist
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from ros2pkg.api import get_prefix_path
from builtin_interfaces.msg import Duration
#launch description
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from ament_index_python.packages import get_package_share_directory
class Turtlebot3Env(gym.Env):
"""
TODO. Define the environment.
"""
def __init__(self):
"""
Initialize the Turtlebot3 environemnt
"""
rclpy.init()
# Launch Turtlebot3 in gazebo
gazeboLaunchFileDir = os.path.join(get_package_share_directory('turtlebot3_gazebo'),'launch')
launch_desc = LaunchDescription([
IncludeLaunchDescription(
PythonLaunchDescriptionSource([gazeboLaunchFileDir,'/turtlebot3_4OG.launch.py']))])
self.launch_subp = ut_launch.startLaunchServiceProcess(launch_desc)
# Create the node after the new ROS_DOMAIN_ID is set in generate_launch_description()
self.node = rclpy.create_node(self.__class__.__name__)
# class variables
self._odom_msg = None
self.max_episode_steps = 1024 #default value, can be updated from baselines
self.iterator = 0
self.reset_jnts = True
self._scan_msg = None
# Subscribe to the appropriate topics, taking into account the particular robot
qos = QoSProfile(depth=10)
self._pub_cmd_vel = self.node.create_publisher(Twist, 'cmd_vel', qos)
self._sub_odom = self.node.create_subscription(Odometry, 'odom', self.odom_callback, qos)
self._sub_scan = self.node.create_subscription(LaserScan, 'scan', self.scan_callback, qos)
self.reset_sim = self.node.create_client(Empty, '/reset_simulation')
self.unpause = self.node.create_client(Empty, '/unpause_physics')
self.pause = self.node.create_client(Empty,'/pause_physics')
self.action_space = spaces.Discrete(13)
len_scan = 24
high = np.inf*np.ones(len_scan)
high = np.append(high, [np.inf, np.inf])
low = 0*np.ones(len_scan)
low = np.append(low, [-1*np.inf, -1*np.inf])
self.observation_space = spaces.Box(low, high)
# Seed the environment
self.seed()
def odom_callback(self, message):
"""
Callback method for the subscriber of odometry data
"""
self._odom_msg = message
def scan_callback(self, message):
"""
Callback method for the subscriber of scan data
"""
self._scan_msg = message
def set_episode_size(self, episode_size):
self.max_episode_steps = episode_size
def take_observation(self):
"""
Take observation from the environment and return it.
:return: state.
"""
# # # # Take an observation
rclpy.spin_once(self.node)
odom_message = self._odom_msg #msg of the callback,
scan_message = self._scan_msg
while scan_message is None or odom_message is None:
#print("I am waiting for massage")
rclpy.spin_once(self.node)
odom_message = self._odom_msg
scan_message = self._scan_msg
#TODO write function that prepare the scan informations
lastVelocities = [odom_message.twist.twist.linear.x, odom_message.twist.twist.angular.z]
lastScans = scan_message.ranges
done = False
for i, item in enumerate(lastScans):
if lastScans[i] <= 0.2:
done = True
elif lastScans[i] == float('inf') or np.isinf(lastScans[i]):
lastScans[i] = 4.0
#Set observation to None after it has been read.
self._odom_msg = None
self._scan_msg = None
#TODO what should all in the state?
state = np.r_[np.reshape(lastScans, -1),
np.reshape(lastVelocities, -1)]
return state, done
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""
Implement the environment step abstraction. Execute action and returns:
- action
- observation
- reward
- done (status)
"""
while not self.unpause.wait_for_service(timeout_sec=1.0):
self.node.get_logger().info('/unpause simulation service not available, waiting again...')
unpause = self.unpause.call_async(Empty.Request())
rclpy.spin_until_future_complete(self.node, unpause)
self.iterator+=1
# Execute "action"
action_list = [-0.3,-0.25,-0.2,-0.15,-0.1,-0.05,0.0,0.05,0.1,0.15,0.2,0.25,0.3]
V_CONST = 0.3
vel_cmd = Twist()
vel_cmd.linear.x = V_CONST
vel_cmd.angular.z = action_list[action]
self._pub_cmd_vel.publish(vel_cmd)
# Take an observation
obs, done = self.take_observation()
# Get reward, default is 1
reward = 1.0
# Calculate if the env has been solved
if done == False:
done = bool(self.iterator == self.max_episode_steps)
info = {}
while not self.pause.wait_for_service(timeout_sec=1.0):
self.node.get_logger().info('/pause simulation service not available, waiting again...')
pause = self.pause.call_async(Empty.Request())
rclpy.spin_until_future_complete(self.node, pause)
# Return the corresponding observations, rewards, etc.
return obs, reward, done, info
def reset(self):
"""
Reset the agent for a particular experiment condition.
"""
while not self.unpause.wait_for_service(timeout_sec=1.0):
self.node.get_logger().info('/unpause simulation service not available, waiting again...')
unpause = self.unpause.call_async(Empty.Request())
rclpy.spin_until_future_complete(self.node, unpause)
self.iterator = 0
if self.reset_jnts is True:
# reset simulation
while not self.reset_sim.wait_for_service(timeout_sec=1.0):
self.node.get_logger().info('/reset_simulation service not available, waiting again...')
reset_future = self.reset_sim.call_async(Empty.Request())
rclpy.spin_until_future_complete(self.node, reset_future)
# Take an observation
obs, done = self.take_observation()
# Return the corresponding observation
return obs
def close(self):
print("Closing " + self.__class__.__name__ + " environment.")
self.node.destroy_node()
parent = psutil.Process(self.launch_subp.pid)
for child in parent.children(recursive=True):
child.kill()
rclpy.shutdown()
parent.kill()
| 34.442982 | 104 | 0.648287 |
ace4230c45efce7988ea268b168639cd90972fbb | 1,885 | py | Python | ISJ/isj_proj2_xhorna14.py | PeetHornak/Documents | ee94e7fbbfcbf2889d7d5feeb8abd840fba1990a | [
"MIT"
] | null | null | null | ISJ/isj_proj2_xhorna14.py | PeetHornak/Documents | ee94e7fbbfcbf2889d7d5feeb8abd840fba1990a | [
"MIT"
] | 9 | 2021-03-25T22:49:47.000Z | 2021-05-08T20:41:04.000Z | ISJ/isj_proj2_xhorna14.py | PeetHornak/FIT-BUT | ee94e7fbbfcbf2889d7d5feeb8abd840fba1990a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# ukol za 2 body
def she_says_he_says(she_says):
    """Replaces y/i, removes spaces, returns reversed
    >>> she_says_he_says('ma rymu')
    'umiram'
    """
    # normalise spelling and drop the spaces, then read it backwards
    normalised = she_says.replace('y', 'i').replace(' ', '')
    return normalised[::-1]
# ukol za 3 body
def solfege(title_hymn):
    """Partitions the input string to (an optional) title, ': ', and the hymn,
    takes a sublist starting from the first string, skipping always two
    other strings, and ending 3 strings from the end, returns the result
    as a string with ', ' as a separator
    >>> solfege('Hymn of St. John: Ut queant laxis re sonare fibris mi ra gestorum fa muli tuorum sol ve polluti la bii reatum Sancte Iohannes')
    'Ut, re, mi, fa, sol, la'
    >>> solfege('Ut queant laxis re sonare fibris mi ra gestorum fa muli tuorum sol ve polluti la bii reatum Sancte Iohannes')
    'Ut, re, mi, fa, sol, la'
    """
    # str.partition splits on the *first* ': ' and never raises, unlike the
    # previous two-element unpack of split(': '), which crashed with
    # ValueError when ': ' occurred more than once in the input
    _possible_title, separator, remainder = title_hymn.partition(': ')
    hymn = remainder if separator else title_hymn
    # the hymn as a list of strings separated by ' '
    hymn_list = hymn.split(' ')
    # every third string, stopping 3 strings from the end
    skip2 = hymn_list[:-3:3]
    # the skip2 list as a string, ', ' as a separator
    return ", ".join(skip2)
if __name__ == "__main__":
    # run the doctests embedded in the docstrings above
    import doctest
    doctest.testmod()
    pass
| 37.7 | 144 | 0.574005 |
ace42364fa57364691c4228feece1832364d55fe | 11,206 | py | Python | examples/pytorch/decoding/utils/translation_model.py | hieuhoang/FasterTransformer | 440695ccac874574b1d2e1121788e8fa674b4381 | [
"Apache-2.0"
] | null | null | null | examples/pytorch/decoding/utils/translation_model.py | hieuhoang/FasterTransformer | 440695ccac874574b1d2e1121788e8fa674b4381 | [
"Apache-2.0"
] | null | null | null | examples/pytorch/decoding/utils/translation_model.py | hieuhoang/FasterTransformer | 440695ccac874574b1d2e1121788e8fa674b4381 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders.transformer import TransformerEncoder
from onmt.modules import Embeddings, VecEmbedding, CopyGenerator
from onmt.modules.util_class import Cast
from onmt.utils.misc import use_gpu
from onmt.utils.parse import ArgumentParser
from examples.pytorch.encoder.utils.ft_encoder import EncoderWeights, CustomEncoder
from .decoding import FTDecoder, DecodingWeights, TorchDecoding, TransformerDecoder
from .ft_decoding import FtDecodingWeights, CustomDecoding
def build_embeddings(opt, text_field, for_encoder=True):
    """
    Args:
        opt: the option in current environment.
        text_field(TextMultiField): word and feats field.
        for_encoder(bool): build Embeddings for encoder or decoder?

    Returns:
        A ``VecEmbedding`` for vector-input encoders, otherwise an
        ``Embeddings`` module configured from ``opt``.
    """
    emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size

    # vector-type inputs bypass token embedding entirely
    if opt.model_type == "vec" and for_encoder:
        return VecEmbedding(
            opt.feat_vec_size,
            emb_dim,
            position_encoding=opt.position_encoding,
            dropout=(opt.dropout[0] if type(opt.dropout) is list
                     else opt.dropout),
        )

    # the first field is the word vocabulary, the rest are feature vocabularies
    pad_indices = [f.vocab.stoi[f.pad_token] for _, f in text_field]
    word_padding_idx, feat_pad_indices = pad_indices[0], pad_indices[1:]

    num_embs = [len(f.vocab) for _, f in text_field]
    num_word_embeddings, num_feat_embeddings = num_embs[0], num_embs[1:]

    fix_word_vecs = opt.fix_word_vecs_enc if for_encoder \
        else opt.fix_word_vecs_dec

    emb = Embeddings(
        word_vec_size=emb_dim,
        position_encoding=opt.position_encoding,
        feat_merge=opt.feat_merge,
        feat_vec_exponent=opt.feat_vec_exponent,
        feat_vec_size=opt.feat_vec_size,
        dropout=opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
        word_padding_idx=word_padding_idx,
        feat_padding_idx=feat_pad_indices,
        word_vocab_size=num_word_embeddings,
        feat_vocab_sizes=num_feat_embeddings,
        sparse=opt.optim == "sparseadam",
        fix_word_vecs=fix_word_vecs
    )
    return emb
def load_test_model(opt, args):
    """Load a checkpointed OpenNMT model (and its vocab fields) for inference.

    Args:
        opt: translate options; ``opt.models[0]`` is the checkpoint path and
            ``opt.gpu`` selects the device passed to ``build_base_model``.
        args: FasterTransformer arguments; ``args.data_type`` selects the
            numeric precision ('fp32' or 'fp16').

    Returns:
        Tuple ``(fields, model, model_opt)`` with the model set to eval mode.

    Raises:
        ValueError: if ``args.data_type`` is neither 'fp32' nor 'fp16'.
    """
    model_path = opt.models[0]
    # map_location keeps the tensors on CPU regardless of where they were saved
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
    ArgumentParser.update_model_opts(model_opt)
    ArgumentParser.validate_model_opts(model_opt)
    vocab = checkpoint['vocab']
    # older checkpoints store a raw vocab dict rather than Field objects
    if inputters.old_style_vocab(vocab):
        fields = inputters.load_old_vocab(
            vocab, opt.data_type, dynamic_dict=model_opt.copy_attn
        )
    else:
        fields = vocab

    model = build_base_model(model_opt, fields, use_gpu(opt), args, checkpoint,
                             opt.gpu)
    if args.data_type == 'fp32':
        model.float()
    elif args.data_type == 'fp16':
        model.half()
    else:
        raise ValueError('wrong data_type argument {}'.format(args.data_type))
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, args, checkpoint=None, gpu_id=None):
    """Build a model from opts.

    Args:
        model_opt: the option loaded from checkpoint. It's important that
            the opts have been updated and validated. See
            :class:`onmt.utils.parse.ArgumentParser`.
        fields (dict[str, torchtext.data.Field]):
            `Field` objects for the model.
        gpu (bool): whether to use gpu.
        args: FasterTransformer arguments; ``args.model_type`` selects which
            components are replaced by FT custom ops (one of 'decoding_ext',
            'torch_decoding', 'torch_decoding_with_decoder_ext').
        checkpoint: the model gnerated by train phase, or a resumed snapshot
                    model from a stopped training.
        gpu_id (int or NoneType): Which GPU to use.

    Returns:
        the NMTModel.

    Raises:
        ValueError: if ``args.model_type`` is not one of the supported values
            (only when a checkpoint is given).
    """

    # for back compat when attention_dropout was not defined
    try:
        model_opt.attention_dropout
    except AttributeError:
        model_opt.attention_dropout = model_opt.dropout

    # Build embeddings.
    if model_opt.model_type == "text" or model_opt.model_type == "vec":
        src_field = fields["src"]
        src_emb = build_embeddings(model_opt, src_field)
    else:
        src_emb = None

    # Build encoder.
    encoder = TransformerEncoder.from_opt(model_opt, src_emb)

    # Build decoder.
    tgt_field = fields["tgt"]
    tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        assert src_field.base_field.vocab == tgt_field.base_field.vocab, \
            "preprocess with -share_vocab if you use share_embeddings"
        tgt_emb.word_lut.weight = src_emb.word_lut.weight

    decoder = TransformerDecoder.from_opt(model_opt, tgt_emb, args)

    # Build NMTModel(= encoder + decoder).
    if gpu and gpu_id is not None:
        device = torch.device("cuda", gpu_id)
    elif gpu and not gpu_id:
        device = torch.device("cuda")
    elif not gpu:
        device = torch.device("cpu")
    model = onmt.models.NMTModel(encoder, decoder)

    # Build Generator.
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        generator = nn.Sequential(
            nn.Linear(model_opt.dec_rnn_size,
                      len(fields["tgt"].base_field.vocab)),
            Cast(torch.float32),
            gen_func
        )
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        tgt_base_field = fields["tgt"].base_field
        vocab_size = len(tgt_base_field.vocab)
        pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]
        generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
        if model_opt.share_decoder_embeddings:
            generator.linear.weight = decoder.embeddings.word_lut.weight

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        # This preserves backward-compat for models using customed layernorm
        def fix_key(s):
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
                       r'\1.layer_norm\2.bias', s)
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
                       r'\1.layer_norm\2.weight', s)
            return s

        checkpoint['model'] = {fix_key(k): v
                               for k, v in checkpoint['model'].items()}
        # end of patch for backward compatibility
        model.load_state_dict(checkpoint['model'], strict=False)
        generator.load_state_dict(checkpoint['generator'], strict=False)

        # Replace the PyTorch encoder with the FasterTransformer custom
        # encoder for every model type except the pure PyTorch baseline.
        if args.model_type != 'torch_decoding':
            encoder_weights = EncoderWeights(model_opt.enc_layers, model_opt.enc_rnn_size, checkpoint['model'])
            if args.data_type == 'fp16':
                encoder_weights.to_half()
            encoder_weights.to_cuda()
            encoder = CustomEncoder(model_opt.enc_layers, model_opt.heads, model_opt.enc_rnn_size // model_opt.heads, encoder_weights,
                                    path=args.encoder_ths_path, embedding=model.encoder.embeddings)
            model.encoder = encoder

        if args.model_type == 'decoding_ext':
            # Full FasterTransformer decoding (beam search / sampling on GPU).
            vocab_size = len(fields["tgt"].base_field.vocab)
            bos_idx = fields["tgt"].base_field.vocab.stoi[fields["tgt"].base_field.init_token]
            eos_idx = fields["tgt"].base_field.vocab.stoi[fields["tgt"].base_field.eos_token]
            decoding_weights = DecodingWeights(model_opt.dec_layers, model_opt.dec_rnn_size, vocab_size, checkpoint)
            ft_decoding_weights = FtDecodingWeights(model_opt.dec_layers, model_opt.dec_rnn_size, decoding_weights.w)
            if args.data_type == 'fp16':
                ft_decoding_weights.to_half()
            ft_decoding_weights.to_cuda()
            model.decoder = CustomDecoding(model_opt.heads, model_opt.dec_rnn_size // model_opt.heads,
                                           model_opt.dec_rnn_size * 4, model_opt.dec_rnn_size, model_opt.dec_layers,
                                           vocab_size, bos_idx, eos_idx, args.beam_search_diversity_rate,
                                           args.sampling_topk, args.sampling_topp, 1.0, 1.0, 1.0, ft_decoding_weights, args=args)
        elif args.model_type == 'torch_decoding' or args.model_type == 'torch_decoding_with_decoder_ext':
            # PyTorch-driven decoding loop (optionally with the FT decoder op).
            # NOTE(review): here to_cuda() runs before the fp16 conversion,
            # the reverse of the 'decoding_ext' branch above -- presumably
            # equivalent, but confirm the ordering is intentional.
            vocab_size = len(fields["tgt"].base_field.vocab)
            bos_idx = fields["tgt"].base_field.vocab.stoi[fields["tgt"].base_field.init_token]
            eos_idx = fields["tgt"].base_field.vocab.stoi[fields["tgt"].base_field.eos_token]
            decoding_weights = DecodingWeights(model_opt.dec_layers, model_opt.dec_rnn_size, vocab_size, checkpoint)
            decoding_weights.to_cuda()
            if args.data_type == 'fp16':
                decoding_weights.to_half()
            model.decoder = TorchDecoding(model_opt.dec_layers, model_opt.heads, model_opt.dec_rnn_size // model_opt.heads,
                                          vocab_size, bos_idx, eos_idx, decoding_weights, args=args)
        else:
            raise ValueError("Wrong model_type argument, must be one of [decoding_ext, torch_decoding, torch_decoding_with_decoder_ext]")
    else:
        # No checkpoint: initialize parameters from scratch.
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec)

    model.generator = generator
    model.to(device)
    if model_opt.model_dtype == 'fp16' and model_opt.optim == 'fusedadam':
        model.half()

    return model
| 42.608365 | 137 | 0.659468 |
ace424e9137f7eb31d37594ede3b44e6da622970 | 566 | py | Python | setup.py | sahilkabra/kafka-python | c3e72ee6b461b73bd07d1cca934f8f2af108dd46 | [
"MIT"
] | null | null | null | setup.py | sahilkabra/kafka-python | c3e72ee6b461b73bd07d1cca934f8f2af108dd46 | [
"MIT"
] | null | null | null | setup.py | sahilkabra/kafka-python | c3e72ee6b461b73bd07d1cca934f8f2af108dd46 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# Use the README as the long description when it exists; fall back to "".
try:
    # close the file promptly instead of leaking the handle left by a bare open()
    with open("README.rst") as readme:
        long_description = readme.read()
except OSError:  # IOError has been an alias of OSError since Python 3.3
    long_description = ""
# Package metadata; install_requires is intentionally empty -- the demo
# expects the kafka client to be provided by the environment.
setup(name="kafka-python",
      version="0.0.1",
      description="A package that demonstrates kafka producer and consumer",
      license="MIT",
      author="Sahil Kabra",
      packages=find_packages(),
      install_requires=[],
      long_description=long_description,
      classifiers=[
          "Programming Language :: Python",
          "Programming Language :: Python :: 3.8",
      ])
| 26.952381 | 76 | 0.636042 |
ace425a54951fdbd8f210744e8e072a4db02c9c8 | 5,647 | py | Python | src/visualization/mld_depth_influence.py | VictorOnink/Wind-Mixing-Diffusion | 3a7051efefb6a6f89035099ac4d50ab11f242881 | [
"MIT"
] | null | null | null | src/visualization/mld_depth_influence.py | VictorOnink/Wind-Mixing-Diffusion | 3a7051efefb6a6f89035099ac4d50ab11f242881 | [
"MIT"
] | null | null | null | src/visualization/mld_depth_influence.py | VictorOnink/Wind-Mixing-Diffusion | 3a7051efefb6a6f89035099ac4d50ab11f242881 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import utils, settings
from visualization import utils_visualization as utils_v
import numpy as np
def mld_depth_influence(w_rise_list, MLD_list, alpha_list, selection='w_rise', output_step=-1, single_select=0,
                        y_label='Depth/MLD', close_up=None, beaufort=5,
                        x_label=r'Normalised Plastic Counts ($n/n_0$)', fig_size=(16, 8), ax_label_size=16,
                        legend_size=12, diffusion_type='KPP', boundary='Reflect', alpha=0.3):
    """
    Figure looking at the influence of the MLD on the diffusion profile, where everything is plotted with normalized
    depths (relative to the MLD)
    :param w_rise_list: list of rise velocities
    :param MLD_list: list of MLD depths
    :param alpha_list: list of alpha values for M-1 simulation
    :param selection: selection criteria for loading the parcels concentration data
    :param output_step: which time index is being plotted, with the default being the last of the simulation
    :param single_select: selection criteria related to 'selection'
    :param y_label: label of the y axis
    :param close_up: setting the limits of the y axis as (max, min)
    :param beaufort: wind force selecting which profiles to plot
    :param x_label: label of the x axis
    :param fig_size: size of the figure
    :param ax_label_size: fontsize of the axes labels
    :param legend_size: fontsize of the legend
    :param diffusion_type: type of diffusion, either KPP or SWB
    :param boundary: which boundary condition, and whether M-0 or M-1
    :param alpha: transparency of the field data markers
    :return:
    """
    # All depths below are divided by the MLD so profiles are comparable.
    correction = settings.MLD
    ax_range = utils_v.get_axes_range(close_up=close_up, norm_depth=True)
    # Selecting which model data we want to plot based on the diffusion type
    swb, kpp, artificial = utils_v.boolean_diff_type(diffusion_type)
    # Get the base figure axis
    ax = utils_v.base_figure(fig_size, ax_range, y_label, x_label, ax_label_size)
    # Plotting the field data points
    wind_range = utils.beaufort_limits()[beaufort]
    _, _ = utils_v.add_observations(ax, norm_depth=True, alpha=alpha, wind_range=wind_range)
    # One line style per rise velocity, one color per MLD value.
    line_style = ['-', '--', '-.']
    for count_mld, mld in enumerate(MLD_list):
        # Plotting the distribution according to the SWB parametrization
        if swb:
            profile_dict = utils_v.get_concentration_list([np.mean(wind_range)], w_rise_list, selection,
                                                          single_select, alpha_list=alpha_list,
                                                          output_step=output_step, diffusion_type='SWB',
                                                          boundary=boundary, mld=mld)
            for counter in range(len(profile_dict['concentration_list'])):
                ax.plot(profile_dict['concentration_list'][counter], profile_dict['depth_bins'] / correction,
                        label=label_MLD_Comparison(parameters=profile_dict['parameter_SWB'][counter],
                                                   mld=mld, diffusion_type='SWB'),
                        linestyle=line_style[counter], color=utils_v.return_color(count_mld))
        # Plotting the distribution according to the KPP parametrization
        if kpp:
            profile_dict = utils_v.get_concentration_list([np.mean(wind_range)], w_rise_list, selection,
                                                          single_select, alpha_list=alpha_list,
                                                          output_step=output_step, diffusion_type='KPP',
                                                          boundary=boundary, mld=mld)
            # NOTE(review): the label below reads the 'parameter_SWB' key even
            # in the KPP branch — confirm the key name is intentional.
            for counter in range(len(profile_dict['concentration_list'])):
                ax.plot(profile_dict['concentration_list'][counter], profile_dict['depth_bins'] / correction,
                        label=label_MLD_Comparison(parameters=profile_dict['parameter_SWB'][counter],
                                                   mld=mld, diffusion_type='KPP'),
                        linestyle=line_style[counter], color=utils_v.return_color(count_mld))
    # Adding the legend
    ax.legend(fontsize=legend_size, loc='lower right')
    ax.set_title(r'u$_{10}$' + '={}-{}'.format(*wind_range) + r' m s$^{-1}$', fontsize=ax_label_size)
    plt.savefig(mld_comparison_name(diffusion_type, boundary, beaufort=beaufort, close_up=close_up),
                bbox_inches='tight')
def mld_comparison_name(diffusion_type, boundary, beaufort, close_up=None, output_type='.png'):
    """Build the output path for a normalized-depth MLD comparison figure.

    :param diffusion_type: 'SWB', 'KPP' or 'all'
    :param boundary: boundary-condition label embedded in the file name
    :param beaufort: wind force embedded in the file name
    :param close_up: optional (max, min) depth limits appended to the name
    :param output_type: file extension, '.png' by default
    :return: full path of the figure file
    """
    diff_dict = {'SWB': 'SWB', 'KPP': 'KPP', 'all': 'SWB_KPP'}
    figure_name = settings.figure_dir + 'norm_comparison_{}_{}_Bft{}'.format(diff_dict[diffusion_type], boundary,
                                                                             beaufort)
    if close_up is not None:
        # Renamed from `max, min` in the original to avoid shadowing the
        # builtins of the same name.
        depth_max, depth_min = close_up
        figure_name += '_max_{}_min_{}'.format(depth_max, depth_min)
    return figure_name + output_type
def label_MLD_Comparison(parameters, diffusion_type, mld=settings.MLD):
    """Build the legend label (wind speed, rise velocity, MLD) for one profile.

    *parameters* is a (w_10, w_rise) pair; the rise velocity is shown as a
    magnitude. Returns None for diffusion types other than 'KPP'/'SWB'.
    """
    w_10, w_rise = parameters
    w_rise = np.abs(w_rise)
    if diffusion_type == 'KPP':
        return r'KPP, u$_{10}$ '+'= {:.2f}'.format(w_10) + 'm s$^{-1}$,' + 'w$_{rise}$ '+'= {}'.format(w_rise) + \
               'm s$^{-1}$, MLD = ' + '{:.1f} m'.format(mld)
    elif diffusion_type == 'SWB':
        return r'SWB, u$_{10}$ '+'= {:.2f}'.format(w_10) + 'm s$^{-1}$,' + 'w$_{rise}$ '+'= {}'.format(w_rise) + \
               'm s$^{-1}$, MLD = ' + '{:.1f} m'.format(mld) | 57.040404 | 116 | 0.616788 |
ace4265b77d74c6eff6fb463f34d333c9a52ea94 | 10,526 | py | Python | test.py | metamx/Diamond | 2fcf12708cdb614c358a8ceb67d7ae1593080d69 | [
"MIT"
] | 1 | 2015-10-18T17:37:30.000Z | 2015-10-18T17:37:30.000Z | test.py | metamx/Diamond | 2fcf12708cdb614c358a8ceb67d7ae1593080d69 | [
"MIT"
] | null | null | null | test.py | metamx/Diamond | 2fcf12708cdb614c358a8ceb67d7ae1593080d69 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
###############################################################################
import os
import sys
import unittest
import inspect
import traceback
import optparse
import logging
import configobj
try:
import cPickle as pickle
pickle # workaround for pyflakes issue #13
except ImportError:
import pickle as pickle
try:
from cStringIO import StringIO
StringIO # workaround for pyflakes issue #13
except ImportError:
from StringIO import StringIO
try:
from setproctitle import setproctitle
setproctitle # workaround for pyflakes issue #13
except ImportError:
setproctitle = None
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'src')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'src', 'collectors')))
def run_only(func, predicate):
    """Return *func* when *predicate*() is truthy, else a single-argument no-op.

    The stand-in accepts one positional argument and returns None, so a
    skipped test method can still be called in place of the real one.
    """
    if not predicate():
        def _noop(arg):
            pass
        return _noop
    return func
def get_collector_config(key, value):
    """Build a minimal diamond ConfigObj with *value* stored under collectors[key].

    The server/default sections carry just enough settings for a collector
    to be instantiated in a test.
    """
    config = configobj.ConfigObj()
    config['server'] = {'collectors_config_path': ''}
    config['collectors'] = {
        'default': {'hostname_method': "uname_short"},
        key: value,
    }
    return config
class CollectorTestCase(unittest.TestCase):
    """Shared base class for diamond collector tests: fixture-loading helpers
    plus assertion helpers over mocked publish calls (Python 2 codebase)."""
    def setDocExample(self, collector, metrics, defaultpath=None):
        """Rewrite docs/collectors-<collector>.md, replacing the
        __EXAMPLESHERE__ placeholder line with one example line per metric.

        Returns False when *metrics* is empty or the doc file is missing /
        unreadable / unwritable; True on success.
        """
        if not len(metrics):
            return False
        filePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'docs', 'collectors-' + collector + '.md')
        if not os.path.exists(filePath):
            return False
        if not os.access(filePath, os.W_OK):
            return False
        if not os.access(filePath, os.R_OK):
            return False
        try:
            fp = open(filePath, 'Ur')
            content = fp.readlines()
            fp.close()
            fp = open(filePath, 'w')
            for line in content:
                if line.strip() == '__EXAMPLESHERE__':
                    for metric in sorted(metrics.iterkeys()):
                        metricPath = 'servers.hostname.'
                        if defaultpath:
                            metricPath += defaultpath + '.'
                        metricPath += metric
                        # Collapse accidental double dots from empty path parts.
                        metricPath = metricPath.replace('..', '.')
                        fp.write('%s %s\n' % (metricPath, metrics[metric]))
                else:
                    fp.write(line)
            fp.close()
        except IOError:
            return False
        return True
    def getFixturePath(self, fixture_name):
        """Absolute path of a fixture next to the test module; warns if missing."""
        file = os.path.join(os.path.dirname(inspect.getfile(self.__class__)),
                            'fixtures',
                            fixture_name)
        if not os.access(file, os.R_OK):
            print "Missing Fixture " + file
        return file
    def getFixture(self, fixture_name):
        """Return the fixture file's content wrapped in a StringIO."""
        # NOTE(review): if open() raises, the finally clause references an
        # unbound `f` (UnboundLocalError), masking the original error — confirm.
        try:
            f = open(self.getFixturePath(fixture_name), 'r')
            data = StringIO(f.read())
            return data
        finally:
            f.close()
    def getPickledResults(self, results_name):
        """Unpickle and return a stored results fixture."""
        # NOTE(review): same unbound-`f` risk as getFixture when open() fails.
        try:
            f = open(self.getFixturePath(results_name), 'r')
            data = pickle.load(f)
            return data
        finally:
            f.close()
    def setPickledResults(self, results_name, data):
        """Pickle *data* into the named results fixture (handle left to GC)."""
        pickle.dump(data, open(self.getFixturePath(results_name), "w+b"))
    def assertUnpublished(self, mock, key, value, expected_value=0):
        """Assert that metric *key* was never published."""
        return self.assertPublished(mock, key, value, expected_value)
    def assertPublished(self, mock, key, value, expected_value=1):
        """Assert *key* was published *expected_value* times and, when it was,
        with *value* (optionally a (value, precision) tuple; precision=None
        forces exact equality instead of assertAlmostEqual)."""
        # *mock* may be a single publish mock or a list of them.
        if type(mock) is list:
            for m in mock:
                calls = (filter(lambda x: x[0][0] == key, m.call_args_list))
                if len(calls) > 0:
                    break
        else:
            calls = filter(lambda x: x[0][0] == key, mock.call_args_list)
        actual_value = len(calls)
        message = '%s: actual number of calls %d, expected %d' % (
            key, actual_value, expected_value)
        self.assertEqual(actual_value, expected_value, message)
        if expected_value:
            actual_value = calls[0][0][1]
            expected_value = value
            precision = 0
            if isinstance(value, tuple):
                expected_value, precision = expected_value
            message = '%s: actual %r, expected %r' % (key,
                                                      actual_value,
                                                      expected_value)
            #print message
            if precision is not None:
                self.assertAlmostEqual(float(actual_value),
                                       float(expected_value),
                                       places=precision,
                                       msg=message)
            else:
                self.assertEqual(actual_value, expected_value, message)
    def assertUnpublishedMany(self, mock, dict, expected_value=0):
        """Assert that none of the metrics in *dict* were published."""
        return self.assertPublishedMany(mock, dict, expected_value)
    def assertPublishedMany(self, mock, dict, expected_value=1):
        """Run assertPublished for every (key, value) pair, then reset mock(s)."""
        for key, value in dict.iteritems():
            self.assertPublished(mock, key, value, expected_value)
        if type(mock) is list:
            for m in mock:
                m.reset_mock()
        else:
            mock.reset_mock()
    def assertUnpublishedMetric(self, mock, key, value, expected_value=0):
        """Assert that no published Metric object's path contains *key*."""
        return self.assertPublishedMetric(mock, key, value, expected_value)
    def assertPublishedMetric(self, mock, key, value, expected_value=1):
        """Like assertPublished, but matches Metric objects by substring of
        their .path and compares their .value attribute."""
        calls = filter(lambda x: x[0][0].path.find(key) != -1,
                       mock.call_args_list)
        actual_value = len(calls)
        message = '%s: actual number of calls %d, expected %d' % (
            key, actual_value, expected_value)
        self.assertEqual(actual_value, expected_value, message)
        # NOTE(review): unlike assertPublished there is no `if expected_value:`
        # guard here, so expected_value=0 still indexes calls[0] and raises
        # IndexError — confirm whether a guard is missing.
        actual_value = calls[0][0][0].value
        expected_value = value
        precision = 0
        if isinstance(value, tuple):
            expected_value, precision = expected_value
        message = '%s: actual %r, expected %r' % (key,
                                                  actual_value,
                                                  expected_value)
        #print message
        if precision is not None:
            self.assertAlmostEqual(float(actual_value),
                                   float(expected_value),
                                   places=precision,
                                   msg=message)
        else:
            self.assertEqual(actual_value, expected_value, message)
    def assertUnpublishedMetricMany(self, mock, dict, expected_value=0):
        """Assert that none of the Metric paths in *dict* were published."""
        return self.assertPublishedMetricMany(mock, dict, expected_value)
    def assertPublishedMetricMany(self, mock, dict, expected_value=1):
        """Run assertPublishedMetric for every pair, then reset the mock."""
        for key, value in dict.iteritems():
            self.assertPublishedMetric(mock, key, value, expected_value)
        mock.reset_mock()
# Registry of successfully imported test modules, keyed by module name;
# filled in by getCollectorTests below.
collectorTests = {}
def getCollectorTests(path):
    """Recursively import every test*.py module under *path* into collectorTests."""
    for f in os.listdir(path):
        cPath = os.path.abspath(os.path.join(path, f))
        # Only files whose name matches test*.py are considered test modules.
        if (os.path.isfile(cPath)
            and len(f) > 3
            and f[-3:] == '.py'
            and f[0:4] == 'test'):
            # Make the module's directory and its parent importable.
            sys.path.append(os.path.dirname(cPath))
            sys.path.append(os.path.dirname(os.path.dirname(cPath)))
            modname = f[:-3]
            try:
                # Import the module
                collectorTests[modname] = __import__(modname,
                                                     globals(),
                                                     locals(),
                                                     ['*'])
                #print "Imported module: %s" % (modname)
            except Exception:
                # A broken test module is reported but does not stop the scan.
                print "Failed to import module: %s. %s" % (
                    modname, traceback.format_exc())
                continue
    # Second pass: recurse into subdirectories.
    for f in os.listdir(path):
        cPath = os.path.abspath(os.path.join(path, f))
        if os.path.isdir(cPath):
            getCollectorTests(cPath)
###############################################################################
if __name__ == "__main__":
    # Rename the process when setproctitle is installed (optional dependency).
    if setproctitle:
        setproctitle('test.py')
    # Disable log output for the unit tests
    log = logging.getLogger("diamond")
    log.addHandler(logging.StreamHandler(sys.stderr))
    log.disabled = True
    # Initialize Options
    parser = optparse.OptionParser()
    parser.add_option("-c",
                      "--collector",
                      dest="collector",
                      default="",
                      help="Run a single collector's unit tests")
    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      default=1,
                      action="count",
                      help="verbose")
    # Parse Command Line Args
    (options, args) = parser.parse_args()
    # Collect tests from src/collectors/<collector> (all collectors when the
    # option is empty) and from src/diamond.
    cPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         'src',
                                         'collectors',
                                         options.collector))
    dPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         'src',
                                         'diamond'))
    getCollectorTests(cPath)
    getCollectorTests(dPath)
    # Load every TestCase subclass found in the imported modules.
    loader = unittest.TestLoader()
    tests = []
    for test in collectorTests:
        for name, c in inspect.getmembers(collectorTests[test],
                                          inspect.isclass):
            if not issubclass(c, unittest.TestCase):
                continue
            tests.append(loader.loadTestsFromTestCase(c))
    suite = unittest.TestSuite(tests)
    results = unittest.TextTestRunner(verbosity=options.verbose).run(suite)
    # Parse the run/errors/failures counters out of the result object's repr
    # ('key=value' tokens) to pick an exit code.
    results = str(results)
    results = results.replace('>', '').split()[1:]
    resobj = {}
    for result in results:
        result = result.split('=')
        resobj[result[0]] = int(result[1])
    if resobj['failures'] > 0:
        sys.exit(1)
    if resobj['errors'] > 0:
        sys.exit(2)
    sys.exit(0)
ace42825efbb14bad903b98af85d6eb478724073 | 3,171 | py | Python | torch_connectomics/data/dataset/misc.py | al093/pytorch_connectomics | 52821951233b061102380fc0d2521843652c580a | [
"MIT"
] | 2 | 2019-11-16T23:14:00.000Z | 2020-09-25T09:51:46.000Z | torch_connectomics/data/dataset/misc.py | al093/pytorch_connectomics | 52821951233b061102380fc0d2521843652c580a | [
"MIT"
] | 1 | 2020-09-22T08:49:04.000Z | 2020-09-22T08:49:04.000Z | torch_connectomics/data/dataset/misc.py | al093/pytorch_connectomics | 52821951233b061102380fc0d2521843652c580a | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import numpy as np
import random
import torch
####################################################################
## Process image stacks.
####################################################################
def count_volume(data_sz, vol_sz, stride):
    """Number of sliding-window positions per axis.

    Computes 1 + ceil((data_sz - vol_sz) / stride) element-wise.

    :param data_sz: per-axis size of the full volume (array-like of ints)
    :param vol_sz: per-axis size of the sliding window
    :param stride: per-axis step; now also accepts plain tuples/lists, not
        only numpy arrays (the original required `stride.astype`)
    :return: int numpy array of window counts per axis
    """
    stride = np.asarray(stride, dtype=float)
    return 1 + np.ceil((data_sz - vol_sz) / stride).astype(int)
def crop_volume(data, sz, st=(0, 0, 0)):
    """Crop a 3-D sub-volume of shape *sz* starting at offset *st*.

    Indexes the first three axes of *data*; intended for single-channel
    volumes per the original C*D*W*H (C=1) comment.
    """
    z0, y0, x0 = st
    dz, dy, dx = sz
    return data[z0:z0 + dz, y0:y0 + dy, x0:x0 + dx]
def crop_volume_mul(data, sz, st=(0, 0, 0)):
    """Crop a sub-volume of shape *sz* at offset *st*, keeping the leading
    channel axis intact (multi-channel variant of crop_volume)."""
    z0, y0, x0 = st
    dz, dy, dx = sz
    return data[:, z0:z0 + dz, y0:y0 + dy, x0:x0 + dx]
def check_cropable(data, sz, st):
    """Return True when a crop of size *sz* at offset *st* fits inside *data*.

    Checks that no offset is negative and that the crop stays within the
    trailing three (z, y, x) axes of *data*. *sz* and *st* may now be plain
    tuples/lists as well as numpy arrays (the original `st < 0` comparison
    raised TypeError for tuples under Python 3).
    """
    st = np.asarray(st)
    sz = np.asarray(sz)
    # Reject any negative position.
    if np.any(st < 0):
        return False
    # The crop must not exceed the image bounds on the last three axes.
    return bool(np.all(st + sz <= np.asarray(data.shape[-3:])))
####################################################################
## Rebalancing.
####################################################################
def rebalance_binary_class(label, mask=None, base_w=1.0):
    """Binary-class rebalancing.

    Computes the foreground fraction of *label* (restricted to *mask* when
    given) and returns per-element weights that scale foreground elements by
    (1 - f) / f while background elements keep weight 1.

    :param label: binary torch tensor of foreground/background labels
    :param mask: optional tensor restricting which elements are counted and
        zeroing the weights outside it
    :param base_w: NOTE(review): never referenced in the body — confirm
        whether it was meant to replace the hard-coded alpha below
    :return: (weight_factor, weight) tuple
    """
    weight_factor = label.float().sum() / torch.prod(torch.tensor(label.size()).float())
    if mask is not None:
        weight_factor = (mask*label).float().sum() / mask.sum()
    # Floor the fraction so the 1/f scaling cannot blow up.
    weight_factor = torch.clamp(weight_factor, min=1e-2)
    alpha = 1.0
    weight = alpha * label*(1-weight_factor)/weight_factor + (1-label)
    if mask is not None:
        weight = weight * mask
    return weight_factor, weight
def rebalance_skeleton_weight(skeleton_mask, seg_mask, alpha=1.0):
    """Weight map that up-weights skeleton voxels relative to the segment size.

    Skeleton voxels get weight alpha * |seg| / |skel| (clamped at 1e3);
    all other voxels keep the segmentation-mask value itself.
    """
    skeleton_count = skeleton_mask.sum().float()
    segment_count = seg_mask.sum().float()
    # 1e-10 guards against division by zero for an empty skeleton.
    factor = torch.clamp(alpha * segment_count / (skeleton_count + 1e-10), max=1e3)
    weight = seg_mask.clone().float()
    weight[skeleton_mask == 1.0] = factor
    return weight
####################################################################
## Affinitize.
####################################################################
def check_volume(data):
    """Coerce *data* into a 3-D numpy array.

    2-D input is promoted to a one-deep volume; 4-D input is accepted only
    with a singleton leading axis and squeezed to its trailing three axes.
    Anything else raises RuntimeError.
    """
    assert isinstance(data, np.ndarray)
    rank = data.ndim
    if rank == 2:
        # Promote a single 2-D slice to a one-deep volume.
        data = data[np.newaxis, ...]
    elif rank == 4:
        # Only a singleton-channel 4-D array is accepted.
        assert data.shape[0] == 1
        data = np.reshape(data, data.shape[-3:])
    elif rank != 3:
        raise RuntimeError('data must be a numpy 3D array')
    assert data.ndim == 3
    return data
# def affinitize(img, dst=(1,1,1), dtype=np.float32):
# """
# Transform segmentation to an affinity map.
# Args:
# img: 3D indexed image, with each index corresponding to each segment.
# Returns:
# ret: an affinity map (4D tensor).
# """
# img = check_volume(img)
# if ret is None:
# ret = np.zeros(img.shape, dtype=dtype)
# # Sanity check.
# (dz,dy,dx) = dst
# assert abs(dx) < img.shape[-1]
# assert abs(dy) < img.shape[-2]
# assert abs(dz) < img.shape[-3] | 33.734043 | 117 | 0.541154 |
ace4283092ed0d5a6c3980ef7ac2ff70559ac041 | 10,447 | py | Python | advbox/attacks/localsearch.py | haofanwang/AdvBox | e374f806ba9b175126fe512a0c5809bb87a58d63 | [
"Apache-2.0"
] | 1 | 2021-04-24T03:44:25.000Z | 2021-04-24T03:44:25.000Z | advbox/attacks/localsearch.py | haofanwang/AdvBox | e374f806ba9b175126fe512a0c5809bb87a58d63 | [
"Apache-2.0"
] | null | null | null | advbox/attacks/localsearch.py | haofanwang/AdvBox | e374f806ba9b175126fe512a0c5809bb87a58d63 | [
"Apache-2.0"
] | 1 | 2019-04-30T02:26:54.000Z | 2019-04-30T02:26:54.000Z | #coding=utf-8
# Copyright 2017 - 2018 Baidu Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provide the attack method for SinglePixelAttack & LocalSearchAttack's implement.
"""
from __future__ import division
import logging
from collections import Iterable
logger=logging.getLogger(__name__)
import numpy as np
from .base import Attack
__all__ = [
'SinglePixelAttack','LocalSearchAttack'
]
#Simple Black-Box Adversarial Perturbations for Deep Networks
#Randomly pick max_pixels points in the image and modify them on every channel at once; values usually range over 0-255
class SinglePixelAttack(Attack):
    """Single-pixel black-box attack: try extreme values at random pixels."""
    def __init__(self, model, support_targeted=True):
        super(SinglePixelAttack, self).__init__(model)
        self.support_targeted = support_targeted
    # isPreprocessed is False when the raw input data are given; set it to
    # True when the incoming image data have been normalised.
    def _apply(self,adversary,max_pixels=1000,isPreprocessed=False):
        if not self.support_targeted:
            if adversary.is_targeted_attack:
                raise ValueError(
                    "This attack method doesn't support targeted attack!")
        min_, max_ = self.model.bounds()
        # Force a copy so that modifying adv_img never touches adversary.original.
        adv_img = np.copy(adversary.original)
        '''
        adversary.original                the original input data
        adversary.original_label          label of the original input
        adversary.target_label            target label for a targeted attack
        adversary.__adversarial_example   stores the generated adversarial example
        adversary.adversarial_label       label of the adversarial example
        '''
        axes = [i for i in range(adversary.original.ndim) if i != self.model.channel_axis()]
        # The input image must have height and width axes.
        assert len(axes) == 2
        h = adv_img.shape[axes[0]]
        w = adv_img.shape[axes[1]]
        #print("w={0},h={1}".format(w,h))
        # max_pixels caps how many points are attacked; they are sampled at
        # random from the whole image.
        pixels = np.random.permutation(h * w)
        pixels = pixels[:max_pixels]
        for i, pixel in enumerate(pixels):
            x = pixel % w
            y = pixel // w
            location = [x, y]
            logging.info("Attack x={0} y={1}".format(x,y))
            location.insert(self.model.channel_axis(), slice(None))
            location = tuple(location)
            if not isPreprocessed:
                #logger.info("value in [min_={0}, max_={1}]".format(min_, max_))
                # Image was not preprocessed: integer values, typically 0-255,
                # so only the two extremes are tried.
                for value in [min_, max_]:
                    perturbed = np.copy(adv_img)
                    # Modify the point [x, y] on every channel at once.
                    perturbed[location] = value
                    f = self.model.predict(perturbed)
                    adv_label = np.argmax(f)
                    # NOTE(review): this hands `adv_img` (unmodified) to
                    # try_accept_the_example while the prediction came from
                    # `perturbed` — confirm which image should be stored.
                    if adversary.try_accept_the_example(adv_img, adv_label):
                        return adversary
            else:
                # Image was preprocessed: values typically in [0, 1], so sweep
                # 256 evenly spaced values across the bounds.
                for value in np.linspace(min_, max_, num=256):
                    #logger.info("value in [min_={0}, max_={1},step num=256]".format(min_, max_))
                    perturbed = np.copy(adv_img)
                    # Modify the point [x, y] on every channel at once.
                    perturbed[location] = value
                    f = self.model.predict(perturbed)
                    adv_label = np.argmax(f)
                    if adversary.try_accept_the_example(adv_img, adv_label):
                        return adversary
        return adversary
#This code is adapted from the foolbox implementation and ported to the paddle platform; original source:
#https://github.com/bethgelab/foolbox/blob/master/foolbox/attacks/localsearch.py
#Simple Black-Box Adversarial Perturbations for Deep Networks -- function naming follows the paper exactly
# perturbation factor p
# two perturbation parameters p ∈ R and r ∈ [0,2],
# a budget U ∈ N on the number of trials
# the half side length of the neighborhood square d ∈ N,
# the number of pixels perturbed at each round t ∈ N,
# and an upper bound on the number of rounds R ∈ N.
#
class LocalSearchAttack(Attack):
    """Black-box local-search attack; only model predictions are queried."""
    def __init__(self, model, support_targeted=True):
        super(LocalSearchAttack, self).__init__(model)
        self.support_targeted = support_targeted
    def softmax(self, x):
        """Numerically stable softmax over the logit vector x."""
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum()
    def _apply(self,adversary,r=1.5, p=10., d=5, t=5, R=150):
        """Run the local-search attack with perturbation factor r in [0, 2],
        perturbation magnitude p, neighbourhood half-width d, t pixels per
        round and at most R rounds."""
        if not self.support_targeted:
            if adversary.is_targeted_attack:
                raise ValueError(
                    "This attack method doesn't support targeted attack!")
        # Allowed range of the perturbation factor r.
        assert 0 <= r <= 2
        min_, max_ = self.model.bounds()
        logger.info("LocalSearchAttack parameter:min_={}, max_={} ".format(min_, max_ ))
        # Force a copy so that modifying adv_img never touches adversary.original.
        adv_img = np.copy(adversary.original)
        logger.info("adv_img:{}".format(adv_img))
        original_label=adversary.original_label
        logger.info("LocalSearchAttack parameter:r={0}, p={1}, d={2}, t={3}, R={4}".format(r, p, d, t, R))
        '''
        adversary.original                the original input data
        adversary.original_label          label of the original input
        adversary.target_label            target label for a targeted attack
        adversary.__adversarial_example   stores the generated adversarial example
        adversary.adversarial_label       label of the adversarial example
        '''
        axes = [i for i in range(adversary.original.ndim) if i != self.model.channel_axis()]
        if len(axes) == 2:
            h = adv_img.shape[axes[0]]
            w = adv_img.shape[axes[1]]
        else:
            h=adv_img.shape[-1]
            w=1
        #print("w={0},h={1}".format(w,h))
        # Normalise pixel values into the [-0.5, 0.5] interval.
        def normalize(im):
            im = im -(min_ + max_) / 2
            im = im / (max_ - min_)
            LB = -1 / 2
            UB = 1 / 2
            return im, LB, UB
        def unnormalize(im):
            im = im * (max_ - min_)
            im = im + (min_ + max_) / 2
            return im
        # Normalise the working image.
        adv_img, LB, UB = normalize(adv_img)
        logger.info("normalize adv_img:{}".format(adv_img))
        channels = adv_img.shape[self.model.channel_axis()]
        # Randomly sample up to 10% of the pixels, capped at 128 points.
        def random_locations():
            n = int(0.1 * h * w)
            n = min(n, 128)
            locations = np.random.permutation(h * w)[:n]
            p_x = locations % w
            p_y = locations // w
            pxy = list(zip(p_x, p_y))
            pxy = np.array(pxy)
            return pxy
        # Perturb point [x, y] on every channel at once, setting it to
        # p * np.sign(Im[location]) (similar to a single FGSM step).
        # Ii itself is left untouched; a modified copy is returned.
        def pert(Ii, p, x, y):
            Im = Ii.copy()
            location = [x, y]
            location.insert(self.model.channel_axis(), slice(None))
            location = tuple(location)
            Im[location] = p * np.sign(Im[location])
            return Im
        # Cyclic clipping: keep r * Ibxy inside [LB, UB]; clipping can still
        # saturate, so the configured data range may need widening.
        # This part does not follow the paper exactly.
        def cyclic(r, Ibxy):
            #logger.info("cyclic Ibxy:{}".format(Ibxy))
            result = r * Ibxy
            #logger.info("cyclic result:{}".format(result))
            """
            The foolbox implementation has an edge case: when only one element
            exceeds UB, subtracting from the whole array corrupts the rest.
            if result.any() < LB:
                #result = result/r + (UB - LB)
                logger.info("cyclic result:{}".format(result))
                result = result + (UB - LB)
                logger.info("cyclic result:{}".format(result))
                #result=LB
            elif result.any() > UB:
                #result = result/r - (UB - LB)
                logger.info("cyclic result:{}".format(result))
                result = result - (UB - LB)
                logger.info("cyclic result:{}".format(result))
                #result=UB
            """
            # NOTE(review): `result.any()` yields a boolean, so comparing it
            # with LB/UB is unlikely to be the intended element-wise test —
            # confirm.
            if result.any() < LB:
                result = result + (UB - LB)
            elif result.any() > UB:
                result = result - (UB - LB)
            result=result.clip(LB,UB)
            #logger.info("cyclic result:{}".format(result))
            #assert LB <= np.all(result) <= UB
            return result
        Ii = adv_img
        PxPy = random_locations()
        # Attack rounds.
        for try_time in range(R):
            # Reshuffle and keep at most 128 candidate points.
            PxPy = PxPy[np.random.permutation(len(PxPy))[:128]]
            L = [pert(Ii, p, x, y) for x, y in PxPy]
            # Batch-score candidates: probability of the original label.
            def score(Its):
                Its = np.stack(Its)
                Its = unnormalize(Its)
                """
                original_label is the label of the original image.
                """
                # NOTE(review): this predicts `Ii` for every entry instead of
                # `It`, so all candidate scores come out identical — looks
                # like a bug; confirm.
                scores=[ self.model.predict(unnormalize(Ii))[original_label] for It in Its ]
                return scores
            # Pick the t most influential points (np.argsort is ascending,
            # hence slicing relative to the end of the order).
            scores = score(L)
            # NOTE(review): `[:-t]` drops the last t indices rather than
            # selecting exactly t of them — check against the intent above.
            indices = np.argsort(scores)[:-t]
            logger.info("try {0} times selected pixel indices:{1}".format(try_time,str(indices)))
            PxPy_star = PxPy[indices]
            for x, y in PxPy_star:
                # Perturb-and-clip point [x, y] on every colour channel, i.e.
                # amplify the value by the factor r.
                for b in range(channels):
                    location = [x, y]
                    location.insert(self.model.channel_axis(), b)
                    location = tuple(location)
                    Ii[location] = cyclic(r, Ii[location])
            f = self.model.predict(unnormalize(Ii))
            adv_label = np.argmax(f)
            adv_label_pro=self.softmax(f)[adv_label]
            logger.info("adv_label={0} adv_label_pro={1}".format(adv_label,adv_label_pro))
            # print("adv_label={0}".format(adv_label))
            if adversary.try_accept_the_example(unnormalize(Ii), adv_label):
                return adversary
            # Widen the search: pull in every point inside the 2d x 2d square
            # around each kept point, dropping anything outside [w, h].
            #"{Update a neighborhood of pixel locations for the next round}"
            PxPy = [
                (x, y)
                for _a, _b in PxPy_star
                for x in range(_a - d, _a + d + 1)
                for y in range(_b - d, _b + d + 1)]
            PxPy = [(x, y) for x, y in PxPy if 0 <= x < w and 0 <= y < h]
            PxPy = list(set(PxPy))
            PxPy = np.array(PxPy)
        return adversary | 32.046012 | 106 | 0.560927 |
ace42867cf276757e87c09a27ea9b840c6b6b353 | 4,063 | py | Python | libraries/linters/gradlerio_versions/lint_gradlerio_versions.py | pjreiniger/GirlsOfSteelFRC | 03ab0fb699dfc14985ce3c95b1eacb145288fc7e | [
"BSD-3-Clause"
] | null | null | null | libraries/linters/gradlerio_versions/lint_gradlerio_versions.py | pjreiniger/GirlsOfSteelFRC | 03ab0fb699dfc14985ce3c95b1eacb145288fc7e | [
"BSD-3-Clause"
] | 44 | 2021-11-09T23:23:48.000Z | 2022-03-22T23:44:15.000Z | libraries/linters/gradlerio_versions/lint_gradlerio_versions.py | pjreiniger/GirlsOfSteelFRC | 03ab0fb699dfc14985ce3c95b1eacb145288fc7e | [
"BSD-3-Clause"
] | 1 | 2021-11-19T03:45:21.000Z | 2021-11-19T03:45:21.000Z | import sys
import os
import re
import collections
import json
import shutil
def get_build_file_versions(build_files):
    """Map each declared GradleRIO version to the build files declaring it.

    Scans every file in *build_files* line by line; each line matching the
    GradleRIO plugin declaration contributes one entry.
    """
    pattern = re.compile(r'edu\.wpi\.first\.GradleRIO.* version "(.*)"')
    versions = collections.defaultdict(list)
    for build_file in build_files:
        with open(build_file, 'r') as handle:
            for line in handle:
                match = pattern.search(line)
                if match:
                    versions[match.group(1)].append(build_file)
    return versions
def get_vendor_deps_versions(vendor_deps):
    """For every vendordep name, map its declared versions to their files.

    *vendor_deps* maps a vendordep file name to the list of paths where that
    file appears; each path is parsed as JSON and grouped by its 'version'.
    """
    vendor_versions = {}
    for vendor_name, vendor_files in vendor_deps.items():
        by_version = collections.defaultdict(list)
        for vendor_file in vendor_files:
            with open(vendor_file, 'r') as handle:
                declared = json.load(handle)['version']
            by_version[declared].append(vendor_file)
        vendor_versions[vendor_name] = by_version
    return vendor_versions
def get_versions(base_directory):
    """Walk *base_directory* and collect version maps for the whole tree.

    Every build.gradle is fed to get_build_file_versions; every file inside a
    'vendordeps' directory is grouped by name and fed to
    get_vendor_deps_versions. Returns the two resulting mappings.
    """
    build_files = []
    vendor_deps = collections.defaultdict(list)
    for root, _, files in os.walk(base_directory):
        for f in files:
            full_file = os.path.join(root, f)
            if f == "build.gradle":
                build_files.append(full_file)
            elif "vendordeps" in os.path.dirname(full_file):
                vendor_deps[f].append(full_file)
    gradlerio_versions = get_build_file_versions(build_files)
    vendor_deps_versions = get_vendor_deps_versions(vendor_deps)
    return gradlerio_versions, vendor_deps_versions
def fix_vendordep_version(versions):
    """Copy the newest vendordep file over every file declaring an older one.

    *versions* maps a version string to the files declaring it. The newest
    version is the lexicographically greatest key — the same choice as the
    original sorted(..., reverse=True)[0].
    """
    newest_version = max(versions)
    newest_file = versions[newest_version][0]
    print(f"Using {newest_file}, version {newest_version}")
    for version, files in versions.items():
        if version != newest_version:
            for f in files:
                print(f"  Fixing {f}")
                shutil.copy(newest_file, f)
def fix_gradlerio_build_file(versions):
    """Rewrite every build.gradle pinned to an older GradleRIO version.

    *versions* maps a version string to the files declaring it; the newest
    (lexicographically greatest) version wins and every matching plugin line
    in the other files is replaced.
    """
    sorted_versions = sorted(list(versions.keys()), reverse=True)
    newest_version = sorted_versions[0]
    newest_file = versions[newest_version][0]
    print(f"Using {newest_file}, version {newest_version}")
    for version, files in versions.items():
        if version == newest_version:
            continue
        for bad_file in files:
            print(f"  Fixing {bad_file}")
            new_content = ""
            with open(bad_file, 'r') as f:
                for line in f.readlines():
                    matches = re.search(r'edu\.wpi\.first\.GradleRIO.* version "(.*)"', line)
                    if matches:
                        # NOTE(review): the replacement hard-codes 4-space
                        # indentation regardless of the original line's indent
                        # — confirm all build files use that layout.
                        new_content += f'    id "edu.wpi.first.GradleRIO" version "{newest_version}"\n'
                    else:
                        new_content += line
            with open(bad_file, 'w') as f:
                f.write(new_content)
def get_this_directory():
    """Locate this linter's directory, under Bazel or a plain checkout."""
    try:
        # Bazel run: resolve the script through the runfiles tree.
        from rules_python.python.runfiles import runfiles
        runfiles_tree = runfiles.Create()
        script_path = runfiles_tree.Rlocation("__main__/libraries/linters/gradlerio_versions/lint_gradlerio_versions.py")
        return os.path.dirname(script_path)
    except ModuleNotFoundError:
        # Plain invocation: fall back to this file's real location on disk.
        return os.path.dirname(os.path.realpath(__file__))
def main():
    """Lint the repo for mixed GradleRIO/vendordep versions; auto-fix and exit
    non-zero when any mismatch was found."""
    # The repository root is three directory levels above this linter.
    base_directory = os.path.join(get_this_directory(), "..", "..", "..")
    gradlerio_versions, vendor_deps_versions = get_versions(base_directory)
    passed = True
    if len(gradlerio_versions) == 0:
        raise Exception(f"No build files were found. Check base directory '{base_directory}'")
    # More than one GradleRIO version in use: rewrite the stragglers.
    if len(gradlerio_versions) != 1:
        fix_gradlerio_build_file(gradlerio_versions)
        passed = False
    # Same check per vendordep file name.
    for vendor_name, vendor_versions in vendor_deps_versions.items():
        if len(vendor_versions) != 1:
            passed = False
            fix_vendordep_version(vendor_versions)
    # Even though files were auto-fixed, a mismatch still fails the lint run.
    if not passed:
        sys.exit(-1)
if __name__ == "__main__":
    main()
| 29.875 | 107 | 0.62983 |
ace42aed94605ead3ac0975572433c03f6f3abb6 | 290 | py | Python | libdouya/dataclasses/i/cfg/database_configer.py | xmyeen/douya | d2f7c15ca2e049a8dad9d4deaeba73401c883860 | [
"MIT"
] | null | null | null | libdouya/dataclasses/i/cfg/database_configer.py | xmyeen/douya | d2f7c15ca2e049a8dad9d4deaeba73401c883860 | [
"MIT"
] | null | null | null | libdouya/dataclasses/i/cfg/database_configer.py | xmyeen/douya | d2f7c15ca2e049a8dad9d4deaeba73401c883860 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
#!/usr/bin/env python
from abc import ABCMeta, abstractclassmethod
from ..rdb import IDatabase
class IDatabaseConfiger(metaclass = ABCMeta):
    """Abstract interface for objects that initialise a database."""
    def __init__(self):
        pass
    # NOTE(review): `abstractclassmethod` has been deprecated since Python 3.3
    # and, being a classmethod, the first parameter `db` will actually receive
    # the class object rather than an IDatabase — confirm the intended
    # signature (e.g. @classmethod + @abstractmethod with an explicit cls).
    @abstractclassmethod
    def init_db(db:IDatabase, name:str = None):
        pass
ace42bb62ef59e115cef4be0500dce67c1d3fbb0 | 1,168 | py | Python | pandemic_response_analyzer/reports/migrations/0006_auto_20210201_1735.py | sedatozturke/swe-573-2020f | 3368b352076a57eadcfd40ea408666cc8a00d8df | [
"MIT"
] | null | null | null | pandemic_response_analyzer/reports/migrations/0006_auto_20210201_1735.py | sedatozturke/swe-573-2020f | 3368b352076a57eadcfd40ea408666cc8a00d8df | [
"MIT"
] | 6 | 2020-11-02T19:47:46.000Z | 2020-11-10T15:31:04.000Z | pandemic_response_analyzer/reports/migrations/0006_auto_20210201_1735.py | sedatozturke/swe-573-2020f | 3368b352076a57eadcfd40ea408666cc8a00d8df | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-02-01 14:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reports', '0005_auto_20210201_1259'),
]
operations = [
migrations.AddField(
model_name='reportdetail',
name='noun_count',
field=models.JSONField(null=True),
),
migrations.AddField(
model_name='reportdetail',
name='nouncloud_image_b64',
field=models.CharField(max_length=100000, null=True),
),
migrations.AlterField(
model_name='reportdetail',
name='report',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='reports.report'),
),
migrations.AlterField(
model_name='reportdetail',
name='word_count',
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name='reportdetail',
name='wordcloud_image_b64',
field=models.CharField(max_length=100000, null=True),
),
]
| 29.2 | 105 | 0.592466 |
ace42d40eda5b854d9c58ff3bc38a0a6bb9264ea | 1,707 | py | Python | server.py | gadhagod/pewdiepie-vs-cocomelon | daf85714d0efe7a9e2659740722e1889aaef5923 | [
"MIT"
] | 1 | 2020-11-25T07:40:07.000Z | 2020-11-25T07:40:07.000Z | server.py | gadhagod/pewdiepie-vs-cocomelon | daf85714d0efe7a9e2659740722e1889aaef5923 | [
"MIT"
] | null | null | null | server.py | gadhagod/pewdiepie-vs-cocomelon | daf85714d0efe7a9e2659740722e1889aaef5923 | [
"MIT"
] | null | null | null | import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import pickle
import flask
# Flask application plus module-level Google OAuth credentials shared by the
# request handlers below.
app = flask.Flask(__name__)
SCOPES = ['https://www.googleapis.com/auth/youtube.readonly']
creds = None
# Load previously stored OAuth credentials from disk at import time.
with open('token.pickle', 'rb') as token:
    creds = pickle.load(token)
# NOTE(review): dumping the same object straight back makes this write a
# no-op round-trip of token.pickle — confirm whether a refreshed credential
# was meant to be persisted here instead.
with open('token.pickle', 'wb') as token:
    pickle.dump(creds, token)
def insert_commas(in_str):
    """Return *in_str* rendered as an integer with thousands separators,
    e.g. '1234567' -> '1,234,567'."""
    as_number = int(in_str)
    return '{:,}'.format(as_number)
def get_data(channel_id):
    """Fetch snippet+statistics for one YouTube channel and format its counts.

    Builds a fresh YouTube API client per call using the module-level creds,
    then rewrites subscriberCount/viewCount in place with thousands
    separators. Returns the first (only) item of the API response.
    """
    data = googleapiclient.discovery.build(
        'youtube', 'v3', credentials=creds
    ).channels().list(
        part='snippet,statistics',
        id=channel_id
    ).execute()['items'][0]
    # Strip any '-' characters before converting, then format with commas.
    data['statistics']['subscriberCount'] = insert_commas(data['statistics']['subscriberCount'].replace('-', ''))
    data['statistics']['viewCount'] = insert_commas(data['statistics']['viewCount'].replace('-', ''))
    return(data)
pewdiepie = 'UC-lHJZR3Gqxm24_Vd_AJ5Yw'
cocomelon = 'UCbCmjCuTUZos6Inko4u57UQ'
@app.route('/')
def main():
    """Render the comparison page for both channels.

    Fetches each channel once and derives the subscriber/view difference
    from the same data that is displayed.
    """
    pewdiepie_data = get_data(pewdiepie)
    cocomelon_data = get_data(cocomelon)

    def _count(data, field):
        # insert_commas() formatted the counters with ',' separators in
        # get_data(); strip them again so we can do integer arithmetic.
        return int(data['statistics'][field].replace(',', ''))

    difference = {
        'subscriberCount': insert_commas(_count(pewdiepie_data, 'subscriberCount') - _count(cocomelon_data, 'subscriberCount')),
        'viewCount': insert_commas(_count(pewdiepie_data, 'viewCount') - _count(cocomelon_data, 'viewCount'))
    }
    # BUG FIX: the original called get_data() a second time for each channel
    # here, doubling YouTube API quota usage per request and risking stats
    # that disagree with `difference`; reuse the data fetched above instead.
    return flask.render_template('index.html', pewdiepie=pewdiepie_data, cocomelon=cocomelon_data, difference=difference)
@app.errorhandler(404)
def page_not_found(err):
    """Render the custom 404 page.

    BUG FIX: return the 404 status explicitly — a bare render_template()
    result from an error handler is sent with status 200 by default.
    """
    return flask.render_template('404.html'), 404
ace42e2d6ef86d122dc19b55cc02becfa324baa8 | 588 | py | Python | photorec/services/recognition.py | MTB90/edx-aws-developer | a3cf461f3437221c0c36ac34d30af15cd5202ff6 | [
"MIT"
] | 2 | 2019-02-22T15:46:51.000Z | 2019-10-11T11:23:12.000Z | photorec/services/recognition.py | MTB90/edx-aws-developer | a3cf461f3437221c0c36ac34d30af15cd5202ff6 | [
"MIT"
] | 31 | 2019-04-20T20:45:42.000Z | 2022-03-12T00:11:58.000Z | photorec/services/recognition.py | MTB90/edx-aws-developer | a3cf461f3437221c0c36ac34d30af15cd5202ff6 | [
"MIT"
] | null | null | null | import boto3
class ServiceRekognition:
    """Thin wrapper around the AWS Rekognition client for photo labelling."""

    def __init__(self, config):
        """Create a Rekognition client pointed at the endpoint from *config*."""
        self._service_name = 'rekognition'
        self._config = config
        endpoint = config.AWS_ENDPOINTS.get(self._service_name)
        self._rekognition_client = boto3.client(
            service_name=self._service_name,
            endpoint_url=endpoint
        )

    def detect_tag(self, photo_key):
        """Return the name of the single most likely label for S3 object *photo_key*."""
        image = {'S3Object': {'Bucket': self._config.FILE_STORAGE, 'Name': photo_key}}
        result = self._rekognition_client.detect_labels(Image=image, MaxLabels=1)
        labels = result['Labels']
        return labels[0]['Name']
ace42ecbe1a7853774913f3fdec1002edc294a6e | 9,601 | py | Python | utils_navigation.py | shagun30/djambala-2 | 06f14e3dd237d7ebf535c62172cfe238c3934f4d | [
"BSD-3-Clause"
] | null | null | null | utils_navigation.py | shagun30/djambala-2 | 06f14e3dd237d7ebf535c62172cfe238c3934f4d | [
"BSD-3-Clause"
] | null | null | null | utils_navigation.py | shagun30/djambala-2 | 06f14e3dd237d7ebf535c62172cfe238c3934f4d | [
"BSD-3-Clause"
] | null | null | null | #-*-coding: utf-8 -*-
"""
/dms/utils_navigation.py
.. enthaelt Hilfsroutinen zurm Aendern des linken Navigationsbereichs
Django content Management System
Hans Rauch
hans.rauch@gmx.net
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 12.03.2007 Beginn der Arbeit
0.02 11.10.2007 item.is_main_menu
"""
import string
from django.utils.safestring import SafeData, mark_safe, SafeUnicode
from django.utils.translation import ugettext as _
from dms.queries import delete_menuitem_navmenu_left
from dms.queries import get_menuitems_by_menu_id_left
from dms.queries import get_new_navmenu_left
from dms.queries import delete_menuitem_navmenu_top
from dms.queries import get_new_navmenu_top
from dms.encode_decode import decode_html
# -----------------------------------------------------
def get_navmenu_choices_left(menu_id):
    """Build the (value, label) choice list for selecting a left-nav entry.

    The first choice is the (local) start page; the rest are parsed from
    the stored menu definition, one choice per non-comment line that has
    at least two '|'-separated fields.  Choice values are 'main|sub'
    alias pairs; labels are the captions (depth-0 entries rendered bold).
    """
    ret = []
    ret.append( ('|', mark_safe(_('<b><i>(Lokale) Startseite</i></b>'))) )
    menu = get_menuitems_by_menu_id_left(menu_id)[0]
    lines = string.splitfields(menu.navigation, '\n')
    nav_main = ''
    nav_sub = ''
    for line in lines:
        line = string.strip(line)
        # Skip blank lines and '#' comments.
        if line != '' and line[0] != '#':
            arr = string.splitfields(line, '|')
            if len(arr) > 1:
                my_depth = int(string.strip(arr[0]))
                my_alias = string.strip(arr[1])
                if my_depth == 0:
                    # New top-level entry: remember it and reset the sub alias.
                    nav_main = my_alias
                    nav_sub = ''
                else:
                    nav_sub = my_alias
                # Caption is the fourth field; assumes at least 4 fields
                # per line — TODO confirm the definition format guarantees this.
                info = string.strip(arr[3])
                if my_depth == 0:
                    info = '<b>' + info + '</b>'
                ret.append( (nav_main + '|' + nav_sub, mark_safe(info)) )
    return ret
# -----------------------------------------------------
def get_data_left(line):
    """Parse one line of the left-navigation definition.

    Each non-comment line has the form
    ``depth | alias | link | caption [| title [| prefix]]``.

    Returns a ``(depth, alias, html)`` tuple:
      * ``(-1, -1, '')`` for blank lines and ``#`` comments,
      * ``(-999, -1, spacer_html)`` for lines without any ``|`` separator,
      * otherwise the parsed depth, the alias and the rendered link HTML.

    BUG FIX: replaced the ``string.strip`` / ``string.splitfields`` /
    ``string.replace`` module-level helpers (deprecated in Python 2,
    removed in Python 3) with the equivalent ``str`` methods; the output
    is byte-identical.
    """
    line = line.strip()
    if line == '' or line[0] == '#':
        return -1, -1, ''
    arr = line.split('|')
    if len(arr) == 1:
        # No fields at all: render a small vertical spacer.
        return -999, -1, '<div style="padding:0.3em;"></div>\n'
    ret = ''
    my_depth = int(arr[0].strip())
    my_alias = arr[1].strip()
    link = arr[2].strip()
    # Assumes at least 4 fields on every separator line (as the original
    # did) — an IndexError here means a malformed definition line.
    info = arr[3].strip()
    if len(arr) > 4:
        # Escape double quotes so the value is safe inside title="...".
        title = arr[4].strip().replace('"', '&quot;')
    else:
        title = ''
    if len(arr) > 5:
        # Optional prefix (sixth field) precedes the link; entries with a
        # prefix are rendered bold.
        ret += arr[5].replace('"', '&quot;') + ' '
        ret += u'<a class="navLink" href="%s" title="%s">' % (link, title)
        ret += '<b>' + info + '</b></a><span class="invisible">|</span>'
    else:
        ret += u'<a class="navLink" href="%s" title="%s">' % (link, title)
        ret += info + '</a><span class="invisible">|</span>'
    return my_depth, my_alias, ret
# -----------------------------------------------------
def get_top_navigation_menu_left(lines):
    """Render the top-level (depth 0) entries of the left navigation.

    Depth-0 entries get a bottom-border wrapper unless they already render
    as a <div>; spacer entries (depth -999) are appended verbatim; all
    other lines are skipped.
    """
    parts = []
    for raw_line in lines:
        depth, _alias, html = get_data_left(raw_line)
        if depth == 0:
            if html != '' and html.find('<div') < 0:
                parts.append('<div class="menu_border_bottom">' + html + '</div>\n')
            else:
                parts.append(html)
        elif depth == -999:
            parts.append(html)
    return ''.join(parts)
# -----------------------------------------------------
def get_data_top(line, nav_main='', profi_mode=False):
    """Parse one line of the top-navigation definition and render it.

    Each non-comment line has the form
    ``alias | link | caption [| title [| prefix [| new_window]]]``.

    Returns ``(alias, html)``; ``(-1, '')`` for blank lines, comments and
    lines without a ``|`` separator.  When *nav_main* equals the line's
    alias the entry is rendered as the selected (non-link) item.
    *profi_mode* is accepted for interface compatibility but unused here.

    BUG FIXES: the original fell through and implicitly returned ``None``
    for separator-less lines, which crashed every caller that unpacks the
    result into two values; it also used the ``string`` module helper
    functions that were removed in Python 3 (replaced with str methods,
    output unchanged).
    """
    line = line.strip()
    if line == '' or line[0] == '#':
        return -1, ''
    arr = line.split('|')
    if len(arr) <= 1:
        # Treat lines without a separator like comments instead of
        # returning None (see BUG FIXES above).
        return -1, ''
    ret = ''
    my_alias = arr[0].strip()
    link = arr[1].strip()
    info = arr[2].strip()
    if len(arr) > 3:
        # Escape double quotes so the value is safe inside title="...".
        title = arr[3].strip().replace('"', '&quot;')
    else:
        title = ''
    if len(arr) > 4:
        prefix = arr[4].replace('"', '&quot;') + ' '
    else:
        prefix = ''
    if len(arr) > 5:
        # Any sixth field makes the link open in a separate window.
        target = ' target="_extern"'
    else:
        target = ''
    if nav_main != '' and nav_main == my_alias:
        # Currently selected entry: render highlighted text, not a link.
        link = u'<span class="navTopBoxSelected"><span class="navTopLinkSelected">' + \
               ' %s </span></span>' % info
        ret += prefix + link
    else:
        c = 'navTopLink'
        start_of_link = ''
        end_of_link = ''
        ret += u'<a class="%s" href="%s" title="%s"%s>' % (c, link, title, target)
        ret += prefix + start_of_link + info + end_of_link + '</a>'
    return my_alias, ret
# -----------------------------------------------------
def get_top_navigation_menu_top(lines, profi_mode):
    """Render the uppermost menu bar with 'start' as the selected entry.

    Parsable lines are joined with a pipe-separator span; comment/blank
    lines (alias == -1) are skipped.
    """
    rendered = []
    for raw_line in lines:
        alias, html = get_data_top(raw_line, 'start', profi_mode)
        if alias > -1:
            rendered.append(html)
    return ' <span class="navTop">|</span> '.join(rendered)
# -----------------------------------------------------
def get_top_navigation_menu(lines, nav_main, profi_mode):
    """Render the upper main menu, marking *nav_main* as the selected entry.

    Parsable lines are joined with a pipe-separator span; comment/blank
    lines (alias == -1) are skipped.
    """
    rendered = []
    for raw_line in lines:
        alias, html = get_data_top(raw_line, nav_main, profi_mode)
        if alias > -1:
            rendered.append(html)
    return ' <span class="navTop">|</span> '.join(rendered)
# -----------------------------------------------------
def get_navigation_menu(lines, *args):
    """Render the left navigation for the currently selected path.

    *args* holds up to four selection aliases (main, sub, sub-sub, ...);
    missing slots are padded with ''.  A small state machine walks the
    definition lines: ``select`` is 0 before the selected main entry is
    found, 1 immediately after opening its menu area, 2 while inside the
    indented submenu, and 3 once that submenu has been closed again.
    """
    n_args = 0
    menu = []
    # Pad the selection path to four entries; n_args ends up as the
    # number of aliases actually supplied.
    # NOTE(review): xrange is Python-2-only; this module will not run
    # unchanged on Python 3.
    for n in xrange(4):
        try:
            menu.append(args[n])
            n_args += 1
        except:
            menu.append('')
    ret = u''
    select = 0
    for line in lines:
        my_depth, my_alias, res = get_data_left(line)
        # Wrap plain entries with a bottom border; output that already
        # starts its own <div> (the spacer markup) is left unwrapped.
        if res != '' and string.find(res, '<div') < 0:
            res_start = '<div class="menu_border_bottom">'
            res_end = '</div>\n'
        else:
            res_start = ''
            res_end = ''
        if my_depth == 0:
            if select == 2:
                # Leaving the opened submenu: close its three <div>s.
                ret += '</div>\n</div>\n</div>\n'
                select = 3
            elif select == 0 and my_alias == menu[0]:
                # Found the selected main entry: open the menu area.
                select = 1
                ret += '<div class="menu_area">\n<div style="padding-left:2px;">'
        if my_depth == -999:
            # Separator-less line: emit the spacer markup verbatim.
            ret += res
        elif my_depth == -1:
            if res != '':
                if select == 2:
                    ret += '</div>\n</div>\n</div>\n'
                    select = 3
                ret += res_start + res + res_end
        elif (select in [1, 2, 3] ) and my_alias == menu[n_args-1]:
            # The deepest selected alias gets the highlighted tab header.
            ret += '\n<div class="tabHeaderBg">\n' + res_start + res + '&raquo;' + res_end + '</div>\n'
        elif my_depth == 0 or select == 2:
            ret += res_start + res + res_end
        if select == 1:
            # Indent the submenu of the freshly opened selected entry.
            ret += '<div style="margin-left:15px;">\n'
            select = 2
    if select == 2:
        # Selected entry was the last one: close its still-open <div>s.
        ret += '</div>\n</div>\n</div>\n'
    return ret
# -----------------------------------------------------
def get_menu_data(id=1):
    """Return the raw navigation definition text for menu *id*.

    BUG FIX: the original called ``get_menuitems_by_id_navmenu_left``,
    a name that is never imported in this module and therefore raised
    ``NameError`` on every call; ``get_menuitems_by_menu_id_left``
    (imported from ``dms.queries`` and used the same way by
    ``get_navmenu_choices_left``) is the intended lookup.
    """
    menu = get_menuitems_by_menu_id_left(id)[0]
    return menu.navigation
# -----------------------------------------------------
def save_menus_left(menu_id, text, is_main_menu=False):
    """Re-generate and persist all left-navigation menu records for *menu_id*.

    Deletes the existing records, stores the top-level menu under the key
    '|', then stores one pre-rendered menu per definition line, keyed by
    the 'main|sub' alias pair that was current at that line.
    """
    def save_this_menu (menu_id, name, navigation, is_main_menu):
        # Persist one pre-rendered navigation blob under *name*.
        item = get_new_navmenu_left()
        item.menu_id = menu_id
        item.name = name
        item.navigation = navigation
        item.is_main_menu = is_main_menu
        item.save()
    lines = string.splitfields(text, '\n')
    # Replace all existing records for this menu.
    delete_menuitem_navmenu_left(menu_id)
    menu = get_top_navigation_menu_left(lines)
    save_this_menu(menu_id, '|', menu, is_main_menu)
    nav_main = ''
    nav_sub = ''
    nav_sub_sub = ''
    for line in lines:
        line = string.strip(line)
        # Skip blank lines and '#' comments.
        if line != '' and line[0] != '#':
            arr = string.splitfields(line, '|')
            if len(arr) > 1:
                my_depth = int(string.strip(arr[0]))
                my_alias = string.strip(arr[1])
                if my_depth == 0:
                    # New main entry: reset the deeper aliases.
                    nav_main = my_alias
                    nav_sub = ''
                    nav_sub_sub = ''
                elif my_depth == 1:
                    nav_sub = my_alias
                    nav_sub_sub = ''
                else:
                    nav_sub_sub = my_alias
                # NOTE(review): `info` is assigned but never used below.
                info = string.strip(arr[3])
                # Render the menu for the deepest alias seen so far.
                if nav_sub == '':
                    menu = get_navigation_menu(lines, nav_main)
                elif nav_sub_sub == '':
                    menu = get_navigation_menu(lines, nav_main, nav_sub)
                else:
                    menu = get_navigation_menu(lines, nav_main, nav_sub, nav_sub_sub)
                save_this_menu(menu_id, nav_main + '|' + nav_sub, menu, is_main_menu)
# -----------------------------------------------------
def save_menus_top(menu_id, text, profi_mode=False):
    """Re-generate and persist the top main-menu records for *menu_id*.

    Deletes the existing records, stores the uppermost menu under the key
    '|', then stores one pre-rendered menu per definition line, keyed by
    that line's alias.
    """
    def save_this_menu (menu_id, name, navigation):
        # Persist one pre-rendered navigation blob under *name*.
        item = get_new_navmenu_top()
        item.menu_id = menu_id
        item.name = name
        item.navigation = navigation
        item.save()
    #if not profi_mode:
    #    text = decode_html(text)
    lines = string.splitfields(text, '\n')
    # Replace all existing records for this menu.
    delete_menuitem_navmenu_top(menu_id)
    menu = get_top_navigation_menu_top(lines, profi_mode)
    save_this_menu(menu_id, '|', menu)
    nav_main = ''
    for line in lines:
        line = string.strip(line)
        # Skip blank lines and '#' comments.
        if line != '' and line[0] != '#':
            arr = string.splitfields(line, '|')
            if len(arr) > 1:
                my_alias = string.strip(arr[0])
                nav_main = my_alias
                # NOTE(review): `info` is assigned but never used below;
                # also assumes at least 4 fields per line — TODO confirm.
                info = string.strip(arr[3])
                menu = get_top_navigation_menu(lines, nav_main, profi_mode)
                save_this_menu(menu_id, nav_main, menu)
# -----------------------------------------------------
def save_menu_left_new(menu_id, name, description, navigation, is_main_menu=False):
    """Create and persist a brand-new left-navigation menu record."""
    fields = {
        'menu_id': menu_id,
        'name': name,
        'description': description,
        'navigation': navigation,
        'is_main_menu': is_main_menu,
    }
    record = get_new_navmenu_left()
    for attr, value in fields.items():
        setattr(record, attr, value)
    record.save()
| 31.478689 | 97 | 0.559213 |
ace42ede9c02d21eae29a09fd1a6316eaa5cb5e9 | 6,928 | py | Python | views/game.py | albertopoljak/geometry-dash-arcade | c453d8888ada32d7537aacde4a4fb0d2262293f3 | [
"RSA-MD"
] | 1 | 2020-04-09T23:26:59.000Z | 2020-04-09T23:26:59.000Z | views/game.py | albertopoljak/geometry-dash-arcade | c453d8888ada32d7537aacde4a4fb0d2262293f3 | [
"RSA-MD"
] | 1 | 2020-04-09T23:27:32.000Z | 2020-04-09T23:43:19.000Z | views/game.py | albertopoljak/geometry-dash-arcade | c453d8888ada32d7537aacde4a4fb0d2262293f3 | [
"RSA-MD"
] | null | null | null | import arcade
from utils.debug import DebugView
from utils.sprites import ParallaxBackground
from utils.particles import PlayerDashTrail
from player import Player
from typing import Tuple
from level_data_handler import LevelDataHandler
from constants import (SCREEN_WIDTH, SCREEN_HEIGHT, GRAVITY, BOTTOM_VIEWPORT_MARGIN, TOP_VIEWPORT_MARGIN,
PLAYER_START_X, PLAYER_START_Y)
level_data_handler = LevelDataHandler()
class GameView(DebugView):
    """Main gameplay view: the level scrolls right-to-left past a player
    whose x coordinate is pinned to PLAYER_START_X (see on_update)."""
    def __init__(self):
        """Declare all view state; real initialisation happens in setup()."""
        super().__init__()
        self.current_level = 1
        self.view_bottom = 0
        self.view_left = 0
        self.level_speed = 1
        # Sprite lists loaded from the level's .tmx map in setup().
        self.obstacle_list = None
        self.trap_list = None
        self.jump_pad_list = None
        self.background_list = None
        self.music = None
        self.player = None
        self.physics_engine = None
        self.player_dash_trail = None
    def setup(self, level_number: int):
        """Generator that loads the level's assets step by step.

        Yields a progress string before each loading phase (consumed by a
        loading view; reset_level() simply exhausts it).
        """
        self.current_level = level_number
        # Used to keep track of our scrolling
        self.view_bottom = 0
        self.view_left = 0
        self.level_speed = level_data_handler.get_level_speed(str(level_number))
        yield "Loading map"
        my_map = arcade.tilemap.read_tmx(f"assets/maps/level_{level_number}.tmx")
        # All map sprites scroll left-to-right toward the player at the
        # level's speed.
        self.obstacle_list = arcade.tilemap.process_layer(my_map, "obstacles")
        for obstacle in self.obstacle_list:
            obstacle.change_x = self.level_speed
        self.trap_list = arcade.tilemap.process_layer(my_map, "traps")
        for trap in self.trap_list:
            trap.change_x = self.level_speed
        self.jump_pad_list = arcade.tilemap.process_layer(my_map, "jump_pads")
        for jump_pad in self.jump_pad_list:
            jump_pad.change_x = self.level_speed
        floor = arcade.Sprite("assets/images/floor.png", 2)
        floor.position = (SCREEN_WIDTH // 2, 0)
        # Move the loaded map sprites x to the right so they "come" to us.
        # Move the loaded map sprites y up so they start at floor height.
        floor_y_offset = floor.center_y + floor.height // 2
        self.obstacle_list.move(SCREEN_WIDTH, floor_y_offset)
        self.trap_list.move(SCREEN_WIDTH, floor_y_offset)
        self.jump_pad_list.move(SCREEN_WIDTH, floor_y_offset)
        # After we're done with loading map add the floor (only after since we change the loaded sprite positions)
        self.obstacle_list.append(floor)
        yield "Loading backgrounds.."
        background_parallax_list = level_data_handler.get_background_parallax_list(level_number)
        self.background_list = ParallaxBackground(background_parallax_list)
        yield "Loading music"
        self.music = arcade.load_sound("assets/sounds/Guitar-Mayhem-5.mp3")
        arcade.play_sound(self.music)
        yield "Finishing up"
        self.player = Player()
        self.player.center_x = PLAYER_START_X
        self.player.center_y = PLAYER_START_Y
        self.player_dash_trail = PlayerDashTrail(self.get_player_dash_trail_position())
        self.physics_engine = arcade.PhysicsEnginePlatformer(self.player, self.obstacle_list, GRAVITY)
    def get_player_dash_trail_position(self) -> Tuple[float, float]:
        """Return the (x, y) anchor for the dash-trail particle emitter,
        just behind and slightly above the player's bottom edge."""
        # 0.85 because we don't want particle emitter to be at bottom bottom.
        return (self.player.center_x - self.player.width // 2,
                self.player.center_y - self.player.height * 0.85)
    def on_draw(self):
        """Draw one frame: background, map layers, particles, player."""
        arcade.start_render()
        self.background_list.draw(self.view_left)
        self.obstacle_list.draw()
        self.trap_list.draw()
        self.jump_pad_list.draw()
        self.player_dash_trail.draw()
        self.player.draw()
    def on_key_press(self, key, modifiers: int):
        """Jump on UP / W / SPACE, but only while standing on an obstacle."""
        if key in (arcade.key.UP, arcade.key.W, arcade.key.SPACE):
            if self.physics_engine.can_jump():
                self.player.jump()
    def on_update(self, delta_time: float):
        """Per-frame game logic: collisions, physics, scrolling, resets."""
        # We're making obstacles move to player, the player X coordinate will be static.
        # For further comments this will be referred as 'player X coordinate hack'
        self.player.center_x = PLAYER_START_X
        if arcade.check_for_collision_with_list(self.player, self.trap_list):
            print("You felt into trap")
            self.reset_level()
        elif arcade.check_for_collision_with_list(self.player, self.jump_pad_list):
            self.player.jump(big_jump=True)
        collision_sprite_list = self.physics_engine.update()
        # Emit dash particles whenever the physics engine reports contact
        # with an obstacle (i.e. while sliding along the floor/walls).
        for collision_sprite in collision_sprite_list:
            if collision_sprite in self.obstacle_list:
                self.player_dash_trail.emit()
        self.trap_list.update()
        self.jump_pad_list.update()
        self.background_list.update()
        self.player_dash_trail.update()
        self.viewport_update()
        # Using our player X coordinate hack when we hit a moving wall the Physics engine will try to move us
        # (since everything is scrolling right to left we will be moved left) but since we're resetting the player X
        # coordinate it will "teleport" us above the obstacle after it moves us a certain number of pixels to the left.
        # The 10 is not fixed it can be loose, I just choose it because it worked the best.
        if PLAYER_START_X > self.player.center_x + 10:
            print("You collided with obstacle")
            self.reset_level()
        # If player (somehow) falls of screen then reset level (invalid tmx file or users playing around with map code)
        if self.player.center_y <= -300:
            print("You somehow managed to fell off screen.")
            self.reset_level()
    def reset_level(self):
        """Stop the music and reload the current level from scratch."""
        arcade.stop_sound(self.music)
        # Exhaust generator since we don't need load view for resetting
        for _ in self.setup(self.current_level):
            pass
    def viewport_update(self):
        """Scroll the viewport vertically to keep the player in view."""
        # Flag variable to mark if we should update viewport
        changed = False
        # Scroll up
        # NOTE(review): this vertical boundary is computed from
        # SCREEN_WIDTH rather than SCREEN_HEIGHT — looks like a mix-up;
        # confirm against the intended viewport margins.
        top_boundary = self.view_bottom + SCREEN_WIDTH - TOP_VIEWPORT_MARGIN
        if self.player.top > top_boundary:
            self.view_bottom += self.player.top - top_boundary
            changed = True
        # Scroll down
        bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN
        if self.player.bottom < bottom_boundary:
            self.view_bottom -= bottom_boundary - self.player.bottom
            changed = True
        if changed:
            # Only scroll to integers. Otherwise we end up with pixels that
            # don't line up on the screen
            self.view_bottom = int(self.view_bottom)
            self.view_left = int(self.view_left)
            # Do the scrolling
            arcade.set_viewport(self.view_left,
                                SCREEN_WIDTH + self.view_left,
                                self.view_bottom,
                                SCREEN_HEIGHT + self.view_bottom)
| 40.752941 | 119 | 0.662673 |
ace430299cf7f57aa5ca71b7dcbc2bffd69cd103 | 315 | py | Python | src/pyvesync/__init__.py | RedbeardWally/pyvesync | c41eb8a77e68cc357f558e4fa413e42f9ba68658 | [
"MIT"
] | null | null | null | src/pyvesync/__init__.py | RedbeardWally/pyvesync | c41eb8a77e68cc357f558e4fa413e42f9ba68658 | [
"MIT"
] | null | null | null | src/pyvesync/__init__.py | RedbeardWally/pyvesync | c41eb8a77e68cc357f558e4fa413e42f9ba68658 | [
"MIT"
] | null | null | null | """VeSync API Library."""
# pylint: skip-file
# flake8: noqa
from .vesync import VeSync
from .vesyncoutlet import *
from .vesyncswitch import *
# NOTE(review): `.vesyncoutlet` is star-imported a second time here
# (duplicate of the import two lines above); harmless but redundant.
from .vesyncoutlet import *
from .vesyncbulb import *
import logging
# Module-level logging configuration applied whenever the package is imported.
logging.basicConfig(
    level=logging.INFO, format='%(asctime)s - %(levelname)5s - %(message)s'
)
| 19.6875 | 75 | 0.72381 |
ace430cb653cbaa1007bc750dd7745c5c62e1db5 | 3,184 | py | Python | oee_registry/oee_registry/settings.py | impactlab/oeem-registry-prototype- | 46b13bab57686e77542e4e31beb0ccc6d819ed52 | [
"Apache-2.0"
] | null | null | null | oee_registry/oee_registry/settings.py | impactlab/oeem-registry-prototype- | 46b13bab57686e77542e4e31beb0ccc6d819ed52 | [
"Apache-2.0"
] | null | null | null | oee_registry/oee_registry/settings.py | impactlab/oeem-registry-prototype- | 46b13bab57686e77542e4e31beb0ccc6d819ed52 | [
"Apache-2.0"
] | null | null | null | """
Django settings for oee_registry project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this secret key is committed to source control; rotate it
# and load it from the environment before any production deployment.
SECRET_KEY = '#f@^66qax^p(x-8ld^omlzng$*-8m&=7zb7(nehfkd*fzshmwt'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty: only works for local development (plus DEBUG's localhost default).
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'oee_registry.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'oee_registry.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| 26.098361 | 91 | 0.700691 |
ace431915fab55dee4bd17ee139773cd66d137cc | 177 | py | Python | test.py | k-rajmani2k/AI-Chatbot-1 | 91e2cd47795b1393be5bb251ca996517f00fc18b | [
"MIT"
] | null | null | null | test.py | k-rajmani2k/AI-Chatbot-1 | 91e2cd47795b1393be5bb251ca996517f00fc18b | [
"MIT"
] | null | null | null | test.py | k-rajmani2k/AI-Chatbot-1 | 91e2cd47795b1393be5bb251ca996517f00fc18b | [
"MIT"
] | 4 | 2021-07-03T08:24:05.000Z | 2022-03-14T04:00:06.000Z | import aichatbot as bot
# Paths handed to the chatbot factory: the intents definition file and
# the directory used for dumps.
filenames = {
    "intents": "./data/basic_intents.json",
    "dir": "dumps"
}
# Build a model with the bag-of-words ("bow") technique and start the bot.
bot_model = bot.Create(filenames, technique="bow")
bot.start(bot_model)
| 17.7 | 50 | 0.683616 |
ace432a03d62f64144ee2c58fdd224893c1ceee2 | 329 | py | Python | p2p/receipt.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | 3 | 2019-06-17T13:59:20.000Z | 2021-05-02T22:09:13.000Z | p2p/receipt.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | 2 | 2019-04-30T06:22:12.000Z | 2019-06-14T04:27:18.000Z | p2p/receipt.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | 2 | 2019-12-14T02:52:32.000Z | 2021-02-18T23:04:44.000Z | from p2p.abc import (
HandshakeReceiptAPI,
ProtocolAPI,
)
class HandshakeReceipt(HandshakeReceiptAPI):
    """
    Data storage object for ephemeral data exchanged during protocol
    handshakes.
    """
    # The protocol instance this receipt was produced for.
    protocol: ProtocolAPI
    def __init__(self, protocol: ProtocolAPI) -> None:
        self.protocol = protocol
| 20.5625 | 68 | 0.696049 |
ace433434ae4788c2bbc8b23f38c69e7ca2b0734 | 754 | py | Python | Challenge/generate_parenthesis.py | joyliao07/code_practice_and_review | 093e6ddb6e10df2ce61539b515fe7943f24346c3 | [
"MIT"
] | null | null | null | Challenge/generate_parenthesis.py | joyliao07/code_practice_and_review | 093e6ddb6e10df2ce61539b515fe7943f24346c3 | [
"MIT"
] | null | null | null | Challenge/generate_parenthesis.py | joyliao07/code_practice_and_review | 093e6ddb6e10df2ce61539b515fe7943f24346c3 | [
"MIT"
] | null | null | null | # Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
# For example, given n = 3, a solution set is:
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
class Solution:
    """Generate all well-formed combinations of n pairs of parentheses."""

    def generateParenthesis(self, n: int) -> "List[str]":
        """Return every balanced string made of *n* parenthesis pairs.

        BUG FIX: the return annotation is quoted because ``List`` is never
        imported in this module; the original unquoted ``List[str]``
        annotation raised ``NameError`` as soon as the class body executed.
        """
        basket: list = []
        self.generate(n, n, '', basket)
        return basket

    def generate(self, left, right, acc, basket):
        """Depth-first builder; *left*/*right* count the '(' and ')' still
        available.

        An opening bracket is always allowed while some remain; a closing
        bracket only when it keeps the prefix balanced (right > left).  A
        finished string is collected once both counters reach zero.
        """
        if left > 0:
            self.generate(left - 1, right, acc + '(', basket)
        if right > 0 and right > left:
            self.generate(left, right - 1, acc + ')', basket)
        if left == 0 and right == 0:
            basket.append(acc)
| 25.133333 | 105 | 0.51061 |
ace433cbe8c14d8ea77169fcb52b8dd26bdaa3e2 | 2,697 | py | Python | Codewars/8kyu/how-many-stairs-will-suzuki-climb-in-20-years/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/8kyu/how-many-stairs-will-suzuki-climb-in-20-years/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codewars/8kyu/how-many-stairs-will-suzuki-climb-in-20-years/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python - 3.4.3
Test.describe("Basic tests")
sunday = [6737, 7244, 5776, 9826, 7057, 9247, 5842, 5484, 6543, 5153, 6832, 8274,
7148, 6152, 5940, 8040, 9174, 7555, 7682, 5252, 8793, 8837, 7320, 8478, 6063,
5751, 9716, 5085, 7315, 7859, 6628, 5425, 6331, 7097, 6249, 8381, 5936, 8496,
6934, 8347, 7036, 6421, 6510, 5821, 8602, 5312, 7836, 8032, 9871, 5990, 6309, 7825]
monday = [9175, 7883, 7596, 8635, 9274, 9675, 5603, 6863, 6442, 9500, 7468, 9719,
6648, 8180, 7944, 5190, 6209, 7175, 5984, 9737, 5548, 6803, 9254, 5932, 7360, 9221,
5702, 5252, 7041, 7287, 5185, 9139, 7187, 8855, 9310, 9105, 9769, 9679, 7842,
7466, 7321, 6785, 8770, 8108, 7985, 5186, 9021, 9098, 6099, 5828, 7217, 9387]
tuesday = [8646, 6945, 6364, 9563, 5627, 5068, 9157, 9439, 5681, 8674, 6379, 8292,
7552, 5370, 7579, 9851, 8520, 5881, 7138, 7890, 6016, 5630, 5985, 9758, 8415, 7313,
7761, 9853, 7937, 9268, 7888, 6589, 9366, 9867, 5093, 6684, 8793, 8116, 8493,
5265, 5815, 7191, 9515, 7825, 9508, 6878, 7180, 8756, 5717, 7555, 9447, 7703]
wednesday = [6353, 9605, 5464, 9752, 9915, 7446, 9419, 6520, 7438, 6512, 7102,
5047, 6601, 8303, 9118, 5093, 8463, 7116, 7378, 9738, 9998, 7125, 6445, 6031, 8710,
5182, 9142, 9415, 9710, 7342, 9425, 7927, 9030, 7742, 8394, 9652, 5783, 7698,
9492, 6973, 6531, 7698, 8994, 8058, 6406, 5738, 7500, 8357, 7378, 9598, 5405, 9493]
thursday = [6149, 6439, 9899, 5897, 8589, 7627, 6348, 9625, 9490, 5502, 5723, 8197,
9866, 6609, 6308, 7163, 9726, 7222, 7549, 6203, 5876, 8836, 6442, 6752, 8695, 8402,
9638, 9925, 5508, 8636, 5226, 9941, 8936, 5047, 6445, 8063, 6083, 7383, 7548, 5066,
7107, 6911, 9302, 5202, 7487, 5593, 8620, 8858, 5360, 6638, 8012, 8701]
friday = [5000, 5642, 9143, 7731, 8477, 8000, 7411, 8813, 8288, 5637, 6244, 6589, 6362,
6200, 6781, 8371, 7082, 5348, 8842, 9513, 5896, 6628, 8164, 8473, 5663, 9501,
9177, 8384, 8229, 8781, 9160, 6955, 9407, 7443, 8934, 8072, 8942, 6859, 5617,
5078, 8910, 6732, 9848, 8951, 9407, 6699, 9842, 7455, 8720, 5725, 6960, 5127]
saturday = [5448, 8041, 6573, 8104, 6208, 5912, 7927, 8909, 7000, 5059, 6412, 6354, 8943,
5460, 9979, 5379, 8501, 6831, 7022, 7575, 5828, 5354, 5115, 9625, 7795, 7003,
5524, 9870, 6591, 8616, 5163, 6656, 8150, 8826, 6875, 5242, 9585, 9649, 9838,
7150, 6567, 8524, 7613, 7809, 5562, 7799, 7179, 5184, 7960, 9455, 5633, 9085]
stairs = [sunday, monday, tuesday, wednesday, thursday, friday, saturday]
Test.assert_equals(stairs_in_20(stairs), 54636040)
| 64.214286 | 96 | 0.6066 |
ace4348906d25da71db84abca1d4caaf89290edf | 982 | py | Python | apps/IF201812/config.py | sgarbirodrigo/ml-sound-classifier | 662ee6ed4554da875626b601b180caef37c0b1ef | [
"MIT"
] | 118 | 2018-08-14T08:26:27.000Z | 2022-03-23T00:33:08.000Z | apps/IF201812/config.py | sgarbirodrigo/ml-sound-classifier | 662ee6ed4554da875626b601b180caef37c0b1ef | [
"MIT"
] | 5 | 2019-03-03T19:06:42.000Z | 2021-03-29T10:52:02.000Z | apps/IF201812/config.py | sgarbirodrigo/ml-sound-classifier | 662ee6ed4554da875626b601b180caef37c0b1ef | [
"MIT"
] | 34 | 2019-03-22T06:16:34.000Z | 2021-05-18T07:48:13.000Z | # Interface 2018/12 sound classification example for shaking snack
# Application configurations
from easydict import EasyDict
conf = EasyDict()
# Basic configurations
conf.sampling_rate = 44100
conf.duration = 2
conf.hop_length = 347*2 # to make time steps 128
conf.fmin = 20
conf.fmax = conf.sampling_rate // 2
conf.n_mels = 128
conf.n_fft = conf.n_mels * 20
conf.model = 'mobilenetv2' # 'alexnet'
# Labels
conf.labels = ['babystar', 'bbq', 'corn', 'kappaebi', 'potechi', 'vegetable']
# Training configurations
conf.folder = '.'
conf.n_fold = 1
conf.valid_limit = None
conf.random_state = 42
conf.test_size = 0.2
conf.samples_per_file = 1
conf.batch_size = 32
conf.learning_rate = 0.0001
conf.metric_save_ckpt = 'val_acc'
conf.epochs = 100
conf.verbose = 2
conf.best_weight_file = 'best_model_weight.h5'
# Runtime conficurations
conf.rt_process_count = 1
conf.rt_oversamples = 10
conf.pred_ensembles = 10
conf.runtime_model_file = None # 'model/mobilenetv2_fsd2018_41cls.pb'
| 24.55 | 77 | 0.759674 |
ace435493dbc17692303b8f78f0e3239a9429158 | 475 | py | Python | setup.py | Mrfantasy752/Frappe-Query-Report | 72a204e511cc3b7eeef7462242cf9933cbdc1fc1 | [
"MIT"
] | null | null | null | setup.py | Mrfantasy752/Frappe-Query-Report | 72a204e511cc3b7eeef7462242cf9933cbdc1fc1 | [
"MIT"
] | null | null | null | setup.py | Mrfantasy752/Frappe-Query-Report | 72a204e511cc3b7eeef7462242cf9933cbdc1fc1 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open("requirements.txt") as f:
install_requires = f.read().strip().split("\n")
# get version from __version__ variable in thirvusoft/__init__.py
from thirvusoft import __version__ as version
setup(
name="thirvusoft",
version=version,
description="nothing",
author="thirvusoft",
author_email="non@gmail.com",
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 23.75 | 65 | 0.781053 |
ace4356613f942e42026e7bf2e4c60b6af016387 | 10,249 | py | Python | contrib/spendfrom/spendfrom.py | shmutalov/qogecoin | e4de0cbd427eac5d6f58eee67eec0486b3a8942c | [
"MIT"
] | 1 | 2021-11-03T23:06:53.000Z | 2021-11-03T23:06:53.000Z | contrib/spendfrom/spendfrom.py | shmutalov/qogecoin | e4de0cbd427eac5d6f58eee67eec0486b3a8942c | [
"MIT"
] | null | null | null | contrib/spendfrom/spendfrom.py | shmutalov/qogecoin | e4de0cbd427eac5d6f58eee67eec0486b3a8942c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Fail fast if the json module cannot round-trip BTC amounts exactly.

    Encodes a known 8-decimal-place value through json and verifies the
    satoshi count survives unchanged; raises RuntimeError otherwise.
    """
    reference = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(reference)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default qogecoin data directory for the current OS."""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Qogecoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Qogecoin")
    # Linux and other Unix-likes keep a dot-directory in $HOME.
    return os.path.expanduser("~/.qogecoin")
def read_bitcoin_config(dbdir):
    """Read the qogecoin.conf file from dbdir, returns dictionary of settings"""
    # NOTE(review): Python-2-only import — the module was renamed to
    # `configparser` in Python 3, so this function fails there.
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        """File-like shim that injects a fake '[all]' section header.

        qogecoin.conf has no section headers, but ConfigParser requires
        one; this wrapper yields '[all]' first, then the real lines with
        any '#' comment suffix stripped.
        """
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # try/finally trick: return the fake header exactly once.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Drop everything from '#' onward (inline comments).
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "qogecoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a qogecoin JSON-RPC server and return the ServiceProxy.

    Exits the process (status 1) when the connection fails or when the
    server's testnet setting does not match *config*.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 19332 if testnet else 9332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # BUGFIX: this was a bare "except:", which also swallowed the
        # SystemExit raised just above and mis-reported a testnet mismatch
        # as a connection error.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Prompt for the wallet passphrase (if the wallet is locked) and unlock it.

    Returns True when the wallet is unencrypted or currently unlocked,
    False when the entered passphrase did not unlock it.
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # BUGFIX: was a bare "except:", which also caught Ctrl-C
            # (KeyboardInterrupt) and reported it as a wrong passphrase.
            sys.stderr.write("Wrong passphrase\n")

    # Re-check: walletpassphrase (if it succeeded) extends unlocked_until.
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Collect spendable funds per address.

    Returns a dict mapping address -> {"total": amount, "outputs": [...],
    "account": str}, built from the node's unspent outputs.
    """
    account_by_address = {}
    for info in bitcoind.listreceivedbyaddress(0):
        account_by_address[info["address"]] = info["account"]

    summary = {}
    for output in bitcoind.listunspent(0):
        # listunspent doesn't give addresses, so look them up via the raw tx:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # Only ordinary pay-to-address / pay-to-script-hash outputs are
        # handled right now; anything exotic is skipped.
        if pk["type"] not in ("pubkeyhash", "scripthash"):
            continue

        address = pk["addresses"][0]
        entry = summary.get(address)
        if entry is None:
            summary[address] = {
                "total": vout["value"],
                "outputs": [output],
                "account": account_by_address.get(address, "")
            }
        else:
            entry["total"] += vout["value"]
            entry["outputs"].append(output)

    return summary
def select_coins(needed, inputs):
    """Greedily pick inputs (in order) until *needed* is covered.

    Returns (chosen_outputs, change); change is negative when the inputs
    were insufficient.  Feel free to improve this -- it is deliberately
    simple.
    """
    chosen = []
    gathered = Decimal("0.0")
    for entry in inputs:
        if gathered >= needed:
            break
        chosen.append({"txid": entry["txid"], "vout": entry["vout"]})
        gathered += entry["amount"]
    return (chosen, gathered - needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Create and sign a raw transaction spending from *fromaddresses*.

    Sends *amount* to *toaddress*; any change above BASE_FEE goes back to
    the last from-address.  Returns the signed transaction as a hex string,
    or exits the process on insufficient funds / signing failure.
    """
    needed = amount + fee
    available = list_available(bitcoind)

    candidate_inputs = []
    total_available = Decimal("0.0")
    for addr in fromaddresses:
        if addr not in available:
            continue
        candidate_inputs.extend(available[addr]["outputs"])
        total_available += available[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed))
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, amounts are cast to float before being sent to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, candidate_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)

    return signed_rawtx["hex"]
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of all outputs spent by *txinfo*'s inputs."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        funding_tx = bitcoind.getrawtransaction(vin['txid'], 1)
        total += funding_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of *txinfo*."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Decode *txdata_hex* and abort (exit 1) if its fee looks unreasonable.

    Rejects transactions whose implied fee exceeds *max_fee*, and no-fee
    transactions that are either larger than 1000 bytes or tiny-amount.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUGFIX: "fee" was never assigned, so the two no-fee checks below
        # raised NameError instead of validating the transaction.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))

        tx_size = len(txdata_hex) // 2   # two hex chars per byte
        kb = tx_size // 1000             # explicit floor division (py2 "/" relied on int division)
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction

    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list available funds, or build and send a tx."""
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get qogecoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send qogecoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of qogecoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)

    if options.amount is None:
        # No amount given: just list what is available per address.
        address_summary = list_available(bitcoind)
        # BUGFIX: dict.iteritems() is Python-2-only; items() works on both.
        for address,info in address_summary.items():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass  # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Sanity-cap the fee at 1% of the amount being sent.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
| 37.819188 | 111 | 0.635477 |
ace435d37763978db0408931827f6b3e1a070a48 | 6,321 | py | Python | flask_helloworld_apache2/workflow_handler.py | sidhshar/gcp-explore | fe8ec6b210f878b383a4979984dc4acc31074a15 | [
"Apache-2.0"
] | null | null | null | flask_helloworld_apache2/workflow_handler.py | sidhshar/gcp-explore | fe8ec6b210f878b383a4979984dc4acc31074a15 | [
"Apache-2.0"
] | null | null | null | flask_helloworld_apache2/workflow_handler.py | sidhshar/gcp-explore | fe8ec6b210f878b383a4979984dc4acc31074a15 | [
"Apache-2.0"
] | null | null | null | import datetime
import time
import localsettings as ls
import sqlite3
class Singleton(type):
    """Metaclass that caches exactly one instance per class.

    The first call to the class constructs the instance; every later call
    returns the cached one.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        cache = Singleton._instances
        if cls not in cache:
            cache[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cache[cls]
class DBHandler(object):
    """Thin wrapper around the sqlite3 event-processor store.

    Connects to ls.DBSTORE in autocommit mode and creates the
    EVENT_PROCESSOR table on construction.
    """

    def __init__(self):
        self.connection = sqlite3.connect(ls.DBSTORE)
        # If you want autocommit mode, then set isolation_level to None.
        self.connection.isolation_level = None
        self.create_table()

    def execute_query_via_cursor(self, query):
        """Run *query* with no parameters, discarding any results."""
        cursor = self.connection.cursor()
        try:
            cursor.execute(query)
        finally:
            # BUGFIX: close the cursor even when execute() raises.
            cursor.close()

    def execute_parameterised_query_via_cursor(self, query, parameters):
        """Run a parameterised *query*, discarding any results."""
        # TODO: Create cursor pool
        cursor = self.connection.cursor()
        try:
            cursor.execute(query, parameters)
        finally:
            cursor.close()

    def execute_parameterised_query_via_cursor_with_results(self, query, parameters):
        """Run a parameterised *query* and return all fetched rows."""
        cursor = self.connection.cursor()
        try:
            cursor.execute(query, parameters)
            results = cursor.fetchall()
        finally:
            cursor.close()
        return results

    def create_table(self):
        """Create the EVENT_PROCESSOR table if it does not exist yet."""
        create_query = '''CREATE TABLE IF NOT EXISTS EVENT_PROCESSOR
             (ID INTEGER PRIMARY KEY, REMOTE_ADDR text, ua text, ph text, ts timestamp, vulassessment int, retrycount int, retry_timestamps text)'''
        self.execute_query_via_cursor(create_query)

    def write_to_db(self, ip, ua, ph, ts, cvss, rcount, rts):
        """Insert one processed-event row.

        (The original also computed an unused ``now`` timestamp here; removed.)
        """
        insert_query = "INSERT INTO EVENT_PROCESSOR(REMOTE_ADDR, ua, ph, ts, vulassessment, retrycount, retry_timestamps) values (?, ?, ?, ?, ?, ?, ?)"
        insert_values = (ip, ua, ph, ts, cvss, rcount, rts)
        self.execute_parameterised_query_via_cursor(insert_query, insert_values)

    def perform_select_on_ip(self, ip):
        """Return all rows recorded for *ip* (list of tuples, may be empty)."""
        selectvalues = (ip,)
        select_query = 'SELECT * FROM EVENT_PROCESSOR WHERE REMOTE_ADDR=?'
        return self.execute_parameterised_query_via_cursor_with_results(select_query, selectvalues)

    def close(self):
        """Close the underlying sqlite connection."""
        self.connection.close()
class RequestItem(object):
    """State of one in-flight request, keyed by its originating IP."""

    def __init__(self, ip, ua):
        self.ip = ip
        self.ua_from_istio = ua
        self.state = ls.WORKFLOW_STATES[ls.RECEIVED_FROM_ISTIO]
        self.retry_count = 0
        self.retry_timestamps = []
        self.ts = datetime.datetime.now()
        self.cvss_score = 0
        self.ph = None

    def get_ip(self):
        """Return the originating IP address."""
        return self.ip

    def get_verbose_state(self):
        """Return the human-readable name of the current workflow state."""
        return ls.WORKFLOW_STATES_VERBOSE[self.state]

    def increment_retry_count(self):
        """Record one more retry, stamping the current wall-clock time."""
        self.retry_count = self.retry_count + 1
        stamp = datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
        self.retry_timestamps.append(stamp)

    def change_to_nvm_event_recieved(self):
        """Advance to the webhook-trigger-received state."""
        self.state = ls.WORKFLOW_STATES[ls.RECIEVED_WEBHOOK_TRIGGER]

    def mark_vul_assess_done(self):
        """Advance to the vulnerability-assessment-complete state."""
        self.state = ls.WORKFLOW_STATES[ls.VUL_ASSESS_COMPLETE]

    def mark_complete(self):
        """Advance to the terminal all-processing-done state."""
        self.state = ls.WORKFLOW_STATES[ls.ALL_PROCESSING_DONE]

    def is_complete(self):
        """True when all processing for this request has finished."""
        return ls.WORKFLOW_STATES[ls.ALL_PROCESSING_DONE] == self.state

    def set_cvss_score(self, cvss):
        self.cvss_score = cvss

    def set_process_hash(self, ph):
        self.ph = ph

    def save_audit_trail(self):
        """Persist this request's summary row to the sqlite store."""
        dbhandle = DBHandler()
        dbhandle.write_to_db(self.ip, self.ua_from_istio, self.ph, self.ts,
                             self.cvss_score, self.retry_count,
                             "+".join(self.retry_timestamps))
        dbhandle.close()
class WorkflowManager(object):
    """Tracks pending RequestItems keyed by client IP (used as a singleton).

    NOTE(review): Python 2 style throughout (``__metaclass__``,
    ``dict.has_key``, print statements) -- this module will not run under
    Python 3 as-is.
    """
    __metaclass__ = Singleton

    def __init__(self):
        # ip -> RequestItem for requests still being processed
        self.pending = {}

    def get_pending_by_ip(self, ip):
        """Return the pending RequestItem for *ip*, or None if there is none."""
        if self.pending.has_key(ip):
            return self.pending[ip]

    def create_new_request(self, ip, ua):
        """Return the pending request for *ip*, creating one on first sight.

        Repeated calls for the same IP bump its retry counter instead of
        creating a duplicate entry.
        """
        pending = self.get_pending_by_ip(ip)
        if pending is None:
            # Create new pending request
            reqitem = RequestItem(ip, ua)
            self.pending[ip] = reqitem
            return reqitem
        else:
            # Update the retry count
            pending.increment_retry_count()
            return pending

    def mark_nvm_flow_arrival(self, ip, ph):
        """Handle the webhook trigger for *ip*: run the vulnerability
        assessment, persist the audit trail and drop the queue entry.

        Returns False (and ignores the event) when no matching pending
        request exists; True on success.
        """
        pending = self.get_pending_by_ip(ip)
        if pending is None:
            # Error condition. Exit loop here
            print 'Did not find workflow object with IP: %s hash: %s. IGNORE Request.' % (ip, ph)
            return False
        else:
            pending.change_to_nvm_event_recieved()
            pending.set_process_hash(ph)
            cvss_score = self.make_vul_assess_call(ph)
            pending.set_cvss_score(cvss_score)
            pending.mark_vul_assess_done()
            # Write object details to DB and pop the queue object
            pending.save_audit_trail()
            pending.mark_complete()
            self.pending.pop(ip)
            return True

    def make_vul_assess_call(self, ph):
        # Stub: simulates an external vulnerability-assessment call
        # (2 s latency) and returns a canned CVSS score from localsettings.
        time.sleep(2)
        return ls.TEST_CVSS_SCORE
def wait_for_complete_state(reqitem):
    """Poll *reqitem* until it reports completion.

    Returns True as soon as reqitem.is_complete(), False after
    ls.TIMEOUT_IN_SECS worth of ls.SLEEP_TIME_SLICE polls.
    """
    waited = 0
    while waited <= ls.TIMEOUT_IN_SECS:
        if reqitem.is_complete():
            return True
        time.sleep(ls.SLEEP_TIME_SLICE)
        waited += ls.SLEEP_TIME_SLICE
    return False
def invoke_step1(ip, ua):
print 'Invoke step1'
cvss_score = 0
dbhandle = DBHandler()
results = dbhandle.perform_select_on_ip(ip)
if results:
# Assume 1 result as of now. TODO
result = results[0]
cvss_score = result[4]
print 'Got data from DB'
else:
print 'Starting Workflow Manager..'
wobj = WorkflowManager()
reqitem = wobj.create_new_request(ip, ua)
status = wait_for_complete_state(reqitem)
if not status:
# Timeout occured. Return negative response
print 'Timeout in Step1'
return { ls.INSERT_HEADER_NAME: False }
cvss_score = reqitem.cvss_score
if ls.TEST_CVSS_THRESHOLD <= cvss_score:
# Return Positive response
response = { ls.INSERT_HEADER_NAME: True }
else:
# Return negative response
response = { ls.INSERT_HEADER_NAME: False }
print 'Step 1 response: ',response
return response
def invoke_step2(host, ph):
print 'Invoke step2'
wobj = WorkflowManager()
response = wobj.mark_nvm_flow_arrival(host, ph)
print 'Step 2 response: ',response
return response
def invoke_test_step1():
    """Exercise step 1 with a canned Istio-style request."""
    istio_request = {
        'X-Initiator-Remote-Addr-1': '72.163.208.155, 72.163.217.103',
        'X-Initiator-Ua': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
    }
    # The header carries a comma-separated chain; the client IP is first.
    client_ip = istio_request['X-Initiator-Remote-Addr-1'].split(',')[0]
    result = invoke_step1(client_ip, istio_request['X-Initiator-Ua'])
def invoke_test_step2():
    """Exercise step 2 with a canned Splunk webhook payload."""
    webhook = {
        'host': '72.163.208.155',
        'ph': '072041FA70BB351030C516E1B6F7F21D15495DA158F3890826BA5B978AF8900E',
    }
    invoke_step2(webhook['host'], webhook['ph'])
if __name__ == '__main__':
    # Manual smoke test: the step-1 path is currently disabled; only the
    # webhook (step 2) path is exercised.
    #invoke_test_step1()
    #time.sleep(2)
    invoke_test_step2()
| 30.536232 | 145 | 0.740706 |
ace436954b3abf0084abc1fbce2748abc96a3221 | 5,176 | py | Python | open-hackathon-server/src/hackathon/expr/docker_expr_starter.py | overbest/open-hackathon | 62e085fbe603bcb00ca56d2b96cfc43bf44c710b | [
"MIT"
] | null | null | null | open-hackathon-server/src/hackathon/expr/docker_expr_starter.py | overbest/open-hackathon | 62e085fbe603bcb00ca56d2b96cfc43bf44c710b | [
"MIT"
] | null | null | null | open-hackathon-server/src/hackathon/expr/docker_expr_starter.py | overbest/open-hackathon | 62e085fbe603bcb00ca56d2b96cfc43bf44c710b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
import random
import string
import pexpect
from os.path import abspath, dirname, realpath
from hackathon.hmongo.models import Experiment, VirtualEnvironment
from hackathon.constants import VE_PROVIDER, VEStatus, VERemoteProvider, EStatus
from expr_starter import ExprStarter
class DockerExprStarter(ExprStarter):
    """Experiment starter that provisions virtual environments as Docker
    containers.

    Concrete backends (e.g. hosted docker / alauda) supply
    _internal_start_virtual_environment() and _get_docker_proxy().
    """

    def _internal_rollback(self, context):
        """Roll back a failed experiment."""
        # currently rollback share the same process as stop
        self._internal_stop_expr(context)

    def _stop_virtual_environment(self, virtual_environment, experiment, context):
        # No-op here; subclasses provide backend-specific teardown.
        pass

    def _internal_start_expr(self, context):
        """Start one container per unit of the experiment's template.

        A failure for any unit is logged and reported via
        _on_virtual_environment_failed; remaining units are still attempted.
        """
        for unit in context.template_content.units:
            try:
                self.__start_virtual_environment(context, unit)
            except Exception as e:
                self.log.error(e)
                self._on_virtual_environment_failed(context)

    def _internal_start_virtual_environment(self, context):
        # Backend-specific: must be implemented by subclasses.
        raise NotImplementedError()

    def _get_docker_proxy(self):
        # Backend-specific: must be implemented by subclasses.
        raise NotImplementedError()

    def _internal_stop_expr(self, context):
        """Stop every virtual environment of the experiment.

        An experiment that no longer exists is ignored; one with no virtual
        environments is marked ROLL_BACKED directly.
        """
        expr = Experiment.objects(id=context.experiment_id).first()
        if not expr:
            return
        if len(expr.virtual_environments) == 0:
            expr.status = EStatus.ROLL_BACKED
            expr.save()
            return
        # delete containers and change expr status
        for ve in expr.virtual_environments:
            context = context.copy()  # create new context for every virtual_environment
            context.virtual_environment_name = ve.name
            self._stop_virtual_environment(ve, expr, context)

    def __start_virtual_environment(self, context, docker_template_unit):
        """Create the VirtualEnvironment document for one template unit and
        kick off its container via the backend."""
        origin_name = docker_template_unit.get_name()
        # Container name: <first 9 chars of experiment id>-<unit name>-<random suffix>
        prefix = str(context.experiment_id)[0:9]
        suffix = "".join(random.sample(string.ascii_letters + string.digits, 8))
        new_name = '%s-%s-%s' % (prefix, origin_name, suffix.lower())
        docker_template_unit.set_name(new_name)
        self.log.debug("starting to start container: %s" % new_name)

        # db document for VirtualEnvironment
        ve = VirtualEnvironment(provider=VE_PROVIDER.DOCKER,
                                name=new_name,
                                image=docker_template_unit.get_image_with_tag(),
                                status=VEStatus.INIT,
                                remote_provider=VERemoteProvider.Guacamole)

        # create a new context for current ve only
        context = context.copy()
        experiment = Experiment.objects(id=context.experiment_id).no_dereference().only("virtual_environments").first()
        experiment.virtual_environments.append(ve)
        experiment.save()

        # start container remotely , use hosted docker or alauda docker
        context.virtual_environment_name = ve.name
        context.unit = docker_template_unit
        self._internal_start_virtual_environment(context)

    def _enable_guacd_file_transfer(self, context):
        """
        This function should be invoked after container is started in alauda_docker.py and hosted_docker.py
        :param ve: virtual environment
        """
        expr = Experiment.objects(id=context.experiment_id).no_dereference().first()
        virtual_env = expr.virtual_environments.get(name=context.virtual_environment_name)
        remote = virtual_env.remote_paras

        # Copy the bundled guacctl helper onto the container over scp,
        # driving the interactive prompts with pexpect.
        p = pexpect.spawn("scp -P %s %s %s@%s:/usr/local/sbin/guacctl" %
                          (remote["port"],
                           abspath("%s/../expr/guacctl" % dirname(realpath(__file__))),
                           remote["username"],
                           remote["hostname"]))
        i = p.expect([pexpect.TIMEOUT, 'yes/no', 'password: '])
        if i == 1:
            # First connection: accept the unknown host key, then wait for
            # the password prompt.
            p.sendline("yes")
            i = p.expect([pexpect.TIMEOUT, 'password:'])
        if i != 0:
            # Any prompt other than TIMEOUT: send the password.
            p.sendline(remote["password"])
        p.expect(pexpect.EOF)
        p.close()
| 41.079365 | 119 | 0.673879 |
ace436c6bea1ae6a25bcc3bbe56b95740901906b | 22,359 | py | Python | app/stats/tests.py | Dhruv-Sachdev1313/macports-webapp | ed3d8efceac4cfb694a51241cf81023ed10aade6 | [
"BSD-2-Clause"
] | 39 | 2018-05-09T02:14:52.000Z | 2022-03-07T20:31:44.000Z | app/stats/tests.py | Dhruv-Sachdev1313/macports-webapp | ed3d8efceac4cfb694a51241cf81023ed10aade6 | [
"BSD-2-Clause"
] | 337 | 2018-05-07T20:09:07.000Z | 2022-03-31T14:16:01.000Z | app/stats/tests.py | Dhruv-Sachdev1313/macports-webapp | ed3d8efceac4cfb694a51241cf81023ed10aade6 | [
"BSD-2-Clause"
] | 28 | 2018-05-09T18:03:07.000Z | 2022-03-30T02:21:56.000Z | import json
import datetime
from django.test import TransactionTestCase, Client
from django.urls import reverse
from rest_framework.test import APIClient
from tests import setup
from stats.models import Submission, PortInstallation, UUID
from stats.utilities import sort_by_version
from port.models import Port
from config import TEST_SUBMISSION_JSON
def initial_data_setup():
    """Load the submission fixture and populate Submission/PortInstallation."""
    with open(TEST_SUBMISSION_JSON, 'r', encoding='utf-8') as fixture:
        submissions = json.load(fixture)
    for payload in submissions:
        sid = Submission.populate(payload, datetime.datetime.now(tz=datetime.timezone.utc))
        PortInstallation.populate(payload['active_ports'], sid)
QUICK_SUBMISSION_JSON = json.loads("""{
"id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXX6",
"os": {
"macports_version": "2.5.4",
"osx_version": "10.14",
"os_arch": "i386",
"os_platform": "darwin",
"cxx_stdlib": "libc++",
"build_arch": "x86_64",
"gcc_version": "none",
"prefix": "/opt/local",
"xcode_version": "10.3"
},
"active_ports": [
{"name": "port-A1", "version": "0.9"},
{"name": "port-A2", "version": "0.9.1"},
{"name": "port-B1", "version": "1.0"},
{"name": "port-C1", "version": "1.1.2"}
]
}""")
class TestURLsStats(TransactionTestCase):
    """Smoke-test the stats URL routes: HTTP status and template used.

    Uses assertEqual throughout -- the assertEquals alias is deprecated and
    was removed from unittest in Python 3.12.
    """
    reset_sequences = True

    def setUp(self):
        self.client = Client()

    def test_stats(self):
        response = self.client.get(reverse('stats'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, template_name='stats/stats.html')

    def test_port_installations(self):
        response = self.client.get(reverse('stats_port_installations'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, template_name='stats/stats_port_installations.html')

    def test_port_installations_filter(self):
        response = self.client.get(reverse('stats_port_installations_filter'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, template_name='stats/port_installations_table.html')

    def test_stats_faq(self):
        response = self.client.get(reverse('stats_faq'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, template_name='stats/stats_faq.html')
class TestStatsViews(TransactionTestCase):
    """End-to-end tests of the stats views against populated fixtures.

    Uses assertEqual throughout -- the assertEquals alias is deprecated and
    was removed from unittest in Python 3.12.
    """
    reset_sequences = True

    def setUp(self):
        self.client = Client()
        # load stats
        initial_data_setup()
        # load data for ports
        setup.setup_test_data()

    def test_submission(self):
        """A new mpstats submission creates the UUID/Submission/Installation rows."""
        submission_body = """submission[data]={
            "id": "974EEF9C-XXXX-XXXX-XXXX-XXXXXXXXXXX1",
            "os": {
                "macports_version": "2.5.4",
                "osx_version": "10.14",
                "os_arch": "i386",
                "os_platform": "darwin",
                "cxx_stdlib": "libc++",
                "build_arch": "x86_64",
                "gcc_version": "none",
                "prefix": "/opt/local",
                "xcode_version": "10.3"
            },
            "active_ports": [
                {"name": "db48", "version": "4.8.30_4"},
                {"name": "expat", "version": "2.2.6_1"},
                {"name": "ncurses", "version": "6.1_0"},
                {"name": "bzip2", "version": "1.0.6_0"},
                {"name": "mpstats-gsoc", "version": "0.1.8_2", "requested": "true"}
            ]
        }"""

        self.client.generic('POST', reverse('stats_submit'), submission_body)

        self.assertEqual(UUID.objects.count(), 6)
        self.assertEqual(Submission.objects.count(), 7)
        self.assertEqual(PortInstallation.objects.count(), 29)

    def test_port_installation_counts(self):
        response1 = self.client.get(reverse('port_stats', kwargs={
            'name': 'port-A1'
        }))
        self.assertEqual(response1.context['count']['all'], 4)
        self.assertEqual(response1.context['count']['requested'], 2)

    def test_time_travel(self):
        """Submissions dated in the past are only counted inside the
        requested days/days_ago window."""
        time_now = datetime.datetime.now(tz=datetime.timezone.utc)
        # Go back in time 35 days
        time_35_days_ago = time_now - datetime.timedelta(days=35)

        submission = QUICK_SUBMISSION_JSON

        # Make a submission dated 35 days ago
        submission_id = Submission.populate(submission, time_35_days_ago)
        PortInstallation.populate(submission['active_ports'], submission_id)

        # Call for stats between 30-60 days
        response1 = self.client.get(reverse('port_stats', kwargs={'name': 'port-B1'}), data={
            'days': 30,
            'days_ago': 30
        })

        # Call for stats between 30-37 days
        response2 = self.client.get(reverse('port_stats', kwargs={'name': 'port-B1'}), data={
            'days': 7,
            'days_ago': 30
        })

        # Call for stats of some other port between 30-60 days
        response3 = self.client.get(reverse('port_stats', kwargs={'name': 'port-A4'}), data={
            'days': 30,
            'days_ago': 30
        })

        # NOTE(review): port-A4 is added only AFTER the request above, so the
        # zero-count assertion never exercises an existing port -- verify
        # whether this call was meant to happen before response3.
        Port.add_or_update([
            {
                "name": "port-A4",
                "version": "1.2.3",
                "portdir": "categoryA/port-A4"
            }
        ])

        self.assertEqual(response1.context['count']['all'], 1)
        self.assertEqual(response1.context['count']['requested'], 0)
        self.assertEqual(response2.context['count']['all'], 1)
        self.assertEqual(response2.context['count']['requested'], 0)
        self.assertEqual(response3.context['count']['all'], 0)

    def test_users_count(self):
        """Monthly user buckets pick up back-dated submissions."""
        today_day = datetime.datetime.now().day
        three_months_ago = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(days=int(today_day) + 90)
        eleven_months_ago = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(days=int(today_day) + 335)
        eleven_months_ago_month = str(eleven_months_ago.month)
        three_months_ago_month = str(three_months_ago.month)
        eleven_months_ago_year = str(eleven_months_ago.year)
        three_months_ago_year = str(three_months_ago.year)

        submission = QUICK_SUBMISSION_JSON

        for i in three_months_ago, eleven_months_ago:
            submission_id = Submission.populate(submission, i)
            PortInstallation.populate(submission['active_ports'], submission_id)

        response = self.client.get(reverse('stats'))

        three_months_count = 0
        eleven_months_count = 0
        for i in response.context['users_by_month']:
            if i['month'] == datetime.datetime.strptime(three_months_ago_year + "-" + three_months_ago_month + "-01 00:00:00-+0000", '%Y-%m-%d %H:%M:%S-%z'):
                three_months_count = i['num']
            if i['month'] == datetime.datetime.strptime(eleven_months_ago_year + "-" + eleven_months_ago_month + "-01 00:00:00-+0000", '%Y-%m-%d %H:%M:%S-%z'):
                eleven_months_count = i['num']

        self.assertEqual(three_months_count, 1)
        self.assertEqual(eleven_months_count, 1)
        self.assertEqual(response.context['total_submissions'], 8)
        self.assertEqual(response.context['unique_users'], 6)
        self.assertEqual(response.context['current_week'], 5)
        self.assertEqual(response.context['last_week'], 0)

    def test_validation_general_stats(self):
        """The 'days' query parameter is validated against a whitelist."""
        response1 = self.client.get(reverse('stats'), data={
            'days': 91
        })
        response2 = self.client.get(reverse('stats'), data={
            'days': "randomString"
        })
        response3 = self.client.get(reverse('stats'), data={
            'days': 30
        })

        self.assertEqual(response1.content, b"'91' is an invalid value. Allowed values are: [0, 7, 30, 90, 180, 365]")
        self.assertEqual(response2.content, b"Received 'randomString'. Expecting an integer.")
        self.assertIsInstance(response3.context['days'], int)

    def test_validation_port_stats(self):
        """Same 'days' validation applies to the per-port stats view."""
        response1 = self.client.get(reverse('port_stats', kwargs={'name': 'port-B1'}), data={
            'days': 91
        })
        response2 = self.client.get(reverse('port_stats', kwargs={'name': 'port-B1'}), data={
            'days': "randomString"
        })
        response3 = self.client.get(reverse('port_stats', kwargs={'name': 'port-B1'}), data={
            'days': 30
        })

        self.assertEqual(response1.content, b"'91' is an invalid value. Allowed values are: [0, 7, 30, 90, 180, 365]")
        self.assertEqual(response2.content, b"Received 'randomString'. Expecting an integer.")
        self.assertIsInstance(response3.context['days'], int)
class TestGeneralStatsAPIViews(TransactionTestCase):
    """API tests for the aggregate /general-stats endpoint.

    Uses assertEqual throughout -- the assertEquals alias is deprecated and
    was removed from unittest in Python 3.12.
    """
    reset_sequences = True

    def setUp(self):
        self.client = APIClient()
        # load stats
        initial_data_setup()

        submission = QUICK_SUBMISSION_JSON
        submission_id = Submission.populate(submission, datetime.datetime.now(datetime.timezone.utc))
        PortInstallation.populate(submission['active_ports'], submission_id)

    def test_macports_version_general_stats(self):
        response = self.client.get(reverse('general-stats'), data={
            'property': 'macports_version'
        }, format='json')
        data = response.data

        count_2_5_1 = 0
        count_2_5_4 = 0
        for i in data['result']:
            if i['macports_version'] == '2.5.1':
                count_2_5_1 = i['count']
            if i['macports_version'] == '2.5.4':
                count_2_5_4 = i['count']

        self.assertEqual(count_2_5_1, 5)
        self.assertEqual(count_2_5_4, 1)

    def test_macos_version_general_stats(self):
        response = self.client.get(reverse('general-stats'), data={
            'property': ['os_version', 'build_arch', 'cxx_stdlib']
        }, format='json')
        data = response.data

        count_10_14 = 0
        count_10_14_no_build_arch = 0
        count_10_13 = 0
        for i in data['result']:
            if i['os_version'] == '10.14' and i['build_arch'] == 'x86_64':
                count_10_14 = i['count']
            if i['os_version'] == '10.14' and i['build_arch'] == '':
                count_10_14_no_build_arch = i['count']
            if i['os_version'] == '10.13':
                count_10_13 = i['count']

        self.assertEqual(count_10_14, 3)
        self.assertEqual(count_10_14_no_build_arch, 1)
        self.assertEqual(count_10_13, 1)

    def test_xcode_version_general_stats(self):
        response = self.client.get(reverse('general-stats'), data={
            'property': ['xcode_version', 'os_version']
        }, format='json')
        data = response.data

        count_10_3 = 0
        for i in data['result']:
            if i['os_version'] == '10.14' and i['xcode_version'] == '10.3':
                count_10_3 = i['count']

        self.assertEqual(count_10_3, 3)

    def test_invalid_property(self):
        # An unknown property name yields an empty result set.
        response = self.client.get(reverse('general-stats'), data={
            'property': ['xcode_version', 'os_version', 'something_wrong']
        }, format='json')
        data = response.data['result']

        self.assertEqual(len(data), 0)

    def test_invalid_sorting(self):
        # An unknown sort key is ignored; results are still returned.
        response1 = self.client.get(reverse('general-stats'), data={
            'property': ['xcode_version', 'os_version'],
            'sort_by': 'xcode'
        }, format='json')
        data = response1.data['result']

        self.assertEqual(len(data), 4)

    def test_invalid_duration(self):
        # A duration outside the allowed set yields an empty result set.
        response1 = self.client.get(reverse('general-stats'), data={
            'property': ['xcode_version', 'os_version'],
            'days': '45'
        }, format='json')
        data = response1.data['result']

        self.assertEqual(len(data), 0)
class TestPortStatsAPIViews(TransactionTestCase):
    """API tests for the per-port /port-stats endpoints.

    Uses assertEqual throughout -- the assertEquals alias is deprecated and
    was removed from unittest in Python 3.12.
    """
    reset_sequences = True

    def setUp(self):
        self.client = APIClient()
        # load stats
        initial_data_setup()

        submission = QUICK_SUBMISSION_JSON
        submission_id = Submission.populate(submission, datetime.datetime.now(datetime.timezone.utc))
        PortInstallation.populate(submission['active_ports'], submission_id)

    def test_port_version(self):
        response = self.client.get(reverse('port-stats'), data={
            'name': 'port-A1',
            'property': 'version'
        }, format='json')
        data = response.data

        count_1_1 = 0
        count_0_9 = 0
        for i in data['result']:
            if i['version'] == '1.1':
                count_1_1 = i['count']
            if i['version'] == '0.9':
                count_0_9 = i['count']

        self.assertEqual(count_1_1, 4)
        self.assertEqual(count_0_9, 1)

    def test_macos_version(self):
        response = self.client.get(reverse('port-stats'), data={
            'name': 'port-A1',
            'property': ['submission__os_version', 'submission__build_arch', 'submission__cxx_stdlib']
        }, format='json')
        data = response.data

        count_10_14 = 0
        count_10_13 = 0
        for i in data['result']:
            if i['submission__os_version'] == '10.14' and i['submission__build_arch'] == 'x86_64':
                count_10_14 = i['count']
            if i['submission__os_version'] == '10.13' and i['submission__build_arch'] == 'x86_64':
                count_10_13 = i['count']

        self.assertEqual(count_10_14, 2)
        self.assertEqual(count_10_13, 1)

    def test_clt_version(self):
        response = self.client.get(reverse('port-stats'), data={
            'name': 'port-A1',
            'property': ['submission__os_version', 'submission__clt_version']
        }, format='json')
        data = response.data

        count_10_14 = 0
        for i in data['result']:
            if i['submission__os_version'] == '10.14' and i['submission__clt_version'] == '10.3.0.0.2':
                count_10_14 = i['count']

        self.assertEqual(count_10_14, 1)

    def test_invalid_property(self):
        # An unknown property name yields an empty result set.
        response = self.client.get(reverse('port-stats'), data={
            'name': 'port-A1',
            'property': ['submission__os_version', 'submission__x_version']
        }, format='json')
        data = response.data['result']

        self.assertEqual(len(data), 0)

    def test_invalid_duration(self):
        # A days/days_ago pair outside the allowed window yields nothing.
        response = self.client.get(reverse('port-stats'), data={
            'name': 'port-A1',
            'property': ['submission__os_version'],
            'days': 30,
            'days_ago': 46
        }, format='json')
        data = response.data['result']

        self.assertEqual(len(data), 0)

    def test_invalid_port(self):
        # A port with no installations yields an empty result set.
        response = self.client.get(reverse('port-stats'), data={
            'name': 'port-AX',
            'property': ['submission__os_version'],
            'days': 30,
        }, format='json')
        data = response.data['result']

        self.assertEqual(len(data), 0)

    def test_invalid_sorting(self):
        # An unknown sort key is ignored; results are still returned.
        response = self.client.get(reverse('port-stats'), data={
            'name': 'port-A1',
            'property': ['submission__os_version'],
            'days': 30,
            'sort_by': 'version'
        }, format='json')
        data = response.data['result']

        self.assertEqual(len(data), 3)

    def test_monthly(self):
        """Back-dated submissions show up in the right monthly buckets,
        with and without per-version breakdown."""
        today_day = datetime.datetime.now().day
        three_months_ago = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(
            days=int(today_day) + 90)
        eleven_months_ago = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(
            days=int(today_day) + 335)
        eleven_months_ago_month = str(eleven_months_ago.month)
        three_months_ago_month = str(three_months_ago.month)
        eleven_months_ago_year = str(eleven_months_ago.year)
        three_months_ago_year = str(three_months_ago.year)

        submission = QUICK_SUBMISSION_JSON

        for i in three_months_ago, eleven_months_ago:
            submission_id = Submission.populate(submission, i)
            PortInstallation.populate(submission['active_ports'], submission_id)

        response = self.client.get(reverse('port-monthly-stats'), data={
            'name': 'port-A2'
        })
        data = response.data

        three_months_count = 0
        eleven_months_count = 0
        for i in data['result']:
            if i['month'] == "{},{}".format(three_months_ago_year, three_months_ago_month):
                three_months_count = i['count']
            if i['month'] == "{},{}".format(eleven_months_ago_year, eleven_months_ago_month):
                eleven_months_count = i['count']

        self.assertEqual(three_months_count, 1)
        self.assertEqual(eleven_months_count, 1)

        response2 = self.client.get(reverse('port-monthly-stats'), data={
            'name': 'port-A2',
            'include_versions': 'yes'
        })
        data2 = response2.data

        three_months_count = 0
        eleven_months_count = 0
        current_month_count = 0
        for i in data2['result']:
            if i['month'] == "{},{}".format(three_months_ago_year, three_months_ago_month) and i['version'] == '0.9.1':
                three_months_count = i['count']
            if i['month'] == "{},{}".format(eleven_months_ago_year, eleven_months_ago_month) and i['version'] == '0.9.1':
                eleven_months_count = i['count']
            if i['month'] == "{},{}".format(datetime.datetime.utcnow().year, datetime.datetime.utcnow().month) and i['version'] == '1.2':
                current_month_count = i['count']

        self.assertEqual(three_months_count, 1)
        self.assertEqual(eleven_months_count, 1)
        self.assertEqual(current_month_count, 2)
class TestVersionsSorting(TransactionTestCase):
    """Tests for the sort_by_version helpers (descending natural version order)."""

    def test_numeric_dict(self):
        """Dotted numeric versions sort descending, segment-by-segment, with
        '_N' revision suffixes ranking above the bare version."""
        versions = [
            {"version": "10.10.1_1"},
            {"version": "10.10.1_0"},
            {"version": "10.10"},
            {"version": "10.10.1"},
            {"version": "10.10.11"},
            {"version": "10.9"},
            {"version": "10.1"},
            {"version": "10.1.11"},
            {"version": "10.1.10"},
            {"version": "10.1.9"},
            {"version": "9.9.9.9"},
            {"version": "9.9.9"},
            {"version": "9.9"},
            {"version": "9"},
            {"version": "9.1.0.0"},
            {"version": "9.1.0.A"},
            {"version": "9.1.0.a"},
            {"version": "9.100.1"},
        ]
        versions = sort_by_version.sort_list_of_dicts_by_version(versions, "version")
        expected = [
            {"version": "10.10.11"},
            {"version": "10.10.1_1"},
            {"version": "10.10.1_0"},
            {"version": "10.10.1"},
            {"version": "10.10"},
            {"version": "10.9"},
            {"version": "10.1.11"},
            {"version": "10.1.10"},
            {"version": "10.1.9"},
            {"version": "10.1"},
            {"version": "9.100.1"},
            {"version": "9.9.9.9"},
            {"version": "9.9.9"},
            {"version": "9.9"},
            {"version": "9.1.0.0"},
            {"version": "9.1.0.a"},
            {"version": "9.1.0.A"},
            {"version": "9"},
        ]
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(versions, expected)

    def test_alphanumeric_dict(self):
        """Alphanumeric segments sort descending too; punctuation ranks lowest."""
        versions = [
            {"version": "10.10.a"},
            {"version": "10.10.a1"},
            {"version": "10.10.a2"},
            {"version": "10.10.a12"},
            {"version": "10.a.1"},
            {"version": "10.aa.1"},
            {"version": "10.ab.1"},
            {"version": "10.b.1"},
            {"version": "10.a"},
            {"version": "10.b"},
            {"version": "10.1.a"},
            {"version": "10.1-a_0"},
            {"version": "10.1-c_0"},
            {"version": "10.+-=!ABC_0"},
            {"version": "AAAAAA"},
            {"version": "AAA.AAA"},
            {"version": "AAA-A-A"},
            {"version": "AAA-A+B"},
        ]
        versions = sort_by_version.sort_list_of_dicts_by_version(versions, "version")
        expected = [
            {"version": "10.10.a2"},
            {"version": "10.10.a12"},
            {"version": "10.10.a1"},
            {"version": "10.10.a"},
            {"version": "10.1-c_0"},
            {"version": "10.1-a_0"},
            {"version": "10.1.a"},
            {"version": "10.b.1"},
            {"version": "10.b"},
            {"version": "10.ab.1"},
            {"version": "10.aa.1"},
            {"version": "10.a.1"},
            {"version": "10.a"},
            {"version": "10.+-=!ABC_0"},
            {"version": "AAAAAA"},
            {"version": "AAA.AAA"},
            {"version": "AAA-A+B"},
            {"version": "AAA-A-A"},
        ]
        self.assertEqual(versions, expected)

    def test_version_list(self):
        """sort_list_by_version orders plain strings and preserves duplicates."""
        versions = [
            "10.10.a",
            "10.1_0.a",
            "10.10.a1",
            "10.10.a2",
            "10.10.a12",
            "10.a.a",
            "10.aa.1",
            "10.ab.1",
            "10.b.1",
            "10.a",
            "10.b",
            "10.1_0.a",
            "10.1.a",
            "10.1-a_0",
            "10.1-c_0",
            "10.+-=!ABC_0",
            "AAAAAA",
            "AAA.AAA",
            "AAA-A-A",
            "AAA-A+B"
        ]
        versions = sort_by_version.sort_list_by_version(versions)
        expected = [
            "10.10.a2",
            "10.10.a12",
            "10.10.a1",
            "10.10.a",
            "10.1_0.a",
            "10.1_0.a",
            "10.1-c_0",
            "10.1-a_0",
            "10.1.a",
            "10.b.1",
            "10.b",
            "10.ab.1",
            "10.aa.1",
            "10.a.a",
            "10.a",
            "10.+-=!ABC_0",
            "AAAAAA",
            "AAA.AAA",
            "AAA-A+B",
            "AAA-A-A",
        ]
        self.assertEqual(versions, expected)
| 34.935938 | 159 | 0.54582 |
ace4380ac7746bbaf33f42ab077b83e00045b516 | 38,592 | py | Python | skodaconnect/dashboard.py | stefanuc111/skodaconnect | 106c83825fa009a238cdedebd67d0157fc950e90 | [
"Apache-2.0"
] | null | null | null | skodaconnect/dashboard.py | stefanuc111/skodaconnect | 106c83825fa009a238cdedebd67d0157fc950e90 | [
"Apache-2.0"
] | null | null | null | skodaconnect/dashboard.py | stefanuc111/skodaconnect | 106c83825fa009a238cdedebd67d0157fc950e90 | [
"Apache-2.0"
] | null | null | null | # Utilities for integration with Home Assistant
# Thanks to molobrakos
import logging
from datetime import datetime
from skodaconnect.utilities import camel2slug
_LOGGER = logging.getLogger(__name__)
class Instrument:
def __init__(self, component, attr, name, icon=None):
self.attr = attr
self.component = component
self.name = name
self.vehicle = None
self.icon = icon
self.callback = None
def __repr__(self):
return self.full_name
def configurate(self, **args):
pass
@property
def slug_attr(self):
return camel2slug(self.attr.replace(".", "_"))
def setup(self, vehicle, **config):
self.vehicle = vehicle
if not self.is_supported:
return False
self.configurate(**config)
return True
@property
def vehicle_name(self):
return self.vehicle.vin
@property
def full_name(self):
return f"{self.vehicle_name} {self.name}"
@property
def is_mutable(self):
raise NotImplementedError("Must be set")
@property
def str_state(self):
return self.state
@property
def state(self):
if hasattr(self.vehicle, self.attr):
return getattr(self.vehicle, self.attr)
else:
_LOGGER.debug(f'Could not find attribute "{self.attr}"')
return self.vehicle.get_attr(self.attr)
@property
def attributes(self):
return {}
@property
def is_supported(self):
supported = 'is_' + self.attr + "_supported"
if hasattr(self.vehicle, supported):
return getattr(self.vehicle, supported)
else:
return False
class Sensor(Instrument):
    """Read-only sensor with optional unit relabelling/conversion.

    ``miles`` config converts metric units to imperial; ``scandinavian_miles``
    only relabels to 'mil' units (value is scaled in state()).
    """

    def __init__(self, attr, name, icon, unit=None, device_class=None):
        super().__init__(component="sensor", attr=attr, name=name, icon=icon)
        self.device_class = device_class
        self.unit = unit
        self.convert = False  # True when an imperial conversion must be applied

    def configurate(self, **config):
        if self.unit and config.get('miles', False) is True:
            if "km" == self.unit:
                self.unit = "mi"
                self.convert = True
            elif "km/h" == self.unit:
                self.unit = "mi/h"
                self.convert = True
            elif "l/100 km" == self.unit:
                self.unit = "l/100 mi"
                self.convert = True
            elif "kWh/100 km" == self.unit:
                self.unit = "kWh/100 mi"
                self.convert = True
        elif self.unit and config.get('scandinavian_miles', False) is True:
            # Scandinavian mil = 10 km; label only, scaling happens in state().
            if "km" == self.unit:
                self.unit = "mil"
            elif "km/h" == self.unit:
                self.unit = "mil/h"
            elif "l/100 km" == self.unit:
                self.unit = "l/100 mil"
            elif "kWh/100 km" == self.unit:
                self.unit = "kWh/100 mil"
        # Init placeholder for parking heater duration
        config.get('parkingheater', 30)
        if "pheater_duration" == self.attr:
            setValue = config.get('climatisation_duration', 30)
            self.vehicle.pheater_duration = setValue

    @property
    def is_mutable(self):
        return False

    @property
    def str_state(self):
        if self.unit:
            return f'{self.state} {self.unit}'
        else:
            return f'{self.state}'

    @property
    def state(self):
        val = super().state
        # NOTE(review): '"mi" in self.unit' also matches "mi/h", "l/100 mi" and
        # "kWh/100 mi", so the later, more specific branches below (including
        # the "gal/100 mi" multiplication, a unit configurate() never sets)
        # appear unreachable when convert is True — confirm intended behavior.
        if val and self.unit and "mi" in self.unit and self.convert is True:
            return int(round(val / 1.609344))
        elif val and self.unit and "mi/h" in self.unit and self.convert is True:
            return int(round(val / 1.609344))
        elif val and self.unit and "gal/100 mi" in self.unit and self.convert is True:
            return round(val * 0.4251438, 1)
        elif val and self.unit and "kWh/100 mi" in self.unit and self.convert is True:
            return round(val * 0.4251438, 1)
        elif val and self.unit and "°F" in self.unit and self.convert is True:
            # NOTE(review): no configurate() branch sets "°F" — verify this path.
            temp = round((val * 9 / 5) + 32, 1)
            return temp
        elif val and self.unit in ['mil', 'mil/h']:
            # Scandinavian mil: 1 mil = 10 km.
            return val / 10
        else:
            return val
class BinarySensor(Instrument):
    """On/off instrument derived from a vehicle attribute.

    ``reverse_state`` inverts the raw value (e.g. "closed" flags exposed as
    "open" problems); a raw string is treated as "on" unless it is "Normal".
    """

    def __init__(self, attr, name, device_class, icon='', reverse_state=False):
        super().__init__(component="binary_sensor", attr=attr, name=name, icon=icon)
        self.device_class = device_class
        self.reverse_state = reverse_state

    @property
    def is_mutable(self):
        return False

    @property
    def str_state(self):
        # Human-readable wording depends on the device class.
        device_class = self.device_class
        if device_class in ["door", "window"]:
            return "Closed" if self.state else "Open"
        if device_class == "lock":
            return "Locked" if self.state else "Unlocked"
        if device_class == "safety":
            return "Warning!" if self.state else "OK"
        if device_class == "plug":
            return "Connected" if self.state else "Disconnected"
        if self.state is None:
            _LOGGER.error(f"Can not encode state {self.attr} {self.state}")
            return "?"
        return "On" if self.state else "Off"

    @property
    def state(self):
        raw = super().state
        if isinstance(raw, (bool, list)):
            # A non-empty list (e.g. of open doors) counts as "on".
            flag = bool(raw)
            return not flag if self.reverse_state else flag
        if isinstance(raw, str):
            return raw != "Normal"
        return raw

    @property
    def is_on(self):
        return self.state
class Switch(Instrument):
    """Toggleable instrument; concrete subclasses implement turn_on/turn_off."""

    def __init__(self, attr, name, icon):
        super().__init__(component="switch", attr=attr, name=name, icon=icon)

    @property
    def is_mutable(self):
        return True

    @property
    def str_state(self):
        return "On" if self.state else "Off"

    def is_on(self):
        # NOTE: a plain method here, unlike BinarySensor.is_on (a property).
        return self.state

    def turn_on(self):
        pass

    def turn_off(self):
        pass

    @property
    def assumed_state(self):
        # Optimistic by default; subclasses with readable state return False.
        return True
class Climate(Instrument):
    """Abstract climate entity; subclasses implement mode/temperature control."""

    def __init__(self, attr, name, icon):
        super().__init__(component="climate", attr=attr, name=name, icon=icon)

    @property
    def hvac_mode(self):
        pass

    @property
    def target_temperature(self):
        pass

    def set_temperature(self, **kwargs):
        pass

    def set_hvac_mode(self, hvac_mode):
        pass
class ElectricClimatisationClimate(Climate):
    """Climate entity driving the electric (EV/PHEV) climatisation."""

    def __init__(self):
        super().__init__(attr="electric_climatisation", name="Electric Climatisation", icon="mdi:radiator")

    @property
    def hvac_mode(self):
        return self.vehicle.electric_climatisation

    @property
    def target_temperature(self):
        return self.vehicle.climatisation_target_temperature

    async def set_temperature(self, temperature):
        # NOTE(review): the ElectricClimatisation switch uses
        # vehicle.set_climatisation(); this class calls climatisation_target()
        # and climatisation() — confirm both vehicle APIs still exist.
        await self.vehicle.climatisation_target(temperature)

    async def set_hvac_mode(self, hvac_mode):
        if hvac_mode:
            await self.vehicle.climatisation('electric')
        else:
            await self.vehicle.climatisation('off')
class CombustionClimatisationClimate(Climate):
    """Climate entity for the fuel-driven parking heater."""

    def __init__(self):
        super().__init__(attr="pheater_heating", name="Parking Heater Climatisation", icon="mdi:radiator")

    def configurate(self, **config):
        # S-PIN authorizes the heater; duration is in minutes.
        self.spin = config.get('spin', '')
        self.duration = config.get('combustionengineheatingduration', 30)

    @property
    def hvac_mode(self):
        return self.vehicle.pheater_heating

    @property
    def target_temperature(self):
        return self.vehicle.climatisation_target_temperature

    async def set_temperature(self, temperature):
        await self.vehicle.setClimatisationTargetTemperature(temperature)

    async def set_hvac_mode(self, hvac_mode):
        if hvac_mode:
            await self.vehicle.pheater_climatisation(spin=self.spin, duration=self.duration, mode='heating')
        else:
            await self.vehicle.pheater_climatisation(spin=self.spin, mode='off')
class Position(Instrument):
    """Device tracker exposing the vehicle's last known GPS position."""

    def __init__(self):
        super().__init__(component="device_tracker", attr="position", name="Position")

    @property
    def is_mutable(self):
        return False

    @property
    def state(self):
        """Return (lat, lng, timestamp) with '?'/None placeholders when unknown."""
        # Guard against a missing position: super().state may be None before
        # the first successful update, and None.get() would raise.
        state = super().state or {}
        return (
            state.get("lat", "?"),
            state.get("lng", "?"),
            state.get("timestamp", None),
        )

    @property
    def str_state(self):
        """Like state, but with the timestamp rendered in local time."""
        state = super().state or {}
        ts = state.get("timestamp", None)
        if isinstance(ts, str):
            # API timestamps are UTC strings; convert to local time for display.
            time = str(datetime.strptime(ts, '%Y-%m-%dT%H:%M:%SZ').astimezone(tz=None))
        elif isinstance(ts, datetime):
            time = str(ts.astimezone(tz=None))
        else:
            time = None
        return (
            state.get("lat", "?"),
            state.get("lng", "?"),
            time,
        )
class DoorLock(Instrument):
    """Lock entity for the central door locks.

    Remote (un)locking requires the S-PIN, taken from configuration in
    configurate().
    """

    def __init__(self):
        super().__init__(component="lock", attr="door_locked", name="Door locked")

    def configurate(self, **config):
        # S-PIN used to authorize lock/unlock requests.
        self.spin = config.get('spin', '')

    @property
    def is_mutable(self):
        return True

    @property
    def str_state(self):
        return "Locked" if self.state else "Unlocked"

    @property
    def state(self):
        return self.vehicle.door_locked

    @property
    def is_locked(self):
        return self.state

    async def lock(self):
        """Send a lock request; return the API response, or False on failure."""
        try:
            response = await self.vehicle.set_lock('lock', self.spin)
            await self.vehicle.update()
            # Notify the consumer (e.g. HA entity) that state may have changed.
            if self.callback is not None:
                self.callback()
            return response
        except Exception as e:
            _LOGGER.error(f"Lock failed: {e}")
            return False

    async def unlock(self):
        """Send an unlock request; return the API response, or False on failure."""
        try:
            response = await self.vehicle.set_lock('unlock', self.spin)
            await self.vehicle.update()
            if self.callback is not None:
                self.callback()
            return response
        except Exception as e:
            _LOGGER.error(f"Unlock failed: {e}")
            return False

    @property
    def attributes(self):
        return {
            'last_result': self.vehicle.lock_action_status,
            'last_timestamp': self.vehicle.lock_action_timestamp
        }
class TrunkLock(Instrument):
    """Lock entity for the trunk; remote (un)locking is not supported."""

    def __init__(self):
        super().__init__(component="lock", attr="trunk_locked", name="Trunk locked")

    @property
    def is_mutable(self):
        return True

    @property
    def str_state(self):
        return "Locked" if self.is_locked else "Unlocked"

    @property
    def state(self):
        return self.vehicle.trunk_locked

    @property
    def is_locked(self):
        return self.state

    async def lock(self):
        # No remote trunk lock in the API.
        return None

    async def unlock(self):
        # No remote trunk unlock in the API.
        return None
# Switches
class RequestHonkAndFlash(Switch):
    """Momentary switch that triggers the horn-and-flash remote action."""

    def __init__(self):
        super().__init__(
            attr="request_honkandflash",
            name="Start honking and flashing",
            icon="mdi:car-emergency",
        )

    @property
    def state(self):
        return self.vehicle.request_honkandflash

    async def turn_on(self):
        await self.vehicle.set_honkandflash('honkandflash')
        await self.vehicle.update()
        if self.callback is not None:
            self.callback()

    async def turn_off(self):
        # One-shot action: there is nothing to switch off.
        pass

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(
            last_result=self.vehicle.honkandflash_action_status,
            last_timestamp=self.vehicle.honkandflash_action_timestamp,
        )
class RequestFlash(Switch):
    """Momentary switch that triggers the flash-only remote action."""

    def __init__(self):
        super().__init__(
            attr="request_flash",
            name="Start flashing",
            icon="mdi:car-parking-lights",
        )

    @property
    def state(self):
        return self.vehicle.request_flash

    async def turn_on(self):
        await self.vehicle.set_honkandflash('flash')
        await self.vehicle.update()
        if self.callback is not None:
            self.callback()

    async def turn_off(self):
        # One-shot action: there is nothing to switch off.
        pass

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(
            last_result=self.vehicle.honkandflash_action_status,
            last_timestamp=self.vehicle.honkandflash_action_timestamp,
        )
class RequestUpdate(Switch):
    """Momentary switch asking the vehicle to push fresh data."""

    def __init__(self):
        super().__init__(attr="refresh_data", name="Force data refresh", icon="mdi:car-connected")

    @property
    def state(self):
        pending = self.vehicle.refresh_data
        return False if pending is None else pending

    async def turn_on(self):
        await self.vehicle.set_refresh()
        await self.vehicle.update()
        if self.callback is not None:
            self.callback()

    async def turn_off(self):
        # One-shot action: there is nothing to switch off.
        pass

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(
            last_result=self.vehicle.refresh_action_status,
            last_timestamp=self.vehicle.refresh_action_timestamp,
        )
class ElectricClimatisation(Switch):
    """Start/stop the electric climatisation."""

    def __init__(self):
        super().__init__(attr="electric_climatisation", name="Electric Climatisation", icon="mdi:radiator")

    @property
    def state(self):
        return self.vehicle.electric_climatisation

    async def turn_on(self):
        await self.vehicle.set_climatisation(mode='electric')
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_climatisation(mode='off')
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        # Start from the vehicle's climatisation attributes when available,
        # then stamp the last action result/timestamp onto the same dict.
        if self.vehicle.is_electric_climatisation_attributes_supported:
            attrs = self.vehicle.electric_climatisation_attributes
        else:
            attrs = {}
        attrs['last_result'] = self.vehicle.climater_action_status
        attrs['last_timestamp'] = self.vehicle.climater_action_timestamp
        return attrs
class AuxiliaryClimatisation(Switch):
    """Start/stop the auxiliary (fuel-driven) climatisation; needs the S-PIN."""

    def __init__(self):
        super().__init__(attr="auxiliary_climatisation", name="Auxiliary Climatisation", icon="mdi:radiator")

    def configurate(self, **config):
        self.spin = config.get('spin', '')

    @property
    def state(self):
        return self.vehicle.auxiliary_climatisation

    async def turn_on(self):
        await self.vehicle.set_climatisation(mode='auxiliary', spin=self.spin)
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_climatisation(mode='off')
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(
            last_result=self.vehicle.climater_action_status,
            last_timestamp=self.vehicle.climater_action_timestamp,
        )
class Charging(Switch):
    """Start/stop battery charging."""

    def __init__(self):
        super().__init__(attr="charging", name="Charging", icon="mdi:battery")

    @property
    def state(self):
        return self.vehicle.charging

    async def turn_on(self):
        await self.vehicle.set_charger('start')
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_charger('stop')
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(
            last_result=self.vehicle.charger_action_status,
            last_timestamp=self.vehicle.charger_action_timestamp,
        )
class WindowHeater(Switch):
    """Start/stop the rear/front window defroster."""

    def __init__(self):
        super().__init__(attr="window_heater", name="Window Heater", icon="mdi:car-defrost-rear")

    @property
    def state(self):
        return self.vehicle.window_heater

    async def turn_on(self):
        await self.vehicle.set_window_heating('start')
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_window_heating('stop')
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(
            last_result=self.vehicle.climater_action_status,
            last_timestamp=self.vehicle.climater_action_timestamp,
        )
class SeatHeatingFrontLeft(Switch):
    """Placeholder switch for front-left seat heating.

    Remote control is not implemented yet; turn_on/turn_off are deliberate
    no-ops (the intended calls are kept commented out below).
    """

    def __init__(self):
        super().__init__(attr="seat_heating_front_left", name="Seat heating front left", icon="mdi:seat-recline-normal")

    @property
    def state(self):
        return self.vehicle.seat_heating_front_left

    async def turn_on(self):
        #await self.vehicle.set_seat_heating('start')
        #await self.vehicle.update()
        pass

    async def turn_off(self):
        #await self.vehicle.set_seat_heating('stop')
        #await self.vehicle.update()
        pass

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return {
            'last_result': self.vehicle.aircon_action_status,
            'last_timestamp': self.vehicle.aircon_action_timestamp
        }
class SeatHeatingFrontRight(Switch):
    """Placeholder switch for front-right seat heating.

    Remote control is not implemented yet; turn_on/turn_off are deliberate
    no-ops (the intended calls are kept commented out below).
    """

    def __init__(self):
        super().__init__(attr="seat_heating_front_right", name="Seat heating front right", icon="mdi:seat-recline-normal")

    @property
    def state(self):
        return self.vehicle.seat_heating_front_right

    async def turn_on(self):
        #await self.vehicle.set_seat_heating('start')
        #await self.vehicle.update()
        pass

    async def turn_off(self):
        #await self.vehicle.set_seat_heating('stop')
        #await self.vehicle.update()
        pass

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return {
            'last_result': self.vehicle.aircon_action_status,
            'last_timestamp': self.vehicle.aircon_action_timestamp
        }
class SeatHeatingRearLeft(Switch):
    """Placeholder switch for rear-left seat heating.

    Remote control is not implemented yet; turn_on/turn_off are deliberate
    no-ops (the intended calls are kept commented out below).
    """

    def __init__(self):
        super().__init__(attr="seat_heating_rear_left", name="Seat heating rear left", icon="mdi:seat-recline-normal")

    @property
    def state(self):
        return self.vehicle.seat_heating_rear_left

    async def turn_on(self):
        #await self.vehicle.set_seat_heating('start')
        #await self.vehicle.update()
        pass

    async def turn_off(self):
        #await self.vehicle.set_seat_heating('stop')
        #await self.vehicle.update()
        pass

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return {
            'last_result': self.vehicle.aircon_action_status,
            'last_timestamp': self.vehicle.aircon_action_timestamp
        }
class SeatHeatingRearRight(Switch):
    """Placeholder switch for rear-right seat heating.

    Remote control is not implemented yet; turn_on/turn_off are deliberate
    no-ops (the intended calls are kept commented out below).
    """

    def __init__(self):
        super().__init__(attr="seat_heating_rear_right", name="Seat heating rear right", icon="mdi:seat-recline-normal")

    @property
    def state(self):
        return self.vehicle.seat_heating_rear_right

    async def turn_on(self):
        #await self.vehicle.set_seat_heating('start')
        #await self.vehicle.update()
        pass

    async def turn_off(self):
        #await self.vehicle.set_seat_heating('stop')
        #await self.vehicle.update()
        pass

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return {
            'last_result': self.vehicle.aircon_action_status,
            'last_timestamp': self.vehicle.aircon_action_timestamp
        }
class AirConditionAtUnlock(Switch):
    """Placeholder switch for the 'air-conditioning at unlock' setting.

    Currently read-only: turn_on/turn_off are deliberate no-ops.
    """

    def __init__(self):
        super().__init__(attr="aircon_at_unlock", name="Air-conditioning at unlock", icon="mdi:power-plug")

    @property
    def state(self):
        return self.vehicle.aircon_at_unlock

    async def turn_on(self):
        pass

    async def turn_off(self):
        pass

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return {
            'last_result': self.vehicle.aircon_action_status,
            'last_timestamp': self.vehicle.aircon_action_timestamp
        }
class BatteryClimatisation(Switch):
    """Allow/forbid climatisation while running from the battery only."""

    def __init__(self):
        super().__init__(
            attr="climatisation_without_external_power",
            name="Climatisation from battery",
            icon="mdi:power-plug",
        )

    @property
    def state(self):
        return self.vehicle.climatisation_without_external_power

    async def turn_on(self):
        await self.vehicle.set_battery_climatisation(True)
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_battery_climatisation(False)
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(
            last_result=self.vehicle.climater_action_status,
            last_timestamp=self.vehicle.climater_action_timestamp,
        )
class PHeaterHeating(Switch):
    """Start/stop the parking heater in heating mode; needs the S-PIN."""

    def __init__(self):
        super().__init__(attr="pheater_heating", name="Parking Heater Heating", icon="mdi:radiator")

    def configurate(self, **config):
        self.spin = config.get('spin', '')
        self.duration = config.get('combustionengineheatingduration', 30)

    @property
    def state(self):
        return self.vehicle.pheater_heating

    async def turn_on(self):
        await self.vehicle.set_pheater(mode='heating', spin=self.spin)
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_pheater(mode='off', spin=self.spin)
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(
            last_result=self.vehicle.pheater_action_status,
            last_timestamp=self.vehicle.pheater_action_timestamp,
        )
class PHeaterVentilation(Switch):
    """Start/stop the parking heater in ventilation mode; needs the S-PIN."""

    def __init__(self):
        super().__init__(attr="pheater_ventilation", name="Parking Heater Ventilation", icon="mdi:radiator")

    def configurate(self, **config):
        self.spin = config.get('spin', '')
        self.duration = config.get('combustionengineclimatisationduration', 30)

    @property
    def state(self):
        return self.vehicle.pheater_ventilation

    async def turn_on(self):
        await self.vehicle.set_pheater(mode='ventilation', spin=self.spin)
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_pheater(mode='off', spin=self.spin)
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(
            last_result=self.vehicle.pheater_action_status,
            last_timestamp=self.vehicle.pheater_action_timestamp,
        )
class DepartureTimer1(Switch):
    """Enable/disable departure timer #1."""

    def __init__(self):
        super().__init__(attr="departure1", name="Departure timer 1", icon="mdi:radiator")

    def configurate(self, **config):
        self.spin = config.get('spin', '')

    @property
    def state(self):
        timer = self.vehicle.departure1
        # Legacy API reports "programmed"; newer API exposes a boolean "enabled".
        if timer.get("timerProgrammedStatus", "") == "programmed":
            return True
        return timer.get("enabled", False) is True

    async def turn_on(self):
        await self.vehicle.set_timer_active(id=1, action="on")
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_timer_active(id=1, action="off")
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(self.vehicle.departure1)
class DepartureTimer2(Switch):
    """Enable/disable departure timer #2."""

    def __init__(self):
        super().__init__(attr="departure2", name="Departure timer 2", icon="mdi:radiator")

    def configurate(self, **config):
        self.spin = config.get('spin', '')

    @property
    def state(self):
        timer = self.vehicle.departure2
        # Legacy API reports "programmed"; newer API exposes a boolean "enabled".
        if timer.get("timerProgrammedStatus", "") == "programmed":
            return True
        return timer.get("enabled", False) is True

    async def turn_on(self):
        await self.vehicle.set_timer_active(id=2, action="on")
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_timer_active(id=2, action="off")
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(self.vehicle.departure2)
class DepartureTimer3(Switch):
    """Enable/disable departure timer #3."""

    def __init__(self):
        super().__init__(attr="departure3", name="Departure timer 3", icon="mdi:radiator")

    def configurate(self, **config):
        self.spin = config.get('spin', '')

    @property
    def state(self):
        timer = self.vehicle.departure3
        # Legacy API reports "programmed"; newer API exposes a boolean "enabled".
        if timer.get("timerProgrammedStatus", "") == "programmed":
            return True
        return timer.get("enabled", False) is True

    async def turn_on(self):
        await self.vehicle.set_timer_active(id=3, action="on")
        await self.vehicle.update()

    async def turn_off(self):
        await self.vehicle.set_timer_active(id=3, action="off")
        await self.vehicle.update()

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(self.vehicle.departure3)
class RequestResults(Sensor):
    """Sensor exposing the outcome of the latest remote request."""

    def __init__(self):
        super().__init__(attr="request_results", name="Request results", icon="mdi:chat-alert", unit=None)

    @property
    def state(self):
        # Truthy 'state' value wins; anything else collapses to 'N/A'.
        return self.vehicle.request_results.get('state') or 'N/A'

    @property
    def assumed_state(self):
        return False

    @property
    def attributes(self):
        return dict(self.vehicle.request_results)
def create_instruments():
    """Return prototypes for every instrument the integration can expose.

    Dashboard filters this list per vehicle via Instrument.setup(), which
    consults the vehicle's is_<attr>_supported flags.
    """
    return [
        # Actionable / composite entities
        Position(),
        DoorLock(),
        TrunkLock(),
        RequestFlash(),
        RequestHonkAndFlash(),
        RequestUpdate(),
        WindowHeater(),
        #SeatHeatingFrontLeft(), # Not yet implemented
        #SeatHeatingFrontRight(), # Not yet implemented
        #SeatHeatingRearLeft(), # Not yet implemented
        #SeatHeatingRearRight(), # Not yet implemented
        #AirConditionAtUnlock(), # Not yet implemented
        BatteryClimatisation(),
        ElectricClimatisation(),
        AuxiliaryClimatisation(),
        PHeaterVentilation(),
        PHeaterHeating(),
        #ElectricClimatisationClimate(),
        #CombustionClimatisationClimate(),
        Charging(),
        RequestResults(),
        DepartureTimer1(),
        DepartureTimer2(),
        DepartureTimer3(),
        # Plain sensors
        Sensor(
            attr="distance",
            name="Odometer",
            icon="mdi:speedometer",
            unit="km",
        ),
        Sensor(
            attr="battery_level",
            name="Battery level",
            icon="mdi:battery",
            unit="%",
            device_class="battery"
        ),
        Sensor(
            attr="min_charge_level",
            name="Minimum charge level",
            icon="mdi:battery-positive",
            unit="%",
            device_class="battery"
        ),
        Sensor(
            # NOTE(review): unit "km" for an AdBlue level looks odd — may be
            # "range until refill"; confirm against the vehicle API.
            attr="adblue_level",
            name="Adblue level",
            icon="mdi:fuel",
            unit="km",
        ),
        Sensor(
            attr="fuel_level",
            name="Fuel level",
            icon="mdi:fuel",
            unit="%",
        ),
        Sensor(
            attr="service_inspection",
            name="Service inspection days",
            icon="mdi:garage",
            unit="days",
        ),
        Sensor(
            attr="service_inspection_distance",
            name="Service inspection distance",
            icon="mdi:garage",
            unit="km",
        ),
        Sensor(
            attr="oil_inspection",
            name="Oil inspection days",
            icon="mdi:oil",
            unit="days",
        ),
        Sensor(
            attr="oil_inspection_distance",
            name="Oil inspection distance",
            icon="mdi:oil",
            unit="km",
        ),
        Sensor(
            attr="last_connected",
            name="Last connected",
            icon="mdi:clock",
            device_class="timestamp"
        ),
        Sensor(
            attr="parking_time",
            name="Parking time",
            icon="mdi:clock",
            device_class="timestamp"
        ),
        Sensor(
            attr="charging_time_left",
            name="Charging time left",
            icon="mdi:battery-charging-100",
            unit="h",
        ),
        Sensor(
            attr="charging_power",
            name="Charging power",
            icon="mdi:flash",
            unit="W",
            device_class="power"
        ),
        Sensor(
            attr="charge_rate",
            name="Charging rate",
            icon="mdi:battery-heart",
            unit="km/h"
        ),
        Sensor(
            attr="electric_range",
            name="Electric range",
            icon="mdi:car-electric",
            unit="km",
        ),
        Sensor(
            attr="combustion_range",
            name="Combustion range",
            icon="mdi:car",
            unit="km",
        ),
        Sensor(
            attr="combined_range",
            name="Combined range",
            icon="mdi:car",
            unit="km",
        ),
        Sensor(
            attr="charge_max_ampere",
            name="Charger max ampere",
            icon="mdi:flash",
            unit="A",
            device_class="current"
        ),
        Sensor(
            attr="climatisation_target_temperature",
            name="Climatisation target temperature",
            icon="mdi:thermometer",
            unit="°C",
            device_class="temperature"
        ),
        Sensor(
            attr="climatisation_time_left",
            name="Climatisation time left",
            icon="mdi:clock",
            unit="h",
        ),
        # Last-trip statistics
        Sensor(
            attr="trip_last_average_speed",
            name="Last trip average speed",
            icon="mdi:speedometer",
            unit="km/h",
        ),
        Sensor(
            attr="trip_last_average_electric_consumption",
            name="Last trip average electric consumption",
            icon="mdi:car-battery",
            unit="kWh/100 km",
        ),
        Sensor(
            attr="trip_last_average_fuel_consumption",
            name="Last trip average fuel consumption",
            icon="mdi:fuel",
            unit="l/100 km",
        ),
        Sensor(
            attr="trip_last_duration",
            name="Last trip duration",
            icon="mdi:clock",
            unit="min",
        ),
        Sensor(
            attr="trip_last_length",
            name="Last trip length",
            icon="mdi:map-marker-distance",
            unit="km",
        ),
        Sensor(
            attr="trip_last_recuperation",
            name="Last trip recuperation",
            icon="mdi:battery-plus",
            unit="kWh/100 km",
        ),
        Sensor(
            attr="trip_last_average_recuperation",
            name="Last trip average recuperation",
            icon="mdi:battery-plus",
            unit="kWh/100 km",
        ),
        Sensor(
            attr="trip_last_average_auxillary_consumption",
            name="Last trip average auxillary consumption",
            icon="mdi:flash",
            unit="kWh/100 km",
        ),
        Sensor(
            attr="trip_last_average_aux_consumer_consumption",
            name="Last trip average auxillary consumer consumption",
            icon="mdi:flash",
            unit="kWh/100 km",
        ),
        Sensor(
            attr="model_image_large",
            name="Model image URL (Large)",
            icon="mdi:file-image",
        ),
        Sensor(
            attr="model_image_small",
            name="Model image URL (Small)",
            icon="mdi:file-image",
        ),
        Sensor(
            attr="trip_last_total_electric_consumption",
            name="Last trip total electric consumption",
            icon="mdi:car-battery",
            unit="kWh/100 km",
        ),
        Sensor(
            attr="pheater_status",
            name="Parking Heater heating/ventilation status",
            icon="mdi:radiator",
        ),
        Sensor(
            attr="pheater_duration",
            name="Parking Heater heating/ventilation duration",
            icon="mdi:timer",
            unit="minutes",
        ),
        Sensor(
            attr="outside_temperature",
            name="Outside temperature",
            icon="mdi:thermometer",
            unit="°C",
            device_class="temperature"
        ),
        Sensor(
            attr="requests_remaining",
            name="Requests remaining",
            icon="mdi:chat-alert",
            unit=""
        ),
        # Binary sensors
        BinarySensor(
            attr="external_power",
            name="External power",
            device_class="power"
        ),
        BinarySensor(
            attr="energy_flow",
            name="Energy flow",
            device_class="power"
        ),
        BinarySensor(
            attr="parking_light",
            name="Parking light",
            device_class="light",
            icon="mdi:car-parking-lights"
        ),
        BinarySensor(
            attr="door_locked",
            name="Doors locked",
            device_class="lock",
            reverse_state=False
        ),
        BinarySensor(
            attr="door_closed_left_front",
            name="Door closed left front",
            device_class="door",
            reverse_state=False,
            icon="mdi:car-door"
        ),
        BinarySensor(
            attr="door_closed_right_front",
            name="Door closed right front",
            device_class="door",
            reverse_state=False,
            icon="mdi:car-door"
        ),
        BinarySensor(
            attr="door_closed_left_back",
            name="Door closed left back",
            device_class="door",
            reverse_state=False,
            icon="mdi:car-door"
        ),
        BinarySensor(
            attr="door_closed_right_back",
            name="Door closed right back",
            device_class="door",
            reverse_state=False,
            icon="mdi:car-door"
        ),
        BinarySensor(
            attr="trunk_locked",
            name="Trunk locked",
            device_class="lock",
            reverse_state=False
        ),
        BinarySensor(
            attr="trunk_closed",
            name="Trunk closed",
            device_class="door",
            reverse_state=False
        ),
        BinarySensor(
            attr="hood_closed",
            name="Hood closed",
            device_class="door",
            reverse_state=False
        ),
        BinarySensor(
            attr="charging_cable_connected",
            name="Charging cable connected",
            device_class="plug",
            reverse_state=False
        ),
        BinarySensor(
            attr="charging_cable_locked",
            name="Charging cable locked",
            device_class="lock",
            reverse_state=False
        ),
        BinarySensor(
            attr="sunroof_closed",
            name="Sunroof closed",
            device_class="window",
            reverse_state=False
        ),
        BinarySensor(
            attr="windows_closed",
            name="Windows closed",
            device_class="window",
            reverse_state=False
        ),
        BinarySensor(
            attr="window_closed_left_front",
            name="Window closed left front",
            device_class="window",
            reverse_state=False
        ),
        BinarySensor(
            attr="window_closed_left_back",
            name="Window closed left back",
            device_class="window",
            reverse_state=False
        ),
        BinarySensor(
            attr="window_closed_right_front",
            name="Window closed right front",
            device_class="window",
            reverse_state=False
        ),
        BinarySensor(
            attr="window_closed_right_back",
            name="Window closed right back",
            device_class="window",
            reverse_state=False
        ),
        BinarySensor(
            attr="vehicle_moving",
            name="Vehicle Moving",
            device_class="moving"
        ),
        BinarySensor(
            attr="request_in_progress",
            name="Request in progress",
            device_class="connectivity"
        ),
        BinarySensor(
            attr="seat_heating_front_left",
            name="Seat heating front left",
            device_class="heat"
        ),
        BinarySensor(
            attr="seat_heating_front_right",
            name="Seat heating front right",
            device_class="heat"
        ),
        BinarySensor(
            attr="seat_heating_rear_left",
            name="Seat heating rear left",
            device_class="heat"
        ),
        BinarySensor(
            attr="seat_heating_rear_right",
            name="Seat heating rear right",
            device_class="heat"
        ),
        BinarySensor(
            attr="aircon_at_unlock",
            name="Air-conditioning at unlock",
            device_class=None
        ),
    ]
class Dashboard:
    """Collects the instruments that a given vehicle actually supports."""

    def __init__(self, vehicle, **config):
        """Build the dashboard for *vehicle*.

        Args:
            vehicle: vehicle object handed to every instrument's setup().
            **config: arbitrary configuration forwarded to instrument.setup().
        """
        self._config = config
        # Keep only the instruments whose setup() reports support for this vehicle.
        self.instruments = [
            instrument
            for instrument in create_instruments()
            if instrument.setup(vehicle, **config)
        ]
        # Defer string interpolation to the logging framework (lazy %-args)
        # instead of building the message eagerly with "+".
        _LOGGER.debug(
            "Supported instruments: %s",
            ", ".join(str(inst.attr) for inst in self.instruments),
        )
| 28.148796 | 127 | 0.580068 |
ace4396b869de80fc56e6d91a6a1447f0e9c8c06 | 16,266 | py | Python | DataServer/DataServer.py | cdogemaru/geoip-attack-map | c55e7b9a0bcd9a944b45ee6c02150a383135a4a0 | [
"Apache-2.0"
] | 5 | 2021-07-11T17:30:07.000Z | 2022-01-31T10:31:03.000Z | DataServer/DataServer.py | cdogemaru/geoip-attack-map | c55e7b9a0bcd9a944b45ee6c02150a383135a4a0 | [
"Apache-2.0"
] | null | null | null | DataServer/DataServer.py | cdogemaru/geoip-attack-map | c55e7b9a0bcd9a944b45ee6c02150a383135a4a0 | [
"Apache-2.0"
] | 1 | 2021-04-12T11:53:17.000Z | 2021-04-12T11:53:17.000Z | #!/usr/bin/python3
"""
AUTHOR: Matthew May - mcmay.web@gmail.com
"""
# Imports
import json
#import logging
import maxminddb
#import re
import redis
import io
import geoip2.database
import pickle as pkl
from const import META, PORTMAP
import traceback
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from os import getuid
from sys import exit
#from textwrap import dedent
from time import gmtime, localtime, sleep, strftime
import math
# start the Redis server if it isn't started already.
# $ redis-server
# default port is 6379
# make sure system can use a lot of memory and overcommit memory
# Redis connection settings; redis_instance is populated in main().
redis_ip = '127.0.0.1'
redis_instance = None
# required input paths
syslog_path = '/var/log/syslog'
#syslog_path = '/var/log/reverse-proxy.log'
db_path = '../DataServerDB/GeoLite2-City.mmdb'  # MaxMind GeoLite2 city database
asn_path= '../DataServerDB/GeoLite2-ASN.mmdb'  # ASN database; not used in the visible code (its reader is commented out)
asn_geo_dict_path = "../DataServerDB/ASN_GEO_DICT.pkl"  # pickled ASN lookup; entries look like (long, lat, country_name, country_code) -- presumed, confirm
# file to log data
#log_file_out = '/var/log/map_data_server.out'
# ip for headquarters
hq_ip = '8.8.8.8'
# stats -- module-level counters reported by shutdown_and_report_stats()
server_start_time = strftime("%d-%m-%Y %H:%M:%S", localtime()) # local time
event_count = 0
continents_tracked = {}  # continent name -> hit count
countries_tracked = {}   # country name -> hit count
country_to_code = {}     # country name -> iso code
ip_to_code = {}          # ip -> iso code
ips_tracked = {}         # ip -> hit count
unknowns = {}            # missing-field name -> count (see track_stats)
# @IDEA
#---------------------------------------------------------
# Use a class to nicely wrap everything:
# Could attempt to do an access here
# now without worrying about key errors,
# or just keep the filled data structure
#
#class Instance(dict):
#
# defaults = {
# 'city': {'names':{'en':None}},
# 'continent': {'names':{'en':None}},
# 'continent': {'code':None},
# 'country': {'names':{'en':None}},
# 'country': {'iso_code':None},
# 'location': {'latitude':None},
# 'location': {'longitude':None},
# 'location': {'metro_code':None},
# 'postal': {'code':None}
# }
#
# def __init__(self, seed):
# self(seed)
# backfill()
#
# def backfill(self):
# for default in self.defaults:
# if default not in self:
# self[default] = defaults[default]
#---------------------------------------------------------
# Create clean dictionary using unclean db dictionary contents
def clean_db(unclean):
    """Flatten a raw GeoIP record into the fields described by META.

    For each META entry, start at unclean[entry['tag']] and walk along
    entry['path']; a missing key anywhere yields None for that field.

    Returns:
        dict mapping entry['lookup'] -> extracted value (or None).
    """
    cleaned = {}
    for entry in META:
        value = None
        if entry['tag'] in unclean:
            value = unclean[entry['tag']]
            for step in entry['path']:
                if step not in value:
                    value = None
                    break
                value = value[step]
        cleaned[entry['lookup']] = value
    return cleaned
def connect_redis(redis_ip):
    """Open a StrictRedis connection to the given host on the default port/db."""
    return redis.StrictRedis(host=redis_ip, port=6379, db=0)
def get_msg_type():
    """Classify the log message.

    @TODO add support for more message types later; for now every event is
    reported as generic "Traffic".
    """
    return "Traffic"
# Check to see if packet is using an interesting TCP/UDP protocol based on source or destination port
def get_tcp_udp_proto(src_port, dst_port):
    """Map a port pair to a protocol name via PORTMAP; source port wins, else "OTHER".

    Both ports are converted with int() first, so a non-numeric port raises
    ValueError exactly like the original implementation.
    """
    src_port = int(src_port)
    dst_port = int(dst_port)
    return PORTMAP.get(src_port, PORTMAP.get(dst_port, "OTHER"))
def find_hq_lat_long(hq_ip):
    """Resolve the headquarters IP to its latitude/longitude.

    Exits the process when the IP cannot be found in the GeoIP database.

    Returns:
        dict with 'dst_lat' and 'dst_long' keys.
    """
    raw_record = parse_maxminddb(db_path, hq_ip)
    if not raw_record:
        print('Please provide a valid IP address for headquarters')
        exit()
    record = clean_db(raw_record)
    return {
        'dst_lat': record['latitude'],
        'dst_long': record['longitude'],
    }
def parse_maxminddb(db_path, ip):
    """Look up *ip* in the MaxMind database at *db_path*.

    Returns:
        The raw record dict, or False for an unparsable IP address.
        Exits the whole process if the database file is missing.
    """
    try:
        reader = maxminddb.open_database(db_path)
        record = reader.get(ip)
        reader.close()
        return record
    except FileNotFoundError:
        print('DB not found')
        print('SHUTTING DOWN')
        exit()
    except ValueError:
        # maxminddb raises ValueError for malformed IP strings.
        return False
# @TODO
# Refactor/improve parsing
# This function depends heavily on which appliances are generating logs
# For now it is only here for testing
def parse_syslog(line):
    """Parse one hijack-alert syslog line into a structured event dict.

    The payload (everything after the last '^') must contain three
    '|'-separated fields: an alert id, a JSON array describing the BGP
    announcement, and a JSON object with attack details.

    Returns:
        dict with timestamp/index/prefix/attacker/victim/type and the
        normal/abnormal AS paths of the affected vantage points, or
        False when the line cannot be parsed.
    """
    payload = line.strip().split("^")[-1]
    fields = payload.split('|')
    if len(fields) != 3:
        print('NOT A VALID LOG')
        return False
    try:
        announcement = json.loads(fields[1])
        details = json.loads(fields[2])
    except Exception:
        traceback.print_exc()
        print(fields)
        return False
    attack_type = details["type"]
    if attack_type == 0:
        # prefix hijack: origin AS changed
        victim = details["oldhomeas"]
        attacker = details["newhomeas"]
    elif attack_type == 1:
        # path hijack: forged adjacency "attacker victim"
        bad_segment = details["bad_path_segment"].split(" ")
        attacker = bad_segment[0]
        victim = bad_segment[1]
    else:
        print('ATTACK TYPE NOT VALID')
        return False
    # Only vantage points that actually observed the hijack contribute paths.
    affected = [vp for vp in details["vps"] if vp["is_affected"] != 0]
    return {
        "timestamp": announcement[1],
        "index": fields[0],
        "prefix": announcement[4],
        "attacker": attacker,
        "victim": victim,
        "type": attack_type,
        "normal_paths": [vp["before_path"] for vp in affected],
        "abnormal_paths": [vp["path"] for vp in affected],
    }
# ASNreader = maxminddb.open_database(asn_path)
# Process-lifetime GeoIP reader plus the pickled ASN -> locations lookup,
# both loaded once at import time.
cityreader = maxminddb.open_database(db_path)
# NOTE(review): the pickle file handle is never closed -- consider a `with` block.
f = open(asn_geo_dict_path, "rb")
asn_geo_dict = pkl.load(f)
def dist_func(p1, p2):
    """Euclidean distance between two points (only indices 0 and 1 are used).

    Args:
        p1, p2: sequences whose first two elements are x and y coordinates.

    Returns:
        The 2-D Euclidean distance as a float.
    """
    # math.hypot is the standard-library equivalent of the hand-rolled
    # sqrt-of-sum-of-squares and is more robust against overflow/underflow.
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])
import ipaddress
import numpy as np
def get_geolocation(data_dict):
    """Map a parsed hijack event onto geographic coordinates.

    Locates the victim via a host sampled from the hijacked prefix and the
    attacker via its AS number, then converts every reported AS path into a
    list of (longitude, latitude) waypoints for drawing on the map.

    Args:
        data_dict: event dict produced by parse_syslog().

    Returns:
        dict with victim/attacker country names and codes plus up to three
        normal and abnormal waypoint paths, or None when the event cannot
        be placed on the map.
    """
    # Minimum separation (Euclidean distance in degrees of lon/lat) below
    # which two endpoints are considered too close to draw.
    thresh = 10
    global cityreader, asn_geo_dict
    ### origin (victim) location: pick a host out of the hijacked prefix
    prefix = data_dict["prefix"]
    network = ipaddress.ip_network(prefix)
    # NOTE(review): this keeps the first host for which a uniform draw exceeds
    # 0.05 (i.e. almost always the very first host). If every draw fails,
    # rand_host stays unbound and the lookup below raises -- confirm intent.
    for host in network.hosts():
        rand = np.random.uniform(0, 1)
        if rand > 0.05:
            rand_host = host
            break
    ### TODO: deal with empty response
    # NOTE(review): rand_host is an ipaddress address object, not a string --
    # verify that maxminddb's get() accepts it.
    response = cityreader.get(rand_host)
    origin_country_name = response["registered_country"]["names"]["en"]
    origin_country_code = response["registered_country"]["iso_code"]
    origin_long = response["location"]["longitude"]
    origin_lati = response["location"]["latitude"]
    ### attacker location: the known location farthest from the victim
    if not data_dict["attacker"] in asn_geo_dict:
        print("Can't locate the attacker.")
        return None
    locations = asn_geo_dict[data_dict["attacker"]]
    max_dist = 0
    attacker_country_code = None
    attacker_country_name = None
    attacker_long = None
    attacker_lati = None
    last_node = (origin_long, origin_lati)
    for location in locations:
        cur_node = (location[0], location[1])
        # Squared distance (no sqrt); ordering is preserved, so the farthest
        # candidate still wins.
        dist = (cur_node[0] - last_node[0]) * (cur_node[0] - last_node[0]) + \
               (cur_node[1] - last_node[1]) * (cur_node[1] - last_node[1])
        if dist > max_dist:
            max_dist = dist
            attacker_long = location[0]
            attacker_lati = location[1]
            attacker_country_name = location[2]
            attacker_country_code = location[3]
    if attacker_long is None or dist_func((attacker_long, attacker_lati), (origin_long, origin_lati)) < thresh:
        print("Attacker too near to victim.")
        return None
    victim_node = (origin_long, origin_lati)
    attacker_node = (attacker_long, attacker_lati)
    print(data_dict["victim"], origin_country_code, origin_country_name, origin_long, origin_lati,
          data_dict["attacker"], attacker_long, attacker_lati)
    ### convert each AS path into a longitude/latitude waypoint path
    normal_paths = data_dict["normal_paths"]
    abnormal_paths = data_dict["abnormal_paths"]
    normal_path_geos = []
    abnormal_path_geos = []
    for normal_path, abnormal_path in zip(normal_paths, abnormal_paths):
        ### paths run from attacker/victim towards the vantage point
        normal_path = list(reversed(normal_path.split(" ")))
        abnormal_path = list(reversed(abnormal_path.split(" ")))
        ### path hijacking: drop the forged first hop
        if data_dict["type"] == 1:
            abnormal_path = abnormal_path[1:]
        if (normal_path[-1] != abnormal_path[-1]):
            print("The vantage points of the normal_path and abnormal_path are different")
            continue
        vp = normal_path[-1]
        if (vp not in asn_geo_dict):
            print("VP not in asn_geo_dict")
            continue
        max_dist = 0
        ### vantage-point location: the candidate farthest from both endpoints
        vp_node = None
        locations = asn_geo_dict[vp]
        for location in locations:
            cur_node = (location[0], location[1])
            d = dist_func(cur_node, victim_node) + dist_func(cur_node, attacker_node)
            if d > max_dist:
                vp_node = cur_node
                max_dist = d
        # BUGFIX: the original compared vp_node against victim_node twice;
        # per the message below the second check must use the attacker.
        if dist_func(vp_node, victim_node) < thresh or dist_func(vp_node, attacker_node) < thresh:
            print("VP is too near to the attacker/victim.")
            continue
        ### place the intermediate ASes of the normal path
        normal_path_geo = [(origin_long, origin_lati)]
        last_node = (origin_long, origin_lati)
        t = None
        for asn in normal_path[1:-1]:
            ### skip this AS if it has no entry in asn_geo_dict
            if not asn in asn_geo_dict:
                continue
            min_dist = math.inf
            locations = asn_geo_dict[asn]
            # Greedy choice: the candidate closest to (but distinct from)
            # the previous waypoint.
            for location in locations:
                cur_node = (location[0], location[1])
                dist = dist_func(cur_node, last_node)
                if dist > 0 and dist < min_dist:
                    t = cur_node
                    min_dist = dist
            if not t is None:
                normal_path_geo.append(t)
                last_node = t
                t = None
        normal_path_geo.append(vp_node)
        normal_path_geos.append(normal_path_geo)
        ### place the intermediate ASes of the abnormal path
        abnormal_path_geo = [(attacker_long, attacker_lati)]
        last_node = (attacker_long, attacker_lati)
        t = None
        for asn in abnormal_path[1:-1]:
            ### skip this AS if it has no entry in asn_geo_dict
            if not asn in asn_geo_dict:
                continue
            min_dist = math.inf
            locations = asn_geo_dict[asn]
            for location in locations:
                cur_node = (location[0], location[1])
                dist = dist_func(cur_node, last_node)
                if dist > 0 and dist < min_dist:
                    t = cur_node
                    min_dist = dist
            if not t is None:
                abnormal_path_geo.append(t)
                last_node = t
                t = None
        abnormal_path_geo.append(vp_node)
        abnormal_path_geos.append(abnormal_path_geo)
    ### show at most three paths of each kind
    if len(abnormal_path_geos) > 3:
        abnormal_path_geos = abnormal_path_geos[:3]
    if len(normal_path_geos) > 3:
        normal_path_geos = normal_path_geos[:3]
    geo_dict = {
        "victim_country_code": origin_country_code,
        "victim_country_name": origin_country_name,
        "attacker_country_code": attacker_country_code,
        "attacker_country_name": attacker_country_name,
        "normal_path_geos": normal_path_geos,
        "abnormal_path_geos": abnormal_path_geos
    }
    return geo_dict
def shutdown_and_report_stats():
    """Print all accumulated statistics, persist available_infos, then exit."""
    print('\nSHUTTING DOWN')
    # Report stats tracked
    print('\nREPORTING STATS...')
    print('\nEvent Count: {}'.format(event_count))  # report event count
    print('\nContinent Stats...')  # report continents stats
    for continent, count in continents_tracked.items():
        print('{}: {}'.format(continent, count))
    print('\nCountry Stats...')  # report country stats
    for country, count in countries_tracked.items():
        print('{}: {}'.format(country, count))
    print('\nCountries to iso_codes...')
    for country, code in country_to_code.items():
        print('{}: {}'.format(country, code))
    print('\nIP Stats...')  # report IP stats
    for ip, count in ips_tracked.items():
        print('{}: {}'.format(ip, count))
    print('\nIPs to iso_codes...')
    for ip, code in ip_to_code.items():
        print('{}: {}'.format(ip, code))
    print('\nUnknowns...')
    for key, count in unknowns.items():
        print('{}: {}'.format(key, count))
    # Persist the collected events one last time before exiting.
    with open("./available_infos.json", "w") as f:
        json.dump(available_infos, f)
    exit()
def merge_dicts(*args):
    """Merge any number of dicts into a new dict; later dicts override earlier keys."""
    merged = {}
    for mapping in args:
        merged.update(mapping)
    return merged
def track_flags(super_dict, tracking_dict, key1, key2):
    """Record super_dict[key1] -> super_dict[key2] in tracking_dict.

    Does nothing when either key is missing from super_dict, or when the key
    name key1 itself already appears in tracking_dict (note: the *name* is
    checked, not the mapped value -- behavior preserved from the original).
    Always returns None.
    """
    if key1 not in super_dict or key2 not in super_dict:
        return None
    if key1 in tracking_dict:
        return None
    tracking_dict[super_dict[key1]] = super_dict[key2]
def track_stats(super_dict, tracking_dict, key):
    """Increment tracking_dict[super_dict[key]] by one.

    When *key* is missing from super_dict, the miss is counted in the
    module-level `unknowns` dict instead.
    """
    if key in super_dict:
        node = super_dict[key]
        tracking_dict[node] = tracking_dict.get(node, 0) + 1
    else:
        unknowns[key] = unknowns.get(key, 0) + 1
available_infos = {}
def main():
    """Follow the syslog file forever, geolocate each hijack alert and publish it to Redis."""
    # Reading /var/log/syslog typically requires root.
    if getuid() != 0:
        print('Please run this script as root')
        print('SHUTTING DOWN')
        exit()
    # NOTE(review): `postal_codes_tracked` and `unknown` in this global list
    # are never defined at module level (harmless, but likely typos for
    # nothing / `unknowns`) -- confirm.
    global db_path, log_file_out, redis_ip, redis_instance, syslog_path, hq_ip
    global continents_tracked, countries_tracked, ips_tracked, postal_codes_tracked, event_count, unknown, ip_to_code, country_to_code
    #args = menu()
    # Connect to Redis
    redis_instance = connect_redis(redis_ip)
    # Find HQ lat/long
    hq_dict = find_hq_lat_long(hq_ip)
    available_indexs = set()
    # Follow/parse/format/publish syslog data
    with io.open(syslog_path, "r", encoding='ISO-8859-1') as syslog_file:
        # Skip everything already in the file; only new lines are processed.
        syslog_file.readlines()
        # tail -f style loop: poll for new lines, rewinding on partial reads.
        while True:
            where = syslog_file.tell()
            line = syslog_file.readline()
            if not line:
                sleep(.1)
                syslog_file.seek(where)
            else:
                syslog_data_dict = parse_syslog(line)
                if syslog_data_dict:
                    geo_dict = get_geolocation(syslog_data_dict)
                    if geo_dict is None:
                        continue
                    # Nothing to draw without at least one mapped path.
                    if len(geo_dict["normal_path_geos"]) == 0:
                        continue
                    super_dict = merge_dicts(
                        syslog_data_dict,
                        geo_dict
                    )
                    # Publish the combined event for the map frontend.
                    json_data = json.dumps(super_dict)
                    print(json_data)
                    redis_instance.publish('attack-map-production', json_data)
                    # Remember the first event seen for each alert index and
                    # persist the collection after every event.
                    index = syslog_data_dict["index"]
                    if not index in available_infos:
                        available_infos[index] = super_dict
                    with open("./available_infos.json", "w") as f:
                        json.dump(available_infos, f)
                    # with open("./avalaible_indexs.txt", "w") as f:
                    #     print(available_indexs)
                    #     f.write(str(list(available_indexs)))
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C triggers the stats report and a clean exit.
        shutdown_and_report_stats()
| 30.690566 | 134 | 0.579614 |
ace4399f3f4bc6790eee225646130a682fe0d94e | 12,317 | py | Python | acousticweighting.py | ol-MEGA/olMEGA_DataService_Tools | b064c519445becc5535066a4890a762b85eb939d | [
"BSD-3-Clause"
] | null | null | null | acousticweighting.py | ol-MEGA/olMEGA_DataService_Tools | b064c519445becc5535066a4890a762b85eb939d | [
"BSD-3-Clause"
] | null | null | null | acousticweighting.py | ol-MEGA/olMEGA_DataService_Tools | b064c519445becc5535066a4890a762b85eb939d | [
"BSD-3-Clause"
] | null | null | null | """
consists of functions to get a and c weighting information
(DIN EN 61672-1) in different forms:
as weighting values for a given frequency numpy array
as a weighting vector (numpy array) for typical fft proecessing
as a weighting vector (numpy array) for a third-octave filterbank
as a weighting vector (numpy array) for an octave filterbank
as the complex transfer function
as filter coefficients (to be used by lfilter)
"""
# Author: J. Bitzer @ Jade Hochschule (copyright owner)
# License: BSD 3-clause license (https://opensource.org/licenses/BSD-3-Clause)
# Date: 12.12.2021
# used sources: DIN 61672-1, octave toolbox by Christophe COUVREUR
# version 1.0 12.12.2021 first build
# 1.1 14.12.2021 added lower() to allow A and a
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sp
def __get_frequency_constants():
    """Return the design frequencies (f1, f2, f3, f4) in Hz for the a- and
    c-weighting curves as defined in DIN EN 61672-1."""
    f_ref = 1000       # section 5.4.6 in DIN 61672
    f_low = 10**1.5    # section 5.4.6 in DIN 61672
    f_high = 10**3.9   # section 5.4.6 in DIN 61672
    d_const = math.sqrt(1/2)  # section 5.4.6 in DIN 61672
    b = (1/(1 - d_const))*(f_ref**2 + f_low**2 * f_high**2/f_ref**2 - d_const*(f_low**2 + f_high**2))  # eq 11 in DIN 61672
    c = f_low**2 * f_high**2  # eq 12 in DIN 61672
    root = math.sqrt(b**2 - 4*c)
    f1 = math.sqrt((-b - root)*0.5)  # eq 9 in DIN 61672
    f4 = math.sqrt((-b + root)*0.5)  # eq 10 in DIN 61672
    f_a = 10**2.45
    f2 = ((3 - math.sqrt(5))*0.5)*f_a  # eq 13 in DIN 61672
    f3 = ((3 + math.sqrt(5))*0.5)*f_a  # eq 14 in DIN 61672
    return f1, f2, f3, f4
def transfermagnitude2minimumphase(magH):
    '''Computes the minimum phase for a given magnitude transfer function
    (first half, DC..Nyquist) via the Hilbert transform of its log magnitude.'''
    # Mirror the spectrum (excluding DC) to obtain the full-length transfer function.
    mirrored = np.append(magH, magH[-1:0:-1])
    full_length = len(mirrored)
    # Minimum phase = -Im{ Hilbert( log|H| ) }, taken on the first half.
    analytic = -sp.hilbert(np.log(np.abs(mirrored)))
    return analytic[:full_length//2 + 1].imag
def FDLS_design(order_of_numerator, order_of_denominator, mag_h, phase_h, freq_vek, fs = -1):
    """Frequency-Domain Least-Squares (FDLS) IIR filter design.

    Fits recursive (a) and non-recursive (b) coefficients so that the filter
    approximates the sampled frequency response given by magnitude `mag_h`
    and phase `phase_h`.

    Args:
        order_of_numerator: number of numerator (b) coefficients beyond b0.
        order_of_denominator: number of denominator (a) coefficients beyond a0.
        mag_h: magnitude samples of the desired response.
        phase_h: phase samples (radians) of the desired response.
        freq_vek: frequencies of the samples in Hz; if it has <= 1 entries a
            uniform grid over [0, pi] is used instead.
        fs: sampling rate in Hz (only used when freq_vek is given).

    Returns:
        (b, a): numerator coefficients (array) and denominator coefficients
        (list starting with 1).
    """
    if (len(freq_vek) <= 1):
        omega = np.linspace(start = 0, stop = np.pi, num = len(mag_h))
    else:
        omega = 2*freq_vek/fs*np.pi
    # Desired output sample at each frequency (real part of H).
    y = mag_h*np.cos(phase_h);
    # Recursive part: columns are the delayed outputs -mag*cos(-k*w + phase).
    X_D = -mag_h * np.cos(-1 * omega + phase_h);
    X_D = [X_D]
    for k in range(order_of_denominator-1):
        X_D.append(-mag_h * np.cos(-(k+2) * omega + phase_h))
    X_D = np.array(X_D).T
    # non recursive part
    X_N = np.cos(-1 * omega);
    X_N = [X_N]
    for k in range(order_of_numerator-1):
        X_N.append(np.cos(-(k+2) * omega))
    X_N = np.array(X_N).T
    #and define X as input matrix
    X = np.hstack([X_D, np.ones([len(mag_h), 1]), X_N])
    coeff = np.linalg.lstsq(X, y, rcond=None)
    # NOTE: a plain list instead of hstack (* splices in the elements, not the list itself)
    a = [1, *coeff[0][:order_of_denominator]]
    b = coeff[0][order_of_denominator:order_of_denominator+order_of_numerator+1]
    return b,a
def get_complex_tf_weighting(f_Hz, weight_func = 'a'):
    """returns the complex transfer function (a- or c-weighting, DIN 61672)
    evaluated at the frequencies in f_Hz; None for an unknown weight_func"""
    f1, f2, f3, f4 = __get_frequency_constants()
    s = 2*np.pi*1j*f_Hz  # j*omega
    if weight_func.lower() == 'c':
        c1000 = -0.062  # normalization so that |H(1 kHz)| = 0 dB
        num = 4*np.pi**2*f4**2*s**2
        den = (s + 2*np.pi*f1)**2 * (s + 2*np.pi*f4)**2
        return (num/den)/(10**(c1000/20))
    if weight_func.lower() == 'a':
        a1000 = -2  # normalization so that |H(1 kHz)| = 0 dB
        num = 4*np.pi**2*f4**2 * s**4
        den = ((s + 2*np.pi*f1)**2 * (s + 2*np.pi*f2)
               * (s + 2*np.pi*f3) * (s + 2*np.pi*f4)**2)
        return (num/den)/(10**(a1000/20))
def get_weight_value(f_Hz, weight_func = 'a', return_mode = 'log'):
    """returns the weighting values for a given frequency vector f_Hz in
    dB ('log', default) or linear, selected by return_mode; None for an
    unknown weight_func"""
    # NOTE(review): this replaces 0-Hz entries *in the caller's array*;
    # downstream helpers return that same (mutated) array, so the side
    # effect is preserved here on purpose.
    f_Hz[f_Hz == 0] = 0.1  # prevent division by zero
    f1, f2, f3, f4 = __get_frequency_constants()
    if weight_func.lower() == 'c':
        c1000 = -0.062  # eq 6 in DIN 61672
        c_linear = (f4**2*f_Hz**2)/((f_Hz**2 + f1**2)*(f_Hz**2 + f4**2))
        if return_mode == 'log':
            return 20*np.log10(c_linear) - c1000
        return c_linear/(10**(c1000/20))
    if weight_func.lower() == 'a':
        a1000 = -2  # eq 7 in DIN 61672
        a_linear = (f4**2*f_Hz**4)/((f_Hz**2 + f1**2)*(f_Hz**2 + f2**2)**0.5*(f_Hz**2 + f3**2)**0.5*(f_Hz**2 + f4**2))
        if return_mode == 'log':
            return 20*np.log10(a_linear) - a1000
        return a_linear/(10**(a1000/20))
def get_fftweight_vector(fft_size, fs, weight_func = 'a', return_mode = 'log'):
    """for a given fft_size return the a/c weighting vector over the
    fft_size/2 + 1 bins from 0 to fs/2 Hz, plus the bin frequencies"""
    num_bins = int(fft_size/2 + 1)
    freqs = np.linspace(0, fs/2, num = num_bins)
    return get_weight_value(freqs, weight_func, return_mode), freqs
def get_onethirdweight_vector(weight_func = 'a'):
    """the weights and frequencies for a 34 band one-third filterbank
    (startfreq = 10, endfreq = 20k) are returned

    Bug fix: get_spec() returns four values (spec, freqs, lower, upper);
    the original unpacked only two, which raised ValueError on every call.
    """
    _, freq_vek, _, _ = get_spec(weight_func)
    return get_weight_value(freq_vek, weight_func), freq_vek
def get_octaveweight_vector(weight_func = 'a'):
    """the weights and frequencies for an 11 band octave filterbank
    (startfreq = 16, endfreq = 16k) are returned"""
    center_freqs = np.array([16, 31.5, 63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000])
    return get_weight_value(center_freqs, weight_func), center_freqs
def get_weight_coefficients(fs, weight_func = 'a'):
    """designs an IIR filter that implements the desired weighting function for a given sampling rate fs
    (fs > 40 kHz to fulfill the class1 specification)
    design idea is based on the matlab octave toolbox. better solutions for lower fs should be possible
    see EOF for some ideas

    NOTE(review): a weight_func other than 'a'/'c' leaves `num` unbound and
    raises NameError at the bilinear() call -- confirm whether that is intended.
    """
    if fs < 40000:
        print("WARNING: the resulting filter coefficients will not fullfill the class1 constraint")
    f1,f2,f3,f4 = __get_frequency_constants()
    # Analogue denominator shared by A and C: double poles at f1 and f4.
    den = np.convolve([1., +4*np.pi * f4, (2*np.pi * f4)**2], [1., +4*np.pi * f1, (2*np.pi * f1)**2])
    if weight_func.lower() == 'c' :
        c1000 = -0.062
        # C numerator: gain-normalized double zero at DC (s^2 term).
        num = [(2*np.pi*f4)**2 * (10**(-c1000 / 20.0)), 0., 0.]
    if weight_func.lower() == 'a':
        a1000 = -2
        # A-weighting adds single poles at f2 and f3 and two more zeros at DC.
        den = np.convolve(np.convolve(den, [1., 2*np.pi * f3]),[1., 2*np.pi * f2])
        num = [(2*np.pi*f4)**2 * (10**(-a1000 / 20.0)), 0., 0., 0., 0.]
    # Map the analogue prototype to the digital domain.
    b,a = sp.bilinear(num, den, fs)
    return b,a
def get_spec(weight_func = 'a'):
    """ returns the specification of a- and c-weighting function and the class1 limits
    parameter:
        weight_func: 'a' (default) or 'c'
    returns (in this order):
        tf_spec: a 34 element numpy-array, the specification for the transfer function of the weighting curve in dB
        f_reference: the corresponding vector with the reference frequencies
        class_1_lowerlimit: corresponding vector of the allowed deviation in dB (negative dB, so you have to add this vector to tf_spec)
        class_1_upperlimit: corresponding vector of the allowed deviation in dB (positive)
    (an unknown weight_func returns None implicitly)
    """
    # all values from table 2 in DIN 61672
    fref = np.array([10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800,
                     1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000, 12500, 16000, 20000 ])
    class1_upper_limit = np.array([3.5, 3.0,2.5,2.5, 2.5,2.0,1.5, 1.5,1.5,1.5, 1.5,1.5,1.5, 1.5,1.4,1.4, 1.4,1.4,1.4,
                                   1.4,1.1,1.4, 1.6,1.6,1.6, 1.6,1.6,2.1, 2.1,2.1,2.6, 3,3.5,4 ])
    class1_lower_limit = np.array([-np.inf, -np.inf,-4.5,-2.5, -2,-2,-1.5, -1.5,-1.5,-1.5, -1.5,-1.5,-1.5, -1.5,-1.4,-1.4,
                                   -1.4,-1.4,-1.4, -1.4,-1.1,-1.4, -1.6,-1.6,-1.6, -1.6,-1.6,-2.1, -2.6,-3.1,-3.6, -6.0,-17.0,-np.inf ])
    if (weight_func.lower() == 'a'):
        a_desired = np.array([-70.4, -63.4, -56.7, -50.5, -44.7, -39.4, -34.6, -30.2, -26.2, -22.5, -19.1, -16.1, -13.4,
                              -10.9, -8.6, -6.6, -4.8, -3.2, -1.9, -.8, 0, .6, 1, 1.2, 1.3, 1.2, 1, .5, -.1, -1.1, -2.5, -4.3, -6.6, -9.3 ])
        return a_desired, fref, class1_lower_limit, class1_upper_limit
    if (weight_func.lower() == 'c'):
        c_desired = np.array([ -14.3, -11.2, -8.5, -6.2, -4.4, -3.0, -2.0, -1.3, -.8, -.5, -.3, -.2, -.1, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, -.1, -.2, -.3, -.5, -.8, -1.3, -2.0, -3.0, -4.4, -6.2, -8.5, -11.2 ])
        return c_desired, fref, class1_lower_limit, class1_upper_limit
if __name__ == '__main__':
    # Demo/self-test: plot the DIN 61672 specification, the class-1 limits,
    # the FFT weighting vector and the designed IIR filter response.
    print('called as script')
    weight_type = 'A'
    # print(get_weight_value(np.array([10,100,1000]),weight_type))
    a, f, ll, hl = get_spec(weight_type)
    fig,ax = plt.subplots()
    ax.plot(np.log(f),a, 'r')
    ax.plot(np.log(f),a+hl,'g:')
    ax.plot(np.log(f),a+ll, 'g:')
    fs = 48000
    # NOTE(review): f_fft[0] is 0 Hz, so np.log(f_fft) emits a -inf/warning
    # unless get_weight_value has already replaced it with 0.1 in place.
    afft,f_fft = get_fftweight_vector(1024,fs, weight_type)
    ax.plot(np.log(f_fft),afft, 'y+')
    # `a` is reused here: it now holds the IIR denominator, not the spec values.
    b,a = get_weight_coefficients(fs,weight_type)
    #print (b)
    #print (a)
    w, H = sp.freqz(b, a, worN =2048, fs = fs)
    ax.plot(np.log(w[1:]),20*np.log10(np.abs(H[1:])),'k')
    ax.set_ylim([-70.0, 5.0])
    ax.set_xlim([np.log(10), np.log(20000)])
    ax.set_xticks(np.log(f[:-1:4]))
    ax.set_xticklabels((f[:-1:4]))
    plt.show()
"""
an idea to design the a and c filter by FDLS. it does not work,
another better solution would be, wo use SOS filter and correct the tf at Nyquist.
next idea: use a better IIR arbitrary designb routine
#fft_size = 2*4096
#tf,f = get_fftweight_vector(fft_size,fs,weight_func,'lin')
#phase = transfermagnitude2minimumphase(np.sqrt(tf))
# log weighting for LS Design
#f_log = np.logspace(1.0,4.05, num = 150)
f_log = np.linspace(100,10000, num = 200)
# print(f_log)
#index_f = (np.round( (2*f_log/fs*(fft_size/2+1)))).astype(int)
#print(index_f)
tf = get_complex_tf_weighting(f_log, weight_func)
tf_mag = np.abs(tf)
tf_phase = np.angle(tf)
fig2,ax2 = plt.subplots(ncols=2)
ax2[0].plot(f_log,20*np.log10(tf_mag))
ax2[1].plot(f_log,np.unwrap(tf_phase))
#plt.show()
if weight_func == 'a':
order_num = 8
order_den = 8
else:
order_num = 4
order_den = 4
b,a = FDLS_design(order_num, order_den,tf_mag, np.unwrap(tf_phase), f_log, fs)
return b,a
"""
"""Copyright <2021> <Joerg Bitzer>
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" | 46.131086 | 755 | 0.621093 |
ace439a7a22feb6ba53ca16c322abff35e7d3664 | 112 | py | Python | mookfist_lled_controller/__init__.py | mookfist/mookfist-lled-controller | e13c3e4cf4e4d73ec55e30ba733bea5c56cd45f5 | [
"MIT"
] | null | null | null | mookfist_lled_controller/__init__.py | mookfist/mookfist-lled-controller | e13c3e4cf4e4d73ec55e30ba733bea5c56cd45f5 | [
"MIT"
] | 9 | 2017-04-18T17:24:52.000Z | 2018-12-27T23:13:29.000Z | mookfist_lled_controller/__init__.py | mookfist/mookfist-limitlessled-controller | e13c3e4cf4e4d73ec55e30ba733bea5c56cd45f5 | [
"MIT"
] | null | null | null | from .bridge import scan_bridges
from .bridge import create_bridge
__all__ = ['scan_bridges', 'create_bridge']
| 22.4 | 43 | 0.794643 |
ace439c7e0103525dc9c133537fafc9223405a3a | 23,391 | py | Python | google/appengine/ext/webapp/_webapp25.py | luduvigo/app-blog-code | 2de41573955f8dfb5f6d964d3cae2bbee6db2f9a | [
"Apache-2.0"
] | null | null | null | google/appengine/ext/webapp/_webapp25.py | luduvigo/app-blog-code | 2de41573955f8dfb5f6d964d3cae2bbee6db2f9a | [
"Apache-2.0"
] | null | null | null | google/appengine/ext/webapp/_webapp25.py | luduvigo/app-blog-code | 2de41573955f8dfb5f6d964d3cae2bbee6db2f9a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An extremely simple WSGI web application framework.
This module exports three primary classes: Request, Response, and
RequestHandler. You implement a web application by subclassing RequestHandler.
As WSGI requests come in, they are passed to instances of your RequestHandlers.
The RequestHandler class provides access to the easy-to-use Request and
Response objects so you can interpret the request and write the response with
no knowledge of the esoteric WSGI semantics. Here is a simple example:
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class MainPage(webapp.RequestHandler):
def get(self):
self.response.out.write(
'<html><body><form action="/hello" method="post">'
'Name: <input name="name" type="text" size="20"> '
'<input type="submit" value="Say Hello"></form></body></html>')
class HelloPage(webapp.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('Hello, %s' % self.request.get('name'))
application = webapp.WSGIApplication([
('/', MainPage),
('/hello', HelloPage)
], debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
The WSGIApplication class maps URI regular expressions to your RequestHandler
classes. It is a WSGI-compatible application object, so you can use it in
conjunction with wsgiref to make your web application into, e.g., a CGI
script or a simple HTTP server, as in the example above.
The framework does not support streaming output. All output from a response
is stored in memory before it is written.
"""
import cgi
import logging
import re
import StringIO
import sys
import traceback
import urlparse
import webob
import wsgiref.handlers
import wsgiref.headers
import wsgiref.util
# Replace wsgiref's default CGI environment with an empty dict -- presumably to
# keep the process environment out of generated responses; confirm against wsgiref docs.
wsgiref.handlers.BaseHandler.os_environ = {}
# Matches non-greedy parenthesized groups, e.g. in URL regex patterns.
RE_FIND_GROUPS = re.compile('\(.*?\)')
# Extracts the charset parameter from a Content-Type header value.
_CHARSET_RE = re.compile(r';\s*charset=([^;\s]*)', re.I)
class Error(Exception):
  """Base of all exceptions in the webapp module."""
class CannotReversePattern(Error):
  """Thrown when a url_pattern cannot be reversed."""
class NoUrlFoundError(Error):
  """Thrown when RequestHandler.get_url() fails."""
class Request(webob.Request):
  """Abstraction for an HTTP request.

  Properties:
    uri: the complete URI requested by the user
    scheme: 'http' or 'https'
    host: the host, including the port
    path: the path up to the ';' or '?' in the URL
    parameters: the part of the URL between the ';' and the '?', if any
    query: the part of the URL after the '?'

  You can access parsed query and POST values with the get() method; do not
  parse the query string yourself.
  """

  # webob knob: threshold for spooling request bodies to temporary files;
  # presumably 0 means bodies are never kept in RAM-limited spools -- confirm
  # against the webob documentation.
  request_body_tempfile_limit = 0

  # Aliases so callers can use the names documented above.
  uri = property(lambda self: self.url)
  query = property(lambda self: self.query_string)

  def __init__(self, environ):
    """Constructs a Request object from a WSGI environment.

    If the charset isn't specified in the Content-Type header, defaults
    to UTF-8.

    Args:
      environ: A WSGI-compliant environment dictionary.
    """
    match = _CHARSET_RE.search(environ.get('CONTENT_TYPE', ''))
    if match:
      charset = match.group(1).lower()
    else:
      charset = 'utf-8'
    # Note: this is Python 2-era webob usage (unicode_errors /
    # decode_param_names were removed in later webob releases).
    webob.Request.__init__(self, environ, charset=charset,
                           unicode_errors= 'ignore', decode_param_names=True)

  def get(self, argument_name, default_value='', allow_multiple=False):
    """Returns the query or POST argument with the given name.

    We parse the query string and POST payload lazily, so this will be a
    slower operation on the first call.

    Args:
      argument_name: the name of the query or POST argument
      default_value: the value to return if the given argument is not present
      allow_multiple: return a list of values with the given name (deprecated)

    Returns:
      If allow_multiple is False (which it is by default), we return the first
      value with the given name given in the request. If it is True, we always
      return a list.
    """
    param_value = self.get_all(argument_name)
    if allow_multiple:
      logging.warning('allow_multiple is a deprecated param, please use the '
                      'Request.get_all() method instead.')
    if len(param_value) > 0:
      if allow_multiple:
        return param_value
      return param_value[0]
    else:
      # Absent argument: deprecated multi-mode gets an empty list unless the
      # caller supplied an explicit (truthy) default.
      if allow_multiple and not default_value:
        return []
      return default_value

  def get_all(self, argument_name, default_value=None):
    """Returns a list of query or POST arguments with the given name.

    We parse the query string and POST payload lazily, so this will be a
    slower operation on the first call.

    Args:
      argument_name: the name of the query or POST argument
      default_value: the value to return if the given argument is not present,
        None may not be used as a default, if it is then an empty list will be
        returned instead.

    Returns:
      A (possibly empty) list of values.
    """
    # Python 2 code: params are byte-keyed when a charset is known.
    if self.charset:
      argument_name = argument_name.encode(self.charset)
    if default_value is None:
      default_value = []
    param_value = self.params.getall(argument_name)
    if param_value is None or len(param_value) == 0:
      return default_value
    # Unwrap file uploads to their raw values (Python 2 cgi/xrange era code).
    for i in xrange(len(param_value)):
      if isinstance(param_value[i], cgi.FieldStorage):
        param_value[i] = param_value[i].value
    return param_value

  def arguments(self):
    """Returns a list of the arguments provided in the query and/or POST.

    The return value is a list of strings (deduplicated, order unspecified).
    """
    return list(set(self.params.keys()))

  def get_range(self, name, min_value=None, max_value=None, default=0):
    """Parses the given int argument, limiting it to the given range.

    Args:
      name: the name of the argument
      min_value: the minimum int value of the argument (if any)
      max_value: the maximum int value of the argument (if any)
      default: the default value of the argument if it is not given

    Returns:
      An int within the given range for the argument
    """
    value = self.get(name, default)
    if value is None:
      return value
    try:
      value = int(value)
    except ValueError:
      # Non-numeric input falls back to the caller's default (which may be
      # None, hence the guard below).
      value = default
    if value is not None:
      if max_value is not None:
        value = min(value, max_value)
      if min_value is not None:
        value = max(value, min_value)
    return value
class Response(object):
"""Abstraction for an HTTP response.
Properties:
out: file pointer for the output stream
headers: wsgiref.headers.Headers instance representing the output headers
"""
def __init__(self):
"""Constructs a response with the default settings."""
self.out = StringIO.StringIO()
self.__wsgi_headers = []
self.headers = wsgiref.headers.Headers(self.__wsgi_headers)
self.headers['Content-Type'] = 'text/html; charset=utf-8'
self.headers['Cache-Control'] = 'no-cache'
self.set_status(200)
@property
def status(self):
"""Returns current request status code."""
return self.__status[0]
@property
def status_message(self):
"""Returns current request status message."""
return self.__status[1]
def set_status(self, code, message=None):
"""Sets the HTTP status code of this response.
Args:
message: the HTTP status string to use
If no status string is given, we use the default from the HTTP/1.1
specification.
"""
if not message:
message = Response.http_status_message(code)
self.__status = (code, message)
def has_error(self):
"""Indicates whether the response was an error response."""
return self.__status[0] >= 400
def clear(self):
"""Clears all data written to the output stream so that it is empty."""
self.out.seek(0)
self.out.truncate(0)
def wsgi_write(self, start_response):
"""Writes this response using WSGI semantics with the given WSGI function.
Args:
start_response: the WSGI-compatible start_response function
"""
body = self.out.getvalue()
if isinstance(body, unicode):
body = body.encode('utf-8')
elif self.headers.get('Content-Type', '').endswith('; charset=utf-8'):
try:
body.decode('utf-8')
except UnicodeError, e:
logging.warning('Response written is not UTF-8: %s', e)
if (self.headers.get('Cache-Control') == 'no-cache' and
not self.headers.get('Expires')):
self.headers['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
self.headers['Content-Length'] = str(len(body))
new_headers = []
for header, value in self.__wsgi_headers:
if not isinstance(value, basestring):
value = unicode(value)
if ('\n' in header or '\r' in header or
'\n' in value or '\r' in value):
logging.warning('Replacing newline in header: %s', repr((header,value)))
value = value.replace('\n','').replace('\r','')
header = header.replace('\n','').replace('\r','')
new_headers.append((header, value))
self.__wsgi_headers = new_headers
write = start_response('%d %s' % self.__status, self.__wsgi_headers)
write(body)
self.out.close()
def http_status_message(code):
"""Returns the default HTTP status message for the given code.
Args:
code: the HTTP code for which we want a message
"""
if not Response.__HTTP_STATUS_MESSAGES.has_key(code):
raise Error('Invalid HTTP status code: %d' % code)
return Response.__HTTP_STATUS_MESSAGES[code]
http_status_message = staticmethod(http_status_message)
__HTTP_STATUS_MESSAGES = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Moved Temporarily',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: 'Unused',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Time-out',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Large',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Time-out',
505: 'HTTP Version not supported'
}
class RequestHandler(object):
  """Our base HTTP request handler. Clients should subclass this class.

  Subclasses should override get(), post(), head(), options(), etc to handle
  different HTTP methods.
  """

  def initialize(self, request, response):
    """Initializes this request handler with the given Request and Response."""
    self.request = request
    self.response = response

  def get(self, *args):
    """Handler method for GET requests."""
    self.error(405)

  def post(self, *args):
    """Handler method for POST requests."""
    self.error(405)

  def head(self, *args):
    """Handler method for HEAD requests."""
    self.error(405)

  def options(self, *args):
    """Handler method for OPTIONS requests."""
    self.error(405)

  def put(self, *args):
    """Handler method for PUT requests."""
    self.error(405)

  def delete(self, *args):
    """Handler method for DELETE requests."""
    self.error(405)

  def trace(self, *args):
    """Handler method for TRACE requests."""
    self.error(405)

  def error(self, code):
    """Clears the response output stream and sets the given HTTP error code.

    Args:
      code: the HTTP status error code (e.g., 501)
    """
    self.response.set_status(code)
    self.response.clear()

  def redirect(self, uri, permanent=False):
    """Issues an HTTP redirect to the given relative URL.

    Args:
      uri: a relative or absolute URI (e.g., '../flowers.html')
      permanent: if true, we use a 301 redirect instead of a 302 redirect
    """
    if permanent:
      self.response.set_status(301)
    else:
      self.response.set_status(302)
    absolute_url = urlparse.urljoin(self.request.uri, uri)
    self.response.headers['Location'] = str(absolute_url)
    self.response.clear()

  def handle_exception(self, exception, debug_mode):
    """Called if this handler throws an exception during execution.

    The default behavior is to call self.error(500) and print a stack trace
    if debug_mode is True.

    Args:
      exception: the exception that was thrown
      debug_mode: True if the web application is running in debug mode
    """
    self.error(500)
    logging.exception(exception)
    if debug_mode:
      lines = ''.join(traceback.format_exception(*sys.exc_info()))
      self.response.clear()
      # Escape the traceback so it renders as text, not markup.
      self.response.out.write('<pre>%s</pre>' % (cgi.escape(lines, quote=True)))

  @classmethod
  def new_factory(cls, *args, **kwargs):
    """Create new request handler factory.

    Use factory method to create reusable request handlers that just
    require a few configuration parameters to construct. Also useful
    for injecting shared state between multiple request handler
    instances without relying on global variables. For example, to
    create a set of post handlers that will do simple text transformations
    you can write:

      class ChangeTextHandler(webapp.RequestHandler):

        def __init__(self, transform):
          self.transform = transform

        def post(self):
          response_text = self.transform(
              self.request.request.body_file.getvalue())
          self.response.out.write(response_text)

      application = webapp.WSGIApplication(
          [('/to_lower', ChangeTextHandler.new_factory(str.lower)),
           ('/to_upper', ChangeTextHandler.new_factory(str.upper)),
          ],
          debug=True)

    Text POSTed to /to_lower will be lower cased.
    Text POSTed to /to_upper will be upper cased.
    """
    def new_instance():
      return cls(*args, **kwargs)
    new_instance.__name__ = cls.__name__ + 'Factory'
    return new_instance

  @classmethod
  def get_url(cls, *args, **kargs):
    """Returns the url for the given handler.

    The default implementation uses the patterns passed to the active
    WSGIApplication to create a url. However, it is different from Django's
    urlresolvers.reverse() in the following ways:
       - It does not try to resolve handlers via module loading
       - It does not support named arguments
       - It performs some post-prosessing on the url to remove some regex
         operators.
       - It will try to fill in the left-most missing arguments with the args
         used in the active request.

    Args:
      args: Parameters for the url pattern's groups.
      kwargs: Optionally contains 'implicit_args' that can either be a boolean
              or a tuple. When it is True, it will use the arguments to the
              active request as implicit arguments. When it is False (default),
              it will not use any implicit arguments. When it is a tuple, it
              will use the tuple as the implicit arguments.

    Returns:
      The url for this handler/args combination.

    Raises:
      NoUrlFoundError: No url pattern for this handler has the same
        number of args that were passed in.
    """
    app = WSGIApplication.active_instance
    pattern_map = app._pattern_map
    implicit_args = kargs.get('implicit_args', ())
    # Identity test ('is True'), not equality: a truthy value such as 1 or a
    # non-empty tuple must not be mistaken for the boolean opt-in flag.
    if implicit_args is True:
      implicit_args = app.current_request_args
    min_params = len(args)
    for pattern_tuple in pattern_map.get(cls, ()):
      num_params_in_pattern = pattern_tuple[1]
      if num_params_in_pattern < min_params:
        continue
      try:
        # Borrow the left-most missing args from the active request.
        num_implicit_args = max(0, num_params_in_pattern - len(args))
        merged_args = implicit_args[:num_implicit_args] + args
        url = _reverse_url_pattern(pattern_tuple[0], *merged_args)
        # Strip regex escapes/operators left over from the pattern.
        url = url.replace('\\', '')
        url = url.replace('?', '')
        return url
      except CannotReversePattern:
        continue
    logging.warning('get_url failed for Handler name: %r, Args: %r',
                    cls.__name__, args)
    raise NoUrlFoundError
def _reverse_url_pattern(url_pattern, *args):
"""Turns a regex that matches a url back into a url by replacing
the url pattern's groups with the given args. Removes '^' and '$'
from the result.
Args:
url_pattern: A pattern used to match a URL.
args: list of values corresponding to groups in url_pattern.
Returns:
A string with url_pattern's groups filled in values from args.
Raises:
CannotReversePattern if either there aren't enough args to fill
url_pattern's groups, or if any arg isn't matched by the regular
expression fragment in the corresponding group.
"""
group_index = [0]
def expand_group(match):
group = match.group(1)
try:
value = str(args[group_index[0]])
group_index[0] += 1
except IndexError:
raise CannotReversePattern('Not enough arguments in url tag')
if not re.match(group + '$', value):
raise CannotReversePattern("Value %r doesn't match (%r)" % (value, group))
return value
result = re.sub(r'\(([^)]+)\)', expand_group, url_pattern.pattern)
result = result.replace('^', '')
result = result.replace('$', '')
return result
class RedirectHandler(RequestHandler):
  """Simple redirection handler.

  Easily configure URLs to redirect to alternate targets.  For example,
  to configure a web application so that the root URL is always redirected
  to the /home path, do:

    application = webapp.WSGIApplication(
        [('/', webapp.RedirectHandler.new_factory('/home', permanent=True)),
         ('/home', HomeHandler),
        ],
        debug=True)

  Handler also useful for setting up obsolete URLs to redirect to new paths.
  """

  def __init__(self, path, permanent=False):
    """Constructor.

    Do not use directly.  Configure using new_factory method.

    Args:
      path: Path to redirect to.
      permanent: if true, we use a 301 redirect instead of a 302 redirect.
    """
    # Stored verbatim; consumed by get() below via RequestHandler.redirect().
    self.path = path
    self.permanent = permanent

  def get(self):
    # Every GET simply issues the configured redirect.
    self.redirect(self.path, permanent=self.permanent)
class WSGIApplication(object):
"""Wraps a set of webapp RequestHandlers in a WSGI-compatible application.
To use this class, pass a list of (URI regular expression, RequestHandler)
pairs to the constructor, and pass the class instance to a WSGI handler.
See the example in the module comments for details.
The URL mapping is first-match based on the list ordering.
"""
REQUEST_CLASS = Request
RESPONSE_CLASS = Response
def __init__(self, url_mapping, debug=False):
"""Initializes this application with the given URL mapping.
Args:
url_mapping: list of (URI regular expression, RequestHandler) pairs
(e.g., [('/', ReqHan)])
debug: if true, we send Python stack traces to the browser on errors
"""
self._init_url_mappings(url_mapping)
self.__debug = debug
WSGIApplication.active_instance = self
self.current_request_args = ()
def __call__(self, environ, start_response):
"""Called by WSGI when a request comes in."""
request = self.REQUEST_CLASS(environ)
response = self.RESPONSE_CLASS()
WSGIApplication.active_instance = self
handler = None
groups = ()
for regexp, handler_class in self._url_mapping:
match = regexp.match(request.path)
if match:
try:
handler = handler_class()
handler.initialize(request, response)
except Exception, e:
if handler is None:
handler = RequestHandler()
handler.response = response
handler.handle_exception(e, self.__debug)
response.wsgi_write(start_response)
return ['']
groups = match.groups()
break
self.current_request_args = groups
if handler:
try:
method = environ['REQUEST_METHOD']
if method == 'GET':
handler.get(*groups)
elif method == 'POST':
handler.post(*groups)
elif method == 'HEAD':
handler.head(*groups)
elif method == 'OPTIONS':
handler.options(*groups)
elif method == 'PUT':
handler.put(*groups)
elif method == 'DELETE':
handler.delete(*groups)
elif method == 'TRACE':
handler.trace(*groups)
else:
handler.error(501)
except Exception, e:
handler.handle_exception(e, self.__debug)
else:
response.set_status(404)
response.wsgi_write(start_response)
return ['']
def _init_url_mappings(self, handler_tuples):
"""Initializes the maps needed for mapping urls to handlers and handlers
to urls.
Args:
handler_tuples: list of (URI, RequestHandler) pairs.
"""
handler_map = {}
pattern_map = {}
url_mapping = []
for regexp, handler in handler_tuples:
try:
handler_name = handler.__name__
except AttributeError:
pass
else:
handler_map[handler_name] = handler
if not regexp.startswith('^'):
regexp = '^' + regexp
if not regexp.endswith('$'):
regexp += '$'
if regexp == '^/form$':
logging.warning('The URL "/form" is reserved and will not be matched.')
compiled = re.compile(regexp)
url_mapping.append((compiled, handler))
num_groups = len(RE_FIND_GROUPS.findall(regexp))
handler_patterns = pattern_map.setdefault(handler, [])
handler_patterns.append((compiled, num_groups))
self._handler_map = handler_map
self._pattern_map = pattern_map
self._url_mapping = url_mapping
def get_registered_handler_by_name(self, handler_name):
"""Returns the handler given the handler's name.
This uses the application's url mapping.
Args:
handler_name: The __name__ of a handler to return.
Returns:
The handler with the given name.
Raises:
KeyError: If the handler name is not found in the parent application.
"""
try:
return self._handler_map[handler_name]
except:
logging.error('Handler does not map to any urls: %s', handler_name)
raise
| 29.165835 | 80 | 0.667265 |
ace43a530afb1e55771ba4ce482cd087325ebdcb | 5,105 | py | Python | release/stubs.min/Autodesk/Revit/DB/__init___parts/RenderingSettings.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/RenderingSettings.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/RenderingSettings.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class RenderingSettings(object, IDisposable):
""" Represents the rendering settings for a 3d view. """
def Dispose(self):
""" Dispose(self: RenderingSettings) """
pass
def GetBackgroundSettings(self):
"""
GetBackgroundSettings(self: RenderingSettings) -> BackgroundSettings
Returns an object that represents the rendering background settings.
Returns: The rendering background settings.
"""
pass
def GetRenderingImageExposureSettings(self):
"""
GetRenderingImageExposureSettings(self: RenderingSettings) -> RenderingImageExposureSettings
Returns an object that represents the rendering image exposure settings.
Returns: The rendering image exposure settings.
"""
pass
def GetRenderingQualitySettings(self):
"""
GetRenderingQualitySettings(self: RenderingSettings) -> RenderingQualitySettings
Returns an object that represents the rendering quality settings.
Returns: The rendering quality settings.
"""
pass
def GetRenderingRegionOutline(self):
"""
GetRenderingRegionOutline(self: RenderingSettings) -> Outline
Returns the outline of the rendering region.
Returns: The outline of the rendering region.
"""
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: RenderingSettings,disposing: bool) """
pass
def SetBackgroundSettings(self, background):
"""
SetBackgroundSettings(self: RenderingSettings,background: BackgroundSettings)
Changes the rendering background settings details for the current background
style.
background: An instance of the new rendering background settings.
"""
pass
def SetRenderingImageExposureSettings(self, exposure):
"""
SetRenderingImageExposureSettings(self: RenderingSettings,exposure: RenderingImageExposureSettings)
Changes the rendering image exposure settings.
exposure: An instance of the new rendering image exposure settings.
"""
pass
def SetRenderingQualitySettings(self, settings):
"""
SetRenderingQualitySettings(self: RenderingSettings,settings: RenderingQualitySettings)
Change rendering quality settings.
settings: An instance of the new rendering quality settings.
"""
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
BackgroundStyle = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The enum value that controls the background style for rendering.
Get: BackgroundStyle(self: RenderingSettings) -> BackgroundStyle
Set: BackgroundStyle(self: RenderingSettings)=value
"""
IsValidObject = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: RenderingSettings) -> bool
"""
LightingSource = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The lighting scheme type.
Get: LightingSource(self: RenderingSettings) -> LightingSource
Set: LightingSource(self: RenderingSettings)=value
"""
PrinterResolution = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The resolution level when using printer.
Get: PrinterResolution(self: RenderingSettings) -> PrinterResolution
Set: PrinterResolution(self: RenderingSettings)=value
"""
ResolutionTarget = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The resolution target.
Get: ResolutionTarget(self: RenderingSettings) -> ResolutionTarget
Set: ResolutionTarget(self: RenderingSettings)=value
"""
ResolutionValue = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The rendering resolution in dots per inch (DPI).
Get: ResolutionValue(self: RenderingSettings) -> int
"""
UsesRegionRendering = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The bool value that indicates whether to use region rendering.
Get: UsesRegionRendering(self: RenderingSettings) -> bool
Set: UsesRegionRendering(self: RenderingSettings)=value
"""
| 23.634259 | 221 | 0.663663 |
ace43a6ffd0b5958a0993442eb1a7d3d7bc44c45 | 17,295 | py | Python | gevent/pool.py | pubnub/gevent | e2ec8bb71fcccd01cee76fa97ca22bd467741a8b | [
"MIT"
] | 1 | 2020-03-21T05:34:18.000Z | 2020-03-21T05:34:18.000Z | gevent/pool.py | pubnub/gevent | e2ec8bb71fcccd01cee76fa97ca22bd467741a8b | [
"MIT"
] | null | null | null | gevent/pool.py | pubnub/gevent | e2ec8bb71fcccd01cee76fa97ca22bd467741a8b | [
"MIT"
] | 1 | 2021-01-13T11:20:12.000Z | 2021-01-13T11:20:12.000Z | # Copyright (c) 2009-2011 Denis Bilenko. See LICENSE for details.
"""
Managing greenlets in a group.
The :class:`Group` class in this module abstracts a group of running
greenlets. When a greenlet dies, it's automatically removed from the
group. All running greenlets in a group can be waited on with
:meth:`Group.joinall`, or all running greenlets can be killed with
:meth:`Group.kill`.
The :class:`Pool` class, which is a subclass of :class:`Group`,
provides a way to limit concurrency: its :meth:`spawn <Pool.spawn>`
method blocks if the number of greenlets in the pool has already
reached the limit, until there is a free slot.
"""
from bisect import insort_right
try:
from itertools import izip
except ImportError:
# Python 3
izip = zip
from gevent.hub import GreenletExit, getcurrent, kill as _kill
from gevent.greenlet import joinall, Greenlet
from gevent.timeout import Timeout
from gevent.event import Event
from gevent.lock import Semaphore, DummySemaphore
__all__ = ['Group', 'Pool']
class IMapUnordered(Greenlet):
    # Greenlet that drives an imap_unordered(): it spawns one worker per
    # input item and streams results through an internal queue in completion
    # order.  Failure instances on the queue are sentinels for exceptions
    # and for end-of-iteration.
    _zipped = False

    def __init__(self, func, iterable, spawn=None, _zipped=False):
        """
        :param func: callable applied to each item.
        :param iterable: source of items.
        :param spawn: optional spawn callable (e.g. a pool's bounded spawn);
            stored on the instance so it shadows the class attribute.
        :param _zipped: when true, each item is a tuple unpacked into func.
        """
        from gevent.queue import Queue
        Greenlet.__init__(self)
        if spawn is not None:
            self.spawn = spawn
        if _zipped:
            self._zipped = _zipped
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        # count tracks spawned-but-unfinished workers; used with ready()
        # to decide when to enqueue the finished sentinel.
        self.count = 0
        self.finished = False
        self.rawlink(self._on_finish)

    def __iter__(self):
        return self

    def next(self):
        # Unwrap Failure sentinels back into raised exceptions
        # (including StopIteration for normal exhaustion).
        value = self._inext()
        if isinstance(value, Failure):
            raise value.exc
        return value
    __next__ = next  # Python 3 iterator protocol

    def _inext(self):
        # Blocks until a worker result (or sentinel) is available.
        return self.queue.get()

    def _ispawn(self, func, item):
        # Spawn one worker for one item and hook its completion callback.
        self.count += 1
        g = self.spawn(func, item) if not self._zipped else self.spawn(func, *item)
        g.rawlink(self._on_result)
        return g

    def _run(self):
        # Main greenlet body: spawn a worker per item, then drop the
        # references so the pool's spawn/func/iterable can be collected.
        try:
            func = self.func
            for item in self.iterable:
                self._ispawn(func, item)
        finally:
            self.__dict__.pop('spawn', None)
            self.__dict__.pop('func', None)
            self.__dict__.pop('iterable', None)

    def _on_result(self, greenlet):
        # Worker completion callback: enqueue its value (or a Failure),
        # and enqueue the finished sentinel once the spawner greenlet is
        # done (ready()) and no workers remain.
        self.count -= 1
        if greenlet.successful():
            self.queue.put(self._iqueue_value_for_success(greenlet))
        else:
            self.queue.put(self._iqueue_value_for_failure(greenlet))
        if self.ready() and self.count <= 0 and not self.finished:
            self.queue.put(self._iqueue_value_for_finished())
            self.finished = True

    def _on_finish(self, _self):
        # Spawner-greenlet completion callback.  If the spawner itself
        # failed, surface that failure to the consumer; otherwise mark
        # finished when no workers are pending.
        if self.finished:
            return
        if not self.successful():
            self.queue.put(self._iqueue_value_for_self_failure())
            self.finished = True
            return
        if self.count <= 0:
            self.queue.put(self._iqueue_value_for_finished())
            self.finished = True

    def _iqueue_value_for_success(self, greenlet):
        return greenlet.value

    def _iqueue_value_for_failure(self, greenlet):
        return Failure(greenlet.exception, getattr(greenlet, '_raise_exception'))

    def _iqueue_value_for_finished(self):
        # StopIteration wrapped in Failure marks normal exhaustion.
        return Failure(StopIteration)

    def _iqueue_value_for_self_failure(self):
        return Failure(self.exception, self._raise_exception)
class IMap(IMapUnordered):
    # A specialization of IMapUnordered that returns items
    # in the order in which they were generated, not
    # the order in which they finish.
    # We do this by storing tuples (order, value) in the queue
    # not just value.
    def __init__(self, func, iterable, spawn=None, _zipped=False):
        # waiting holds out-of-order (index, value) pairs, kept sorted by
        # index; index is the next result to hand out; maxindex is the
        # highest index assigned so far (sentinels consume indices too).
        self.waiting = []  # QQQ maybe deque will work faster there?
        self.index = 0
        self.maxindex = -1
        IMapUnordered.__init__(self, func, iterable, spawn, _zipped)

    def _inext(self):
        # Deliver results strictly in spawn order: serve from the sorted
        # waiting buffer when its head is due, otherwise pull from the
        # queue and either deliver (if due) or buffer (if early).
        while True:
            if self.waiting and self.waiting[0][0] <= self.index:
                _, value = self.waiting.pop(0)
            else:
                index, value = self.queue.get()
                if index > self.index:
                    insort_right(self.waiting, (index, value))
                    continue
            self.index += 1
            return value

    def _ispawn(self, func, item):
        # Tag each worker greenlet with its position in the input stream.
        g = IMapUnordered._ispawn(self, func, item)
        self.maxindex += 1
        g.index = self.maxindex
        return g

    def _iqueue_value_for_success(self, greenlet):
        return (greenlet.index, IMapUnordered._iqueue_value_for_success(self, greenlet))

    def _iqueue_value_for_failure(self, greenlet):
        return (greenlet.index, IMapUnordered._iqueue_value_for_failure(self, greenlet))

    def _iqueue_value_for_finished(self):
        # The finished sentinel takes the next index so it is delivered
        # only after every earlier result.
        self.maxindex += 1
        return (self.maxindex, IMapUnordered._iqueue_value_for_finished(self))

    def _iqueue_value_for_self_failure(self):
        self.maxindex += 1
        return (self.maxindex, IMapUnordered._iqueue_value_for_self_failure(self))
class GroupMappingMixin(object):
    # Internal, non-public API class.
    # Provides mixin methods for implementing mapping pools. Subclasses must define:
    #
    # - self.spawn(func, *args, **kwargs): a function that runs `func` with `args`
    # and `awargs`, potentially asynchronously. Return a value with a `get` method that
    # blocks until the results of func are available
    #
    # - self._apply_immediately(): should the function passed to apply be called immediately,
    # synchronously?
    #
    # - self._apply_async_use_greenlet(): Should apply_async directly call
    # Greenlet.spawn(), bypassing self.spawn? Return true when self.spawn would block
    #
    # - self._apply_async_cb_spawn(callback, result): Run the given callback function, possiblly
    # asynchronously, possibly synchronously.

    def apply_cb(self, func, args=None, kwds=None, callback=None):
        # Synchronous apply, then hand the result to callback (if any) via
        # the subclass's callback-spawn hook.
        result = self.apply(func, args, kwds)
        if callback is not None:
            self._apply_async_cb_spawn(callback, result)
        return result

    def apply_async(self, func, args=None, kwds=None, callback=None):
        """A variant of the apply() method which returns a Greenlet object.

        If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready
        callback is applied to it (unless the call failed)."""
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self._apply_async_use_greenlet():
            # cannot call spawn() directly because it will block
            return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
        greenlet = self.spawn(func, *args, **kwds)
        if callback is not None:
            # NOTE(review): pass_value is not among this module's visible
            # imports — confirm it is defined/imported elsewhere in the file.
            greenlet.link(pass_value(callback))
        return greenlet

    def apply(self, func, args=None, kwds=None):
        """
        Rough quivalent of the :func:`apply()` builtin function blocking until
        the result is ready and returning it.

        The ``func`` will *usually*, but not *always*, be run in a way
        that allows the current greenlet to switch out (for example,
        in a new greenlet or thread, depending on implementation). But
        if the current greenlet or thread is already one that was
        spawned by this pool, the pool may choose to immediately run
        the `func` synchronously.
        """
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self._apply_immediately():
            # Already inside a pool worker: run inline to avoid deadlock.
            return func(*args, **kwds)
        else:
            return self.spawn(func, *args, **kwds).get()

    def map(self, func, iterable):
        """Return a list made by applying the *func* to each element of
        the iterable.

        .. seealso:: :meth:`imap`
        """
        return list(self.imap(func, iterable))

    def map_cb(self, func, iterable, callback=None):
        # Synchronous map, then invoke callback directly with the full list.
        result = self.map(func, iterable)
        if callback is not None:
            callback(result)
        return result

    def map_async(self, func, iterable, callback=None):
        """
        A variant of the map() method which returns a Greenlet object that is executing
        the map function.

        If callback is specified then it should be a callable which accepts a
        single argument.
        """
        return Greenlet.spawn(self.map_cb, func, iterable, callback)

    def imap(self, func, *iterables):
        """An equivalent of itertools.imap()"""
        # izip packs the iterables; IMap unpacks each tuple (_zipped=True)
        # and preserves input order in its output.
        return IMap.spawn(func, izip(*iterables), spawn=self.spawn,
                          _zipped=True)

    def imap_unordered(self, func, *iterables):
        """The same as imap() except that the ordering of the results from the
        returned iterator should be considered in arbitrary order."""
        return IMapUnordered.spawn(func, izip(*iterables), spawn=self.spawn,
                                   _zipped=True)
class Group(GroupMappingMixin):
    """Maintain a group of greenlets that are still running.

    Links to each item and removes it upon notification.
    """
    # Class of greenlet constructed by spawn(); subclasses/instances may override.
    greenlet_class = Greenlet

    def __init__(self, *args):
        # Accepts at most one argument: an iterable of greenlets to adopt.
        assert len(args) <= 1, args
        self.greenlets = set(*args)
        if args:
            # Adopted greenlets must also be removed when they finish.
            for greenlet in args[0]:
                greenlet.rawlink(self._discard)
        # each item we kill we place in dying, to avoid killing the same greenlet twice
        self.dying = set()
        # Set whenever the group is empty; join() waits on it.
        self._empty_event = Event()
        self._empty_event.set()

    def __repr__(self):
        return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), self.greenlets)

    def __len__(self):
        return len(self.greenlets)

    def __contains__(self, item):
        return item in self.greenlets

    def __iter__(self):
        return iter(self.greenlets)

    def add(self, greenlet):
        """Begin tracking the given greenlet; it is discarded automatically
        when it finishes (via rawlink)."""
        try:
            rawlink = greenlet.rawlink
        except AttributeError:
            pass  # non-Greenlet greenlet, like MAIN
        else:
            rawlink(self._discard)
        self.greenlets.add(greenlet)
        self._empty_event.clear()

    def _discard(self, greenlet):
        # Completion callback: drop the greenlet and wake join()ers when
        # the group becomes empty.
        self.greenlets.discard(greenlet)
        self.dying.discard(greenlet)
        if not self.greenlets:
            self._empty_event.set()

    def discard(self, greenlet):
        """Stop tracking the greenlet without killing it; also unhooks the
        completion callback installed by add()."""
        self._discard(greenlet)
        try:
            unlink = greenlet.unlink
        except AttributeError:
            pass  # non-Greenlet greenlet, like MAIN
        else:
            unlink(self._discard)

    def start(self, greenlet):
        """
        Start the un-started *greenlet* and add it to the collection of greenlets
        this group is monitoring.
        """
        self.add(greenlet)
        greenlet.start()

    def spawn(self, *args, **kwargs):
        """
        Begin a new greenlet with the given arguments (which are passed
        to the greenlet constructor) and add it to the collection of greenlets
        this group is monitoring.

        :return: The newly started greenlet.
        """
        greenlet = self.greenlet_class(*args, **kwargs)
        self.start(greenlet)
        return greenlet

    # def close(self):
    #     """Prevents any more tasks from being submitted to the pool"""
    #     self.add = RaiseException("This %s has been closed" % self.__class__.__name__)

    def join(self, timeout=None, raise_error=False):
        """Wait for the group to become empty (or until *timeout* elapses).
        With raise_error, re-raise the first failure found among the
        greenlets that were members when join() was called."""
        if raise_error:
            # Snapshot first: members may be discarded while we wait.
            greenlets = self.greenlets.copy()
            self._empty_event.wait(timeout=timeout)
            for greenlet in greenlets:
                if greenlet.exception is not None:
                    if hasattr(greenlet, '_raise_exception'):
                        # presumably preserves the original traceback — the
                        # helper is defined on Greenlet, outside this view.
                        greenlet._raise_exception()
                    raise greenlet.exception
        else:
            self._empty_event.wait(timeout=timeout)

    def kill(self, exception=GreenletExit, block=True, timeout=None):
        """Kill every tracked greenlet with *exception*; when *block*, keep
        killing-and-joining until the group drains or *timeout* expires."""
        timer = Timeout.start_new(timeout)
        try:
            try:
                while self.greenlets:
                    for greenlet in list(self.greenlets):
                        # dying prevents delivering the kill twice.
                        if greenlet not in self.dying:
                            try:
                                kill = greenlet.kill
                            except AttributeError:
                                _kill(greenlet, exception)
                            else:
                                kill(exception, block=False)
                            self.dying.add(greenlet)
                    if not block:
                        break
                    joinall(self.greenlets)
            except Timeout as ex:
                # Only swallow our own timer; propagate unrelated Timeouts.
                if ex is not timer:
                    raise
        finally:
            timer.cancel()

    def killone(self, greenlet, exception=GreenletExit, block=True, timeout=None):
        """Kill a single member (if still tracked and not already dying)."""
        if greenlet not in self.dying and greenlet in self.greenlets:
            greenlet.kill(exception, block=False)
            self.dying.add(greenlet)
            if block:
                greenlet.join(timeout)

    def full(self):
        # An unbounded group is never full; Pool overrides this.
        return False

    def wait_available(self):
        # No capacity limit, so a slot is always available; Pool overrides.
        pass

    # MappingMixin methods

    def _apply_immediately(self):
        # If apply() is called from one of our own
        # worker greenlets, don't spawn a new one
        return getcurrent() in self

    def _apply_async_cb_spawn(self, callback, result):
        Greenlet.spawn(callback, result)

    def _apply_async_use_greenlet(self):
        return self.full()  # cannot call self.spawn() because it will block
class Failure(object):
    """Sentinel that carries a worker greenlet's exception through the
    result queue of an imap operation.

    ``exc`` is the exception to re-raise; ``_raise_exception`` is an
    optional callable that performs the re-raise itself (used in
    preference to ``raise self.exc`` when supplied).
    """

    __slots__ = ['exc', '_raise_exception']

    def __init__(self, exc, raise_exception=None):
        self.exc = exc
        self._raise_exception = raise_exception

    def raise_exc(self):
        """Re-raise the stored exception, delegating to the supplied
        raiser callable when one was given."""
        raiser = self._raise_exception
        if raiser:
            raiser()
        else:
            raise self.exc
class Pool(Group):
    """
    A Group whose count of concurrently active members is capped by a
    semaphore.
    """
    def __init__(self, size=None, greenlet_class=None):
        """
        Create a new pool.

        A pool is like a group, but the maximum number of members is
        governed by the *size* parameter.

        :keyword int size: Optional non-negative cap on the number of
            active greenlets. ``None`` (the default) means no limit --
            useful for tracking, but not limiting, greenlets. ``0`` means
            spawning always blocks; only useful combined with
            :meth:`wait_available` (with a timeout) and :meth:`free_count`.
        :keyword greenlet_class: Optional class to use when spawning
            members.
        :raises ValueError: If *size* is negative.
        """
        if size is not None and size < 0:
            raise ValueError('size must not be negative: %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        # An unbounded pool needs no real semaphore.
        self._semaphore = DummySemaphore() if size is None else Semaphore(size)
    def wait_available(self, timeout=None):
        """
        Block until a greenlet could be spawned into this pool.

        :param float timeout: If given, only wait this many seconds.

        .. warning:: With a pool size of 0 this blocks forever unless a
           timeout is given.

        :return: A number indicating how many new greenlets can be put
            into the pool without blocking.
        """
        return self._semaphore.wait(timeout=timeout)
    def full(self):
        """Return True when this pool has no room for new members."""
        return self.free_count() <= 0
    def free_count(self):
        """Return approximately how many more members this pool can take."""
        return 1 if self.size is None else max(0, self.size - len(self))
    def add(self, greenlet):
        """Track <greenlet>, first blocking until the pool has a free slot."""
        self._semaphore.acquire()
        try:
            Group.add(self, greenlet)
        except:
            # Don't leak the acquired slot if tracking fails.
            self._semaphore.release()
            raise
    def _discard(self, greenlet):
        # Return the member's slot to the pool once it leaves the group.
        Group._discard(self, greenlet)
        self._semaphore.release()
class pass_value(object):
    """
    Link-callback adapter: wraps a one-argument callable and, when called
    with a finished source, forwards ``source.value`` to it -- but only if
    the source completed successfully.
    """
    __slots__ = ['callback']
    def __init__(self, callback):
        self.callback = callback
    def __call__(self, source):
        if not source.successful():
            return
        self.callback(source.value)
    def __hash__(self):
        return hash(self.callback)
    def __eq__(self, other):
        # Compare equal both to other wrappers and to the bare callback.
        return self.callback == getattr(other, 'callback', other)
    def __str__(self):
        return str(self.callback)
    def __repr__(self):
        return repr(self.callback)
    def __getattr__(self, item):
        # Delegate any other attribute access to the wrapped callback.
        assert item != 'callback'
        return getattr(self.callback, item)
| 33.259615 | 124 | 0.61272 |
ace43ab0e6fef861e55a72e7c09241eac5e6a959 | 63,824 | py | Python | furl/furl.py | brilliantorg/furl | d0bee9a27d7f432b047194a94f64cd4ff0319f6a | [
"Unlicense"
] | 1 | 2021-04-29T08:03:34.000Z | 2021-04-29T08:03:34.000Z | furl/furl.py | brilliantorg/furl | d0bee9a27d7f432b047194a94f64cd4ff0319f6a | [
"Unlicense"
] | null | null | null | furl/furl.py | brilliantorg/furl | d0bee9a27d7f432b047194a94f64cd4ff0319f6a | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
#
# furl - URL manipulation made simple.
#
# Ansgar Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
#
import re
import abc
import warnings
from copy import deepcopy
from posixpath import normpath
import six
from six.moves import urllib
from six.moves.urllib.parse import quote, unquote
try:
from icecream import ic
except ImportError: # Graceful fallback if IceCream isn't installed.
ic = lambda *a: None if not a else (a[0] if len(a) == 1 else a) # noqa
from .omdict1D import omdict1D
from .compat import string_types, UnicodeMixin
from .common import (
callable_attr, is_iterable_but_not_string, absent as _absent)
# Map of common protocols, as suggested by the common protocols included in
# urllib/parse.py, to their default ports. Protocol scheme strings are
# lowercase.
#
# TODO(Ans): Is there a public map of schemes to their default ports? If not,
# create one? Best I (Ansgar) could find is
#
# https://gist.github.com/mahmoud/2fe281a8daaff26cfe9c15d2c5bf5c8b
#
# Keys are lowercase scheme strings; values are each scheme's registered
# default port number.
DEFAULT_PORTS = {
    'acap': 674,
    'afp': 548,
    'dict': 2628,
    'dns': 53,
    'ftp': 21,
    'git': 9418,
    'gopher': 70,
    'hdl': 2641,
    'http': 80,
    'https': 443,
    'imap': 143,
    'ipp': 631,
    'ipps': 631,
    'irc': 194,
    'ircs': 6697,
    'ldap': 389,
    'ldaps': 636,
    'mms': 1755,
    'msrp': 2855,
    'mtqp': 1038,
    'nfs': 111,
    'nntp': 119,
    'nntps': 563,
    'pop': 110,
    'prospero': 1525,
    'redis': 6379,
    'rsync': 873,
    'rtsp': 554,
    'rtsps': 322,
    'rtspu': 5005,
    'sftp': 22,
    'sip': 5060,
    'sips': 5061,
    'smb': 445,
    'snews': 563,
    'snmp': 161,
    'ssh': 22,
    'svn': 3690,
    'telnet': 23,
    'tftp': 69,
    'ventrilo': 3784,
    'vnc': 5900,
    'wais': 210,
    'ws': 80,
    'wss': 443,
    'xmpp': 5222,
}
def lget(lst, index, default=None):
    """Return lst[index], or <default> when <index> is out of range."""
    try:
        value = lst[index]
    except IndexError:
        value = default
    return value
def attemptstr(o):
    """Return str(o); fall back to <o> unchanged if str() raises."""
    try:
        result = str(o)
    except Exception:
        result = o
    return result
def utf8(o, default=_absent):
    """
    Return <o> encoded as UTF-8 bytes. If <o> can't be encoded (e.g. it
    has no encode() method), return <o> itself, or <default> if one was
    provided.
    """
    try:
        encoded = o.encode('utf8')
    except Exception:
        return o if default is _absent else default
    return encoded
def non_string_iterable(o):
    """Return True for iterable objects that aren't strings."""
    if isinstance(o, string_types):
        return False
    return callable_attr(o, '__iter__')
# TODO(grun): Support IDNA2008 via the third party idna module. See
# https://github.com/gruns/furl/issues/73.
def idna_encode(o):
    """IDNA-encode <o> when it's string-like; pass anything else through."""
    if not callable_attr(o, 'encode'):
        return o
    return str(o.encode('idna').decode('utf8'))
def idna_decode(o):
    """IDNA-decode <o>'s UTF-8 form when possible; pass through otherwise."""
    encoded = utf8(o)
    if callable_attr(encoded, 'decode'):
        return encoded.decode('idna')
    return o
def is_valid_port(port):
    """Return True if <port> (int or string) is a valid port, 1-65535."""
    text = str(port)
    # Short-circuit matters: int() is only reached when isdigit() is True.
    return text.isdigit() and 0 < int(text) <= 65535
def static_vars(**kwargs):
    """Decorator factory that attaches <kwargs> as attributes on the
    decorated function, e.g. a precompiled regex."""
    def decorator(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorator
def create_quote_fn(safe_charset, quote_plus):
    """
    Build a percent-quoting function whose set of unencoded characters is
    limited to <safe_charset>.

    The returned function takes (s, dont_quote), where <dont_quote> is
    True (leave all of <safe_charset> unencoded), False (encode
    everything), or a string of specific characters to leave unencoded.
    Characters outside <safe_charset> are always encoded. When
    <quote_plus> is true, spaces are encoded as '+' instead of '%20'.
    """
    def quote_fn(s, dont_quote):
        if dont_quote is True:
            requested = safe_charset
        elif dont_quote is False:
            requested = ''
        else:  # <dont_quote> is expected to be a string.
            requested = dont_quote
        # Prune duplicates and characters not in <safe_charset>.
        safe = ''.join(set(requested) & set(safe_charset))  # '?^#?' -> '?'.
        quoted = quote(s, safe)
        if quote_plus:
            quoted = quoted.replace('%20', '+')
        return quoted
    return quote_fn
#
# TODO(grun): Update some of the regex functions below to reflect the fact that
# the valid encoding of Path segments differs slightly from the valid encoding
# of Fragment Path segments. Similarly, the valid encodings of Query keys and
# values differ slightly from the valid encodings of Fragment Query keys and
# values.
#
# For example, '?' and '#' don't need to be encoded in Fragment Path segments
# but they must be encoded in Path segments. Similarly, '#' doesn't need to be
# encoded in Fragment Query keys and values, but must be encoded in Query keys
# and values.
#
# Perhaps merge them with URLPath, FragmentPath, URLQuery, and
# FragmentQuery when those new classes are created (see the TODO
# currently at the top of the source, 02/03/2012).
#
# RFC 3986 (https://www.ietf.org/rfc/rfc3986.txt)
#
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
#
# pct-encoded = "%" HEXDIG HEXDIG
#
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
#
# pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
#
# === Path ===
# segment = *pchar
#
# === Query ===
# query = *( pchar / "/" / "?" )
#
# === Scheme ===
# scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
#
# Matches a single percent-encoded octet, e.g. '%3F'.
PERCENT_REGEX = r'\%[a-fA-F\d][a-fA-F\d]'
# Characters that may never appear in a hostname label.
INVALID_HOST_CHARS = '!@#$%^&\'\"*()+=:;/'
@static_vars(regex=re.compile(
    r'^([\w%s]|(%s))*$' % (re.escape('-.~:@!$&\'()*+,;='), PERCENT_REGEX)))
def is_valid_encoded_path_segment(segment):
    # True when <segment> consists solely of RFC 3986 pchar characters
    # and/or percent-encoded octets.
    return is_valid_encoded_path_segment.regex.match(segment) is not None
@static_vars(regex=re.compile(
    r'^([\w%s]|(%s))*$' % (re.escape('-.~:@!$&\'()*+,;/?'), PERCENT_REGEX)))
def is_valid_encoded_query_key(key):
    # True when <key> consists solely of valid, already-encoded query
    # characters (RFC 3986 query chars, minus '=').
    return is_valid_encoded_query_key.regex.match(key) is not None
@static_vars(regex=re.compile(
    r'^([\w%s]|(%s))*$' % (re.escape('-.~:@!$&\'()*+,;/?='), PERCENT_REGEX)))
def is_valid_encoded_query_value(value):
    # Like is_valid_encoded_query_key(), but '=' is additionally allowed.
    return is_valid_encoded_query_value.regex.match(value) is not None
@static_vars(regex=re.compile(r'[a-zA-Z][a-zA-Z0-9\-\.\+]*\Z'))
def is_valid_scheme(scheme):
    # RFC 3986 Section 3.1: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" /
    # "." ). The previous pattern was unanchored at the end, so any string
    # with a valid prefix (e.g. 'http^') was accepted, and it omitted
    # digits, which RFC 3986 allows after the first character. '\Z' anchors
    # at the very end of the string ('$' would also match before a trailing
    # newline); re.match() already anchors the start.
    return is_valid_scheme.regex.match(scheme) is not None
@static_vars(regex=re.compile('[%s]' % re.escape(INVALID_HOST_CHARS)))
def is_valid_host(hostname):
    # Valid iff no label contains a character from INVALID_HOST_CHARS and
    # there are no adjacent periods. A single trailing '.' (fully
    # qualified domain name) is allowed.
    # NOTE(review): an empty hostname ('') yields no labels here and is
    # reported valid -- confirm callers rely on that.
    toks = hostname.split('.')
    if toks[-1] == '': # Trailing '.' in a fully qualified domain name.
        toks.pop()
    for tok in toks:
        if is_valid_host.regex.search(tok) is not None:
            return False
    return '' not in toks # Adjacent periods aren't allowed.
def get_scheme(url):
    """
    Extract and return <url>'s scheme, e.g. 'http' for 'http://a.com'.
    Return '' for scheme-relative URLs (those that start with ':') and
    None when <url> has no valid scheme.
    """
    if url.startswith(':'):
        return ''
    # Only look for ':' before the path, query, and fragment, any of which
    # may legitimately contain a colon, e.g. the URL 'a?query:'.
    before_fragment = url.split('#', 1)[0]
    before_query = before_fragment.split('?', 1)[0]
    before_path = before_query.split('/', 1)[0]
    colon = before_path.find(':')
    scheme = url[:colon] if colon > 0 else None
    if scheme is None or is_valid_scheme(scheme):
        return scheme
    return None
def strip_scheme(url):
    """Return <url> with its scheme, and the ':' after it, removed."""
    scheme = get_scheme(url) or ''
    remainder = url[len(scheme):]
    if remainder.startswith(':'):
        remainder = remainder[1:]
    return remainder
def set_scheme(url, scheme):
    """Return <url> with its scheme replaced by <scheme>; None removes it."""
    after_scheme = strip_scheme(url)
    if scheme is None:
        return after_scheme
    return '%s:%s' % (scheme, after_scheme)
# 'netloc' in Python parlance, 'authority' in RFC 3986 parlance.
def has_netloc(url):
    """Return True when <url> contains a netloc ('//...') component."""
    scheme = get_scheme(url)
    prefix = '//' if scheme is None else scheme + '://'
    return url.startswith(prefix)
def urlsplit(url):
    """
    Parameters:
      url: URL string to split.
    Returns: urlparse.SplitResult tuple subclass, just like
    urlparse.urlsplit() returns, with fields (scheme, netloc, path,
    query, fragment, username, password, hostname, port). See
      http://docs.python.org/library/urlparse.html#urlparse.urlsplit
    for more details on urlsplit().
    """
    original_scheme = get_scheme(url)
    # urlsplit() parses URLs differently depending on whether or not the URL's
    # scheme is in any of
    #
    #   urllib.parse.uses_fragment
    #   urllib.parse.uses_netloc
    #   urllib.parse.uses_params
    #   urllib.parse.uses_query
    #   urllib.parse.uses_relative
    #
    # For consistent URL parsing, switch the URL's scheme to 'http', a scheme
    # in all of the aforementioned uses_* lists, and afterwards revert to the
    # original scheme (which may or may not be in some, or all, of the
    # uses_* lists).
    if original_scheme is not None:
        url = set_scheme(url, 'http')
    scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
    # Detect and preserve the '//' before the netloc, if present. E.g. preserve
    # URLs like 'http:', 'http://', and '///sup' correctly.
    after_scheme = strip_scheme(url)
    if after_scheme.startswith('//'):
        netloc = netloc or ''
    else:
        # No '//' after the scheme means no netloc at all (None), as opposed
        # to an empty netloc ('').
        netloc = None
    scheme = original_scheme
    return urllib.parse.SplitResult(scheme, netloc, path, query, fragment)
def urljoin(base, url):
    """
    Parameters:
      base: Base URL to join with <url>.
      url: Relative or absolute URL to join with <base>.
    Returns: The resultant URL from joining <base> and <url>.
    """
    # Only schemes attached to a netloc participate in the scheme-swap
    # workaround below; scheme-only URLs (e.g. 'mailto:...') do not.
    base_scheme = get_scheme(base) if has_netloc(base) else None
    url_scheme = get_scheme(url) if has_netloc(url) else None
    if base_scheme is not None:
        # For consistent URL joining, switch the base URL's scheme to
        # 'http'. urllib.parse.urljoin() behaves differently depending on the
        # scheme. E.g.
        #
        #   >>> urllib.parse.urljoin('http://google.com/', 'hi')
        #   'http://google.com/hi'
        #
        # vs
        #
        #   >>> urllib.parse.urljoin('asdf://google.com/', 'hi')
        #   'hi'
        root = set_scheme(base, 'http')
    else:
        root = base
    joined = urllib.parse.urljoin(root, url)
    # Restore the original scheme: <url>'s scheme wins when present,
    # otherwise <base>'s.
    new_scheme = url_scheme if url_scheme is not None else base_scheme
    if new_scheme is not None and has_netloc(joined):
        joined = set_scheme(joined, new_scheme)
    return joined
def join_path_segments(*args):
    """
    Join any number of path-segment lists into one, handling the borders
    between lists so the intended slashes of the final path are kept.

    This function is not encoding aware. It doesn't test for, or change,
    the encoding of the path segments it is passed.

    Examples:
      join_path_segments(['a'], ['b']) == ['a','b']
      join_path_segments(['a',''], ['b']) == ['a','b']
      join_path_segments(['a'], ['','b']) == ['a','b']
      join_path_segments(['a',''], ['','b']) == ['a','','b']
      join_path_segments(['a','b'], ['c','d']) == ['a','b','c','d']

    Returns: A list containing the joined path segments.
    """
    joined = []
    for segments in args:
        # Skip empty paths ([] and ['']).
        if not segments or segments == ['']:
            continue
        if not joined:
            joined.extend(segments)
            continue
        # Example #1: ['a',''] + ['b'] == ['a','b']
        # Example #2: ['a',''] + ['','b'] == ['a','','b']
        if joined[-1] == '' and (segments[0] != '' or len(segments) > 1):
            joined.pop(-1)
        # Example: ['a'] + ['','b'] == ['a','b']
        elif joined[-1] != '' and segments[0] == '' and len(segments) > 1:
            segments = segments[1:]
        joined.extend(segments)
    return joined
def remove_path_segments(segments, remove):
    """
    Removes the path segments of <remove> from the end of the path
    segments <segments>.

    Examples:
      # ('/a/b/c', 'b/c') -> '/a/'
      remove_path_segments(['','a','b','c'], ['b','c']) == ['','a','']
      # ('/a/b/c', '/b/c') -> '/a'
      remove_path_segments(['','a','b','c'], ['','b','c']) == ['','a']

    Returns: The list of all remaining path segments after the segments
    in <remove> have been removed from the end of <segments>. If no
    segments from <remove> were removed from <segments>, <segments> is
    returned unmodified.
    """
    # Work on shallow copies: the previous implementation appended to the
    # caller's lists in place (and could return an alias of <segments>
    # that it then mutated), surprising callers that reuse their lists.
    segments = list(segments)
    remove = list(remove)
    # [''] means a '/', which is properly represented by ['', ''].
    if segments == ['']:
        segments.append('')
    if remove == ['']:
        remove.append('')
    if remove == segments:
        return []
    if len(remove) > len(segments):
        return segments
    toremove = list(remove)
    # A leading '' in <remove> represents the '/' joining it to the prior
    # segment; it isn't itself a segment to strip.
    if len(remove) > 1 and remove[0] == '':
        toremove.pop(0)
    if toremove and toremove == segments[-1 * len(toremove):]:
        ret = segments[:len(segments) - len(toremove)]
        # Removing 'b/c' (vs '/b/c') leaves a trailing '/', i.e. ''.
        if remove[0] != '' and ret:
            ret.append('')
        return ret
    return segments
def quacks_like_a_path_with_segments(obj):
    """Duck-type check: does <obj> look like a Path, i.e. does it have an
    iterable, non-string .segments attribute?"""
    segments = getattr(obj, 'segments', None)
    return segments is not None and is_iterable_but_not_string(segments)
class Path(object):
    """
    Represents a path comprised of zero or more path segments.

      http://tools.ietf.org/html/rfc3986#section-3.3

    Path parameters aren't supported.

    Attributes:
      _force_absolute: Function whos boolean return value specifies
        whether self.isabsolute should be forced to True or not. If
        _force_absolute(self) returns True, isabsolute is read only and
        raises an AttributeError if assigned to. If
        _force_absolute(self) returns False, isabsolute is mutable and
        can be set to True or False. URL paths use _force_absolute and
        return True if the netloc is non-empty (not equal to
        ''). Fragment paths are never read-only and their
        _force_absolute(self) always returns False.
      segments: List of zero or more path segments comprising this
        path. If the path string has a trailing '/', the last segment
        will be '' and self.isdir will be True and self.isfile will be
        False. An empty segment list represents an empty path, not '/'
        (though they have the same meaning).
      isabsolute: Boolean whether or not this is an absolute path or
        not. An absolute path starts with a '/'. self.isabsolute is
        False if the path is empty (self.segments == [] and str(path) ==
        '').
      strict: Boolean whether or not UserWarnings should be raised if
        improperly encoded path strings are provided to methods that
        take such strings, like load(), add(), set(), remove(), etc.
    """
    # From RFC 3986:
    #   segment     = *pchar
    #   pchar       = unreserved / pct-encoded / sub-delims / ":" / "@"
    #   unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    #   sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
    #                 / "*" / "+" / "," / ";" / "="
    SAFE_SEGMENT_CHARS = ":@-._~!$&'()*+,;="
    def __init__(self, path='', force_absolute=lambda _: False, strict=False):
        """
        Params:
          path: Initial path; a string, a segment list, or a Path.
          force_absolute: See the _force_absolute attribute docs above.
          strict: Warn on improperly encoded path strings (see class docs).
        """
        self.segments = []
        self.strict = strict
        self._isabsolute = False
        self._force_absolute = force_absolute
        self.load(path)
    def load(self, path):
        """
        Load <path>, replacing any existing path. <path> can either be
        a Path instance, a list of segments, a path string to adopt.
        Returns: <self>.
        """
        if not path:
            segments = []
        elif quacks_like_a_path_with_segments(path): # Path interface.
            segments = path.segments
        elif is_iterable_but_not_string(path): # List interface.
            segments = path
        else: # String interface.
            segments = self._segments_from_path(path)
        if self._force_absolute(self):
            # Forced-absolute paths are absolute whenever non-empty.
            self._isabsolute = True if segments else False
        else:
            # A leading '' segment marks an absolute path.
            self._isabsolute = (segments and segments[0] == '')
        # An absolute path's leading '' is implied; don't store it.
        if self.isabsolute and len(segments) > 1 and segments[0] == '':
            segments.pop(0)
        self.segments = segments
        return self
    def add(self, path):
        """
        Add <path> to the existing path. <path> can either be a Path instance,
        a list of segments, or a path string to append to the existing path.

        Returns: <self>.
        """
        if quacks_like_a_path_with_segments(path): # Path interface.
            newsegments = path.segments
        elif is_iterable_but_not_string(path): # List interface.
            newsegments = path
        else: # String interface.
            newsegments = self._segments_from_path(path)
        # Preserve the opening '/' if one exists already (self.segments
        # == ['']).
        if self.segments == [''] and newsegments and newsegments[0] != '':
            newsegments.insert(0, '')
        segments = self.segments
        # Re-materialize the implied leading '' of an absolute path so the
        # join below sees it.
        if self.isabsolute and self.segments and self.segments[0] != '':
            segments.insert(0, '')
        self.load(join_path_segments(segments, newsegments))
        return self
    def set(self, path):
        """Replace the current path with <path>. Returns: <self>."""
        self.load(path)
        return self
    def remove(self, path):
        """
        Remove <path> -- a Path, segment list, or string -- from the end of
        this path, or the entire path if <path> is True.
        Returns: <self>.
        """
        if path is True:
            self.load('')
        else:
            if is_iterable_but_not_string(path): # List interface.
                segments = path
            else: # String interface.
                segments = self._segments_from_path(path)
            base = ([''] if self.isabsolute else []) + self.segments
            self.load(remove_path_segments(base, segments))
        return self
    def normalize(self):
        """
        Normalize the path. Turn '//a/./b/../c//' into '/a/c/'.

        Returns: <self>.
        """
        if str(self):
            normalized = normpath(str(self)) + ('/' * self.isdir)
            if normalized.startswith('//'): # http://bugs.python.org/636648
                normalized = '/' + normalized.lstrip('/')
            self.load(normalized)
        return self
    def asdict(self):
        """Return a dict summary of this path, for inspection/debugging."""
        return {
            'encoded': str(self),
            'isdir': self.isdir,
            'isfile': self.isfile,
            'segments': self.segments,
            'isabsolute': self.isabsolute,
        }
    @property
    def isabsolute(self):
        # Forced-absolute paths (e.g. URL paths with a netloc) are always
        # reported absolute; see _force_absolute in the class docstring.
        if self._force_absolute(self):
            return True
        return self._isabsolute
    @isabsolute.setter
    def isabsolute(self, isabsolute):
        """
        Raises: AttributeError if _force_absolute(self) returns True.
        """
        if self._force_absolute(self):
            s = ('Path.isabsolute is True and read-only for URLs with a netloc'
                 ' (a username, password, host, and/or port). A URL path must '
                 "start with a '/' to separate itself from a netloc.")
            raise AttributeError(s)
        self._isabsolute = isabsolute
    @property
    def isdir(self):
        """
        Returns: True if the path ends on a directory, False
        otherwise. If True, the last segment is '', representing the
        trailing '/' of the path.
        """
        return (self.segments == [] or
                (self.segments and self.segments[-1] == ''))
    @property
    def isfile(self):
        """
        Returns: True if the path ends on a file, False otherwise. If
        True, the last segment is not '', representing some file as the
        last segment of the path.
        """
        return not self.isdir
    def __truediv__(self, path):
        # Support 'path / "segment"' syntax; returns a new, joined Path.
        copy = deepcopy(self)
        return copy.add(path)
    def __eq__(self, other):
        # Paths compare by their encoded string form.
        return str(self) == str(other)
    def __ne__(self, other):
        return not self == other
    def __bool__(self):
        # Truthy iff the path is non-empty.
        return len(self.segments) > 0
    __nonzero__ = __bool__
    def __str__(self):
        segments = list(self.segments)
        if self.isabsolute:
            # Re-add the implied leading '' (and a trailing '' for a bare
            # '/') dropped by load().
            if not segments:
                segments = ['', '']
            else:
                segments.insert(0, '')
        return self._path_from_segments(segments)
    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, str(self))
    def _segments_from_path(self, path):
        """
        Returns: The list of path segments from the path string <path>.

        Raises: UserWarning if <path> is an improperly encoded path
        string and self.strict is True.

        TODO(grun): Accept both list values and string values and
        refactor the list vs string interface testing to this common
        method.
        """
        segments = []
        for segment in path.split('/'):
            if not is_valid_encoded_path_segment(segment):
                segment = quote(utf8(segment))
                if self.strict:
                    s = ("Improperly encoded path string received: '%s'. "
                         "Proceeding, but did you mean '%s'?" %
                         (path, self._path_from_segments(segments)))
                    warnings.warn(s, UserWarning)
            segments.append(utf8(segment))
            # NOTE(review): presumably defensive cleanup of the loop
            # variable -- confirm whether this del is still needed.
            del segment
        # In Python 3, utf8() returns Bytes objects that must be decoded into
        # strings before they can be passed to unquote(). In Python 2, utf8()
        # returns strings that can be passed directly to urllib.unquote().
        segments = [
            segment.decode('utf8')
            if isinstance(segment, bytes) and not isinstance(segment, str)
            else segment for segment in segments]
        return [unquote(segment) for segment in segments]
    def _path_from_segments(self, segments):
        """
        Combine the provided path segments <segments> into a path string. Path
        segments in <segments> will be quoted.

        Returns: A path string with quoted path segments.
        """
        segments = [
            quote(utf8(attemptstr(segment)), self.SAFE_SEGMENT_CHARS)
            for segment in segments]
        return '/'.join(segments)
@six.add_metaclass(abc.ABCMeta)
class PathCompositionInterface(object):
    """
    Abstract class interface for a parent class that contains a Path.
    """
    def __init__(self, strict=False):
        """
        Params:
          force_absolute: See Path._force_absolute.

        Assignments to <self> in __init__() must be added to
        __setattr__() below.
        """
        self._path = Path(force_absolute=self._force_absolute, strict=strict)
    @property
    def path(self):
        # The composed Path object.
        return self._path
    @property
    def pathstr(self):
        """This method is deprecated. Use str(furl.path) instead."""
        s = ('furl.pathstr is deprecated. Use str(furl.path) instead. There '
             'should be one, and preferably only one, obvious way to serialize'
             ' a Path object to a string.')
        warnings.warn(s, DeprecationWarning)
        return str(self._path)
    @abc.abstractmethod
    def _force_absolute(self, path):
        """
        Subclass me.
        """
        pass
    def __setattr__(self, attr, value):
        """
        Returns: True if this attribute is handled and set here, False
        otherwise.
        """
        if attr == '_path':
            self.__dict__[attr] = value
            return True
        elif attr == 'path':
            # Assigning to .path loads the value into the existing Path.
            self._path.load(value)
            return True
        return False
@six.add_metaclass(abc.ABCMeta)
class URLPathCompositionInterface(PathCompositionInterface):
    """
    Abstract class interface for a parent class that contains a URL Path.

    When a netloc is present, a URL path must be absolute -- it has to
    start with '/' to separate it from the netloc. For example, the URL
    'http://google.coma/path' makes no sense; 'http://google.com/a/path'
    is what's meant. So isabsolute is forced True, and read-only, while
    the netloc is non-empty.

    Without a netloc, isabsolute is freely mutable regardless of the
    scheme. E.g. 'mailto:user@host.com', with scheme 'mailto' and path
    'user@host.com', has a mutable isabsolute because there is no
    netloc. See

      http://en.wikipedia.org/wiki/URI_scheme#Examples
    """
    def __init__(self, strict=False):
        PathCompositionInterface.__init__(self, strict=strict)
    def _force_absolute(self, path):
        # An empty path is never forced absolute; otherwise a truthy
        # netloc forces it.
        if not path:
            return False
        return self.netloc
@six.add_metaclass(abc.ABCMeta)
class FragmentPathCompositionInterface(PathCompositionInterface):
    """
    Abstract class interface for a parent class that contains a Fragment
    Path.

    Unlike URL paths, fragment paths are never forced absolute: callers
    may set isabsolute to either True or False.
    """
    def __init__(self, strict=False):
        PathCompositionInterface.__init__(self, strict=strict)
    def _force_absolute(self, path):
        # Fragment paths are never forced to be absolute.
        return False
class Query(object):
"""
Represents a URL query comprised of zero or more unique parameters
and their respective values.
http://tools.ietf.org/html/rfc3986#section-3.4
All interaction with Query.params is done with unquoted strings. So
f.query.params['a'] = 'a%5E'
means the intended value for 'a' is 'a%5E', not 'a^'.
Query.params is implemented as an omdict1D object - a one
dimensional ordered multivalue dictionary. This provides support for
repeated URL parameters, like 'a=1&a=2'. omdict1D is a subclass of
omdict, an ordered multivalue dictionary. Documentation for omdict
can be found here
https://github.com/gruns/orderedmultidict
The one dimensional aspect of omdict1D means that a list of values
is interpreted as multiple values, not a single value which is
itself a list of values. This is a reasonable distinction to make
because URL query parameters are one dimensional: query parameter
values cannot themselves be composed of sub-values.
So what does this mean? This means we can safely interpret
f = furl('http://www.google.com')
f.query.params['arg'] = ['one', 'two', 'three']
as three different values for 'arg': 'one', 'two', and 'three',
instead of a single value which is itself some serialization of the
python list ['one', 'two', 'three']. Thus, the result of the above
will be
f.query.allitems() == [
('arg','one'), ('arg','two'), ('arg','three')]
and not
f.query.allitems() == [('arg', ['one', 'two', 'three'])]
The latter doesn't make sense because query parameter values cannot
be composed of sub-values. So finally
str(f.query) == 'arg=one&arg=two&arg=three'
Additionally, while the set of allowed characters in URL queries is
defined in RFC 3986 section 3.4, the format for encoding key=value
pairs within the query is not. In turn, the parsing of encoded
key=value query pairs differs between implementations.
As a compromise to support equal signs in both key=value pair
encoded queries, like
https://www.google.com?a=1&b=2
and non-key=value pair encoded queries, like
https://www.google.com?===3===
equal signs are percent encoded in key=value pairs where the key is
non-empty, e.g.
https://www.google.com?equal-sign=%3D
but not encoded in key=value pairs where the key is empty, e.g.
https://www.google.com?===equal=sign===
This presents a reasonable compromise to accurately reproduce
non-key=value queries with equal signs while also still percent
encoding equal signs in key=value pair encoded queries, as
expected. See
https://github.com/gruns/furl/issues/99
for more details.
Attributes:
params: Ordered multivalue dictionary of query parameter key:value
pairs. Parameters in self.params are maintained URL decoded,
e.g. 'a b' not 'a+b'.
strict: Boolean whether or not UserWarnings should be raised if
improperly encoded query strings are provided to methods that
take such strings, like load(), add(), set(), remove(), etc.
"""
# From RFC 3986:
# query = *( pchar / "/" / "?" )
# pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
SAFE_KEY_CHARS = "/?:@-._~!$'()*+,;"
SAFE_VALUE_CHARS = SAFE_KEY_CHARS + '='
def __init__(self, query='', strict=False):
self.strict = strict
self._params = omdict1D()
self.load(query)
def load(self, query):
items = self._items(query)
self.params.load(items)
return self
def add(self, args):
for param, value in self._items(args):
self.params.add(param, value)
return self
def set(self, mapping):
"""
Adopt all mappings in <mapping>, replacing any existing mappings
with the same key. If a key has multiple values in <mapping>,
they are all adopted.
Examples:
Query({1:1}).set([(1,None),(2,2)]).params.allitems()
== [(1,None),(2,2)]
Query({1:None,2:None}).set([(1,1),(2,2),(1,11)]).params.allitems()
== [(1,1),(2,2),(1,11)]
Query({1:None}).set([(1,[1,11,111])]).params.allitems()
== [(1,1),(1,11),(1,111)]
Returns: <self>.
"""
self.params.updateall(mapping)
return self
def remove(self, query):
if query is True:
self.load('')
return self
# Single key to remove.
items = [query]
# Dictionary or multivalue dictionary of items to remove.
if callable_attr(query, 'items'):
items = self._items(query)
# List of keys or items to remove.
elif non_string_iterable(query):
items = query
for item in items:
if non_string_iterable(item) and len(item) == 2:
key, value = item
self.params.popvalue(key, value, None)
else:
key = item
self.params.pop(key, None)
return self
@property
def params(self):
return self._params
@params.setter
def params(self, params):
items = self._items(params)
self._params.clear()
for key, value in items:
self._params.add(key, value)
def encode(self, delimiter='&', quote_plus=True, dont_quote='',
delimeter=_absent):
"""
Examples:
Query('a=a&b=#').encode() == 'a=a&b=%23'
Query('a=a&b=#').encode(';') == 'a=a;b=%23'
Query('a+b=c@d').encode(dont_quote='@') == 'a+b=c@d'
Query('a+b=c@d').encode(quote_plus=False) == 'a%20b=c%40d'
Until furl v0.4.6, the 'delimiter' argument was incorrectly
spelled 'delimeter'. For backwards compatibility, accept both
the correct 'delimiter' and the old, misspelled 'delimeter'.
Keys and values are encoded application/x-www-form-urlencoded if
<quote_plus> is True, percent-encoded otherwise.
<dont_quote> exempts valid query characters from being
percent-encoded, either in their entirety with dont_quote=True,
or selectively with dont_quote=<string>, like
dont_quote='/?@_'. Invalid query characters -- those not in
self.SAFE_KEY_CHARS, like '#' and '^' -- are always encoded,
even if included in <dont_quote>. For example:
Query('#=^').encode(dont_quote='#^') == '%23=%5E'.
Returns: A URL encoded query string using <delimiter> as the
delimiter separating key:value pairs. The most common and
default delimiter is '&', but ';' can also be specified. ';' is
W3C recommended.
"""
if delimeter is not _absent:
delimiter = delimeter
quote_key = create_quote_fn(self.SAFE_KEY_CHARS, quote_plus)
quote_value = create_quote_fn(self.SAFE_VALUE_CHARS, quote_plus)
pairs = []
for key, value in self.params.iterallitems():
utf8key = utf8(key, utf8(attemptstr(key)))
quoted_key = quote_key(utf8key, dont_quote)
if value is None: # Example: http://sprop.su/?key.
pair = quoted_key
else: # Example: http://sprop.su/?key=value.
utf8value = utf8(value, utf8(attemptstr(value)))
quoted_value = quote_value(utf8value, dont_quote)
if not quoted_key: # Unquote '=' to allow queries like '?==='.
quoted_value = quoted_value.replace('%3D', '=')
pair = '%s=%s' % (quoted_key, quoted_value)
pairs.append(pair)
query = delimiter.join(pairs)
return query
def asdict(self):
return {
'encoded': str(self),
'params': self.params.allitems(),
}
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return not self == other
def __bool__(self):
return len(self.params) > 0
__nonzero__ = __bool__
def __str__(self):
return self.encode()
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, str(self))
def _items(self, items):
"""
Extract and return the key:value items from various
containers. Some containers that could hold key:value items are
- List of (key,value) tuples.
- Dictionaries of key:value items.
- Multivalue dictionary of key:value items, with potentially
repeated keys.
- Query string with encoded params and values.
Keys and values are passed through unmodified unless they were
passed in within an encoded query string, like
'a=a%20a&b=b'. Keys and values passed in within an encoded query
string are unquoted by urlparse.parse_qsl(), which uses
urllib.unquote_plus() internally.
Returns: List of items as (key, value) tuples. Keys and values
are passed through unmodified unless they were passed in as part
of an encoded query string, in which case the final keys and
values that are returned will be unquoted.
Raises: UserWarning if <path> is an improperly encoded path
string and self.strict is True.
"""
if not items:
items = []
# Multivalue Dictionary-like interface. e.g. {'a':1, 'a':2,
# 'b':2}
elif callable_attr(items, 'allitems'):
items = list(items.allitems())
elif callable_attr(items, 'iterallitems'):
items = list(items.iterallitems())
# Dictionary-like interface. e.g. {'a':1, 'b':2, 'c':3}
elif callable_attr(items, 'items'):
items = list(items.items())
elif callable_attr(items, 'iteritems'):
items = list(items.iteritems())
# Encoded query string. e.g. 'a=1&b=2&c=3'
elif isinstance(items, six.string_types):
items = self._extract_items_from_querystr(items)
# Default to list of key:value items interface. e.g. [('a','1'),
# ('b','2')]
else:
items = list(items)
return items
    def _extract_items_from_querystr(self, querystr):
        """Parse an encoded query string into decoded (key, value) items.

        Both '&' and ';' are accepted as pair delimiters. '+' characters
        are decoded to spaces and percent-encodings are unquoted. A pair
        without a '=' (e.g. the 'sup' in '?sup') yields a value of None.

        Raises: UserWarning if self.strict is True and an improperly
        percent-encoded key or value is encountered.
        """
        items = []

        pairstrs = [s2 for s1 in querystr.split('&') for s2 in s1.split(';')]
        pairs = [item.split('=', 1) for item in pairstrs]
        pairs = [(p[0], lget(p, 1, '')) for p in pairs]  # Pad with value ''.
        for pairstr, (key, value) in six.moves.zip(pairstrs, pairs):
            valid_key = is_valid_encoded_query_key(key)
            valid_value = is_valid_encoded_query_value(value)
            if self.strict and (not valid_key or not valid_value):
                msg = (
                    "Incorrectly percent encoded query string received: '%s'. "
                    "Proceeding, but did you mean '%s'?" %
                    (querystr, urllib.parse.urlencode(pairs)))
                warnings.warn(msg, UserWarning)

            key_decoded = unquote(key.replace('+', ' '))
            # Empty value without a '=', e.g. '?sup'.
            if key == pairstr:
                value_decoded = None
            else:
                value_decoded = unquote(value.replace('+', ' '))

            items.append((key_decoded, value_decoded))

        return items
@six.add_metaclass(abc.ABCMeta)
class QueryCompositionInterface(object):
    """
    Abstract class interface for a parent class that contains a Query.
    """

    def __init__(self, strict=False):
        self._query = Query(strict=strict)

    @property
    def query(self):
        # The composed Query object.
        return self._query

    @property
    def querystr(self):
        """This method is deprecated. Use str(furl.query) instead."""
        s = ('furl.querystr is deprecated. Use str(furl.query) instead. There '
             'should be one, and preferably only one, obvious way to serialize'
             ' a Query object to a string.')
        warnings.warn(s, DeprecationWarning)
        return str(self._query)

    @property
    def args(self):
        """
        Shortcut method to access the query parameters, self._query.params.
        """
        return self._query.params

    def __setattr__(self, attr, value):
        """
        Cooperative __setattr__: composing subclasses chain these calls
        and only fall back to object.__setattr__ when every interface
        declines to handle the attribute (see furl.__setattr__).

        Returns: True if this attribute is handled and set here, False
        otherwise.
        """
        if attr == 'args' or attr == 'query':
            self._query.load(value)
            return True
        return False
class Fragment(FragmentPathCompositionInterface, QueryCompositionInterface):
    """
    Represents a URL fragment, comprised internally of a Path and Query
    optionally separated by a '?' character.

      http://tools.ietf.org/html/rfc3986#section-3.5

    Attributes:
      path: Path object from FragmentPathCompositionInterface.
      query: Query object from QueryCompositionInterface.
      separator: Boolean whether or not a '?' separator should be
        included in the string representation of this fragment. When
        False, a '?' character will not separate the fragment path from
        the fragment query in the fragment string. This is useful to
        build fragments like '#!arg1=val1&arg2=val2', where no
        separating '?' is desired.
    """

    def __init__(self, fragment='', strict=False):
        FragmentPathCompositionInterface.__init__(self, strict=strict)
        QueryCompositionInterface.__init__(self, strict=strict)
        self.strict = strict
        self.separator = True

        self.load(fragment)

    def load(self, fragment):
        """Parse <fragment> into this fragment's path and query parts."""
        # Reset both components before parsing the new fragment string.
        self.path.load('')
        self.query.load('')

        if fragment is None:
            fragment = ''

        toks = fragment.split('?', 1)
        if len(toks) == 0:
            self._path.load('')
            self._query.load('')
        elif len(toks) == 1:
            # Does this fragment look like a path or a query? Default to
            # path.
            if '=' in fragment:  # Query example: '#woofs=dogs'.
                self._query.load(fragment)
            else:  # Path example: '#supinthisthread'.
                self._path.load(fragment)
        else:
            # Does toks[1] actually look like a query? Like 'a=a' or
            # 'a=' or '=a'?
            if '=' in toks[1]:
                self._path.load(toks[0])
                self._query.load(toks[1])
            # If toks[1] doesn't look like a query, the user probably
            # provided a fragment string like 'a?b?' that was intended
            # to be adopted as-is, not a two part fragment with path 'a'
            # and query 'b?'.
            else:
                self._path.load(fragment)

    def add(self, path=_absent, args=_absent):
        # Append to the fragment's path and/or query. Returns <self>.
        if path is not _absent:
            self.path.add(path)
        if args is not _absent:
            self.query.add(args)
        return self

    def set(self, path=_absent, args=_absent, separator=_absent):
        # Replace the fragment's path, query, and/or separator flag.
        # Returns <self>.
        if path is not _absent:
            self.path.load(path)
        if args is not _absent:
            self.query.load(args)
        if separator is True or separator is False:
            self.separator = separator
        return self

    def remove(self, fragment=_absent, path=_absent, args=_absent):
        # fragment=True clears the entire fragment. Returns <self>.
        if fragment is True:
            self.load('')
        if path is not _absent:
            self.path.remove(path)
        if args is not _absent:
            self.query.remove(args)
        return self

    def asdict(self):
        # Dict representation including nested path and query dicts.
        return {
            'encoded': str(self),
            'separator': self.separator,
            'path': self.path.asdict(),
            'query': self.query.asdict(),
        }

    def __eq__(self, other):
        return str(self) == str(other)

    def __ne__(self, other):
        return not self == other

    def __setattr__(self, attr, value):
        # Chain the composed interfaces' cooperative __setattr__s; fall
        # back to normal attribute assignment if neither handled it.
        if (not PathCompositionInterface.__setattr__(self, attr, value) and
                not QueryCompositionInterface.__setattr__(self, attr, value)):
            object.__setattr__(self, attr, value)

    def __bool__(self):
        return bool(self.path) or bool(self.query)
    __nonzero__ = __bool__

    def __str__(self):
        path, query = str(self._path), str(self._query)

        # If there is no query or self.separator is False, decode all
        # '?' characters in the path from their percent encoded form
        # '%3F' to '?'. This allows for fragment strings containing '?'s,
        # like '#dog?machine?yes'.
        if path and (not query or not self.separator):
            path = path.replace('%3F', '?')
        separator = '?' if path and query and self.separator else ''

        return path + separator + query

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, str(self))
@six.add_metaclass(abc.ABCMeta)
class FragmentCompositionInterface(object):
    """
    Abstract class interface for a parent class that contains a
    Fragment.
    """

    def __init__(self, strict=False):
        self._fragment = Fragment(strict=strict)

    @property
    def fragment(self):
        # The composed Fragment object.
        return self._fragment

    @property
    def fragmentstr(self):
        """This method is deprecated. Use str(furl.fragment) instead."""
        s = ('furl.fragmentstr is deprecated. Use str(furl.fragment) instead. '
             'There should be one, and preferably only one, obvious way to '
             'serialize a Fragment object to a string.')
        warnings.warn(s, DeprecationWarning)
        return str(self._fragment)

    def __setattr__(self, attr, value):
        """
        Cooperative __setattr__ chained by composing subclasses (see
        furl.__setattr__); only 'fragment' is handled here.

        Returns: True if this attribute is handled and set here, False
        otherwise.
        """
        if attr == 'fragment':
            self.fragment.load(value)
            return True
        return False
class furl(URLPathCompositionInterface, QueryCompositionInterface,
           FragmentCompositionInterface, UnicodeMixin):
    """
    Object for simple parsing and manipulation of a URL and its
    components.

      scheme://username:password@host:port/path?query#fragment

    Attributes:
      strict: Boolean whether or not UserWarnings should be raised if
        improperly encoded path, query, or fragment strings are provided
        to methods that take such strings, like load(), add(), set(),
        remove(), etc.
      username: Username string for authentication. Initially None.
      password: Password string for authentication with
        <username>. Initially None.
      scheme: URL scheme. A string ('http', 'https', '', etc) or None.
        All lowercase. Initially None.
      host: URL host (hostname, IPv4 address, or IPv6 address), not
        including port. All lowercase. Initially None.
      port: Port. Valid port values are 1-65535, or None meaning no port
        specified.
      netloc: Network location. Combined host and port string. Initially
        None.
      path: Path object from URLPathCompositionInterface.
      query: Query object from QueryCompositionInterface.
      fragment: Fragment object from FragmentCompositionInterface.
    """

    def __init__(self, url='', args=_absent, path=_absent, fragment=_absent,
                 scheme=_absent, netloc=_absent, origin=_absent,
                 fragment_path=_absent, fragment_args=_absent,
                 fragment_separator=_absent, host=_absent, port=_absent,
                 query=_absent, query_params=_absent, username=_absent,
                 password=_absent, strict=False):
        """
        Raises: ValueError on invalid URL or invalid URL component(s) provided.
        """
        URLPathCompositionInterface.__init__(self, strict=strict)
        QueryCompositionInterface.__init__(self, strict=strict)
        FragmentCompositionInterface.__init__(self, strict=strict)
        self.strict = strict

        self.load(url)  # Raises ValueError on invalid URL.
        self.set(  # Raises ValueError on invalid URL component(s).
            args=args, path=path, fragment=fragment, scheme=scheme,
            netloc=netloc, origin=origin, fragment_path=fragment_path,
            fragment_args=fragment_args, fragment_separator=fragment_separator,
            host=host, port=port, query=query, query_params=query_params,
            username=username, password=password)

    def load(self, url):
        """
        Parse and load a URL.

        Raises: ValueError on invalid URL, like a malformed IPv6 address
        or invalid port.
        """
        self.username = self.password = None
        self._host = self._port = self._scheme = None

        if url is None:
            url = ''
        if not isinstance(url, six.string_types):
            url = str(url)

        # urlsplit() raises a ValueError on malformed IPv6 addresses in
        # Python 2.7+.
        tokens = urlsplit(url)

        self.netloc = tokens.netloc  # Raises ValueError in Python 2.7+.
        self.scheme = tokens.scheme
        if not self.port:
            self._port = DEFAULT_PORTS.get(self.scheme)
        self.path.load(tokens.path)
        self.query.load(tokens.query)
        self.fragment.load(tokens.fragment)
        return self

    @property
    def scheme(self):
        return self._scheme

    @scheme.setter
    def scheme(self, scheme):
        # Schemes are case-insensitive; normalize to lowercase.
        if callable_attr(scheme, 'lower'):
            scheme = scheme.lower()
        self._scheme = scheme

    @property
    def host(self):
        return self._host

    @host.setter
    def host(self, host):
        """
        Raises: ValueError on invalid host or malformed IPv6 address.
        """
        # Invalid IPv6 literal.
        urllib.parse.urlsplit('http://%s/' % host)  # Raises ValueError.

        # Invalid host string.
        resembles_ipv6_literal = (
            host is not None and lget(host, 0) == '[' and ':' in host and
            lget(host, -1) == ']')
        if (host is not None and not resembles_ipv6_literal and
                not is_valid_host(host)):
            errmsg = (
                "Invalid host '%s'. Host strings must have at least one "
                "non-period character, can't contain any of '%s', and can't "
                "have adjacent periods.")
            raise ValueError(errmsg % (host, INVALID_HOST_CHARS))

        # Hostnames are case-insensitive; normalize to lowercase and
        # decode punycode ('xn--') encoded hostnames for display.
        if callable_attr(host, 'lower'):
            host = host.lower()
        if callable_attr(host, 'startswith') and host.startswith('xn--'):
            host = idna_decode(host)
        self._host = host

    @property
    def port(self):
        # Fall back on the scheme's well-known default port, if any.
        return self._port or DEFAULT_PORTS.get(self.scheme)

    @port.setter
    def port(self, port):
        """
        The port value can be 1-65535 or None, meaning no port specified. If
        <port> is None and self.scheme is a known scheme in DEFAULT_PORTS,
        the default port value from DEFAULT_PORTS will be used.

        Raises: ValueError on invalid port.
        """
        if port is None:
            self._port = DEFAULT_PORTS.get(self.scheme)
        elif is_valid_port(port):
            self._port = int(str(port))
        else:
            raise ValueError("Invalid port '%s'." % port)

    @property
    def netloc(self):
        # Rebuild '[user[:pass]@]host[:port]', percent-encoding
        # credentials and IDNA-encoding the host. Default ports are
        # omitted.
        userpass = quote(utf8(self.username) or '', safe='')
        if self.password is not None:
            userpass += ':' + quote(utf8(self.password), safe='')
        if userpass or self.username is not None:
            userpass += '@'

        netloc = idna_encode(self.host)
        if self.port and self.port != DEFAULT_PORTS.get(self.scheme):
            netloc = (netloc or '') + (':' + str(self.port))

        if userpass or netloc:
            netloc = (userpass or '') + (netloc or '')

        return netloc

    @netloc.setter
    def netloc(self, netloc):
        """
        Params:
          netloc: Network location string, like 'google.com' or
            'user:pass@google.com:99'.

        Raises: ValueError on invalid port or malformed IPv6 address.
        """
        # Raises ValueError on malformed IPv6 addresses.
        urllib.parse.urlsplit('http://%s/' % netloc)

        username = password = host = port = None

        if netloc and '@' in netloc:
            userpass, netloc = netloc.split('@', 1)
            if ':' in userpass:
                username, password = userpass.split(':', 1)
            else:
                username = userpass

        if netloc and ':' in netloc:
            # IPv6 address literal.
            if ']' in netloc:
                colonpos, bracketpos = netloc.rfind(':'), netloc.rfind(']')
                if colonpos > bracketpos and colonpos != bracketpos + 1:
                    raise ValueError("Invalid netloc '%s'." % netloc)
                elif colonpos > bracketpos and colonpos == bracketpos + 1:
                    host, port = netloc.rsplit(':', 1)
                else:
                    host = netloc
            else:
                host, port = netloc.rsplit(':', 1)
        else:
            host = netloc

        # Avoid side effects by assigning self.port before self.host so
        # that if an exception is raised when assigning self.port,
        # self.host isn't updated.
        self.port = port  # Raises ValueError on invalid port.
        self.host = host
        self.username = None if username is None else unquote(username)
        self.password = None if password is None else unquote(password)

    @property
    def origin(self):
        # 'scheme://host[:port]', omitting the port when it's the
        # scheme's default.
        port = ''
        scheme = self.scheme or ''
        host = idna_encode(self.host) or ''
        if self.port and self.port != DEFAULT_PORTS.get(self.scheme):
            port = ':%s' % self.port
        origin = '%s://%s%s' % (scheme, host, port)
        return origin

    @origin.setter
    def origin(self, origin):
        if origin is None:
            self.scheme = self.netloc = None
        else:
            toks = origin.split('://', 1)
            if len(toks) == 1:
                host_port = origin
            else:
                self.scheme, host_port = toks

            if ':' in host_port:
                self.host, self.port = host_port.split(':', 1)
            else:
                self.host = host_port

    @property
    def url(self):
        return self.tostr()

    @url.setter
    def url(self, url):
        return self.load(url)

    def add(self, args=_absent, path=_absent, fragment_path=_absent,
            fragment_args=_absent, query_params=_absent):
        """
        Add components to a URL and return this furl instance, <self>.

        If both <args> and <query_params> are provided, a UserWarning is
        raised because <args> is provided as a shortcut for
        <query_params>, not to be used simultaneously with
        <query_params>. Nonetheless, providing both <args> and
        <query_params> behaves as expected, with query keys and values
        from both <args> and <query_params> added to the query - <args>
        first, then <query_params>.

        Parameters:
          args: Shortcut for <query_params>.
          path: A list of path segments to add to the existing path
            segments, or a path string to join with the existing path
            string.
          query_params: A dictionary of query keys and values or list of
            key:value items to add to the query.
          fragment_path: A list of path segments to add to the existing
            fragment path segments, or a path string to join with the
            existing fragment path string.
          fragment_args: A dictionary of query keys and values or list
            of key:value items to add to the fragment's query.

        Returns: <self>.

        Raises: UserWarning if redundant and possibly conflicting <args> and
        <query_params> were provided.
        """
        if args is not _absent and query_params is not _absent:
            s = ('Both <args> and <query_params> provided to furl.add(). '
                 '<args> is a shortcut for <query_params>, not to be used '
                 'with <query_params>. See furl.add() documentation for more '
                 'details.')
            warnings.warn(s, UserWarning)

        if path is not _absent:
            self.path.add(path)
        if args is not _absent:
            self.query.add(args)
        if query_params is not _absent:
            self.query.add(query_params)
        if fragment_path is not _absent or fragment_args is not _absent:
            self.fragment.add(path=fragment_path, args=fragment_args)
        return self

    def set(self, args=_absent, path=_absent, fragment=_absent, query=_absent,
            scheme=_absent, username=_absent, password=_absent, host=_absent,
            port=_absent, netloc=_absent, origin=_absent, query_params=_absent,
            fragment_path=_absent, fragment_args=_absent,
            fragment_separator=_absent):
        """
        Set components of a url and return this furl instance, <self>.

        If any overlapping, and hence possibly conflicting, parameters
        are provided, appropriate UserWarning's will be raised. The
        groups of parameters that could potentially overlap are

          <scheme> and <origin>
          <origin>, <netloc>, and/or (<host> or <port>)
          <fragment> and (<fragment_path> and/or <fragment_args>)
          any two or all of <query>, <args>, and/or <query_params>

        In all of the above groups, the latter parameter(s) take
        precedence over the earlier parameter(s). So, for example

          furl('http://google.com/').set(
            netloc='yahoo.com:99', host='bing.com', port=40)

        will result in a UserWarning being raised and the url becoming

          'http://bing.com:40/'

        not

          'http://yahoo.com:99/

        Parameters:
          args: Shortcut for <query_params>.
          path: A list of path segments or a path string to adopt.
          fragment: Fragment string to adopt.
          scheme: Scheme string to adopt.
          netloc: Network location string to adopt.
          origin: Scheme and netloc.
          query: Query string to adopt.
          query_params: A dictionary of query keys and values or list of
            key:value items to adopt.
          fragment_path: A list of path segments to adopt for the
            fragment's path or a path string to adopt as the fragment's
            path.
          fragment_args: A dictionary of query keys and values or list
            of key:value items for the fragment's query to adopt.
          fragment_separator: Boolean whether or not there should be a
            '?' separator between the fragment path and fragment query.
          host: Host string to adopt.
          port: Port number to adopt.
          username: Username string to adopt.
          password: Password string to adopt.

        Raises:
          ValueError on invalid port.
          UserWarning if <scheme> and <origin> are provided.
          UserWarning if <origin>, <netloc> and/or (<host> and/or <port>) are
            provided.
          UserWarning if <query>, <args>, and/or <query_params> are provided.
          UserWarning if <fragment> and (<fragment_path>,
            <fragment_args>, and/or <fragment_separator>) are provided.

        Returns: <self>.
        """
        def present(v):
            return v is not _absent

        if present(scheme) and present(origin):
            s = ('Possible parameter overlap: <scheme> and <origin>. See '
                 'furl.set() documentation for more details.')
            warnings.warn(s, UserWarning)
        provided = [
            present(netloc), present(origin), present(host) or present(port)]
        if sum(provided) >= 2:
            s = ('Possible parameter overlap: <origin>, <netloc> and/or '
                 '(<host> and/or <port>) provided. See furl.set() '
                 'documentation for more details.')
            warnings.warn(s, UserWarning)
        if sum(present(p) for p in [args, query, query_params]) >= 2:
            s = ('Possible parameter overlap: <query>, <args>, and/or '
                 '<query_params> provided. See furl.set() documentation for '
                 'more details.')
            warnings.warn(s, UserWarning)
        provided = [fragment_path, fragment_args, fragment_separator]
        if present(fragment) and any(present(p) for p in provided):
            # Bugfix: added the missing space after '<fragment_path>' in
            # this user-facing warning message.
            s = ('Possible parameter overlap: <fragment> and '
                 '(<fragment_path> and/or <fragment_args>) or <fragment> '
                 'and <fragment_separator> provided. See furl.set() '
                 'documentation for more details.')
            warnings.warn(s, UserWarning)

        # Guard against side effects on exception.
        original_url = self.url
        try:
            if username is not _absent:
                self.username = username
            if password is not _absent:
                self.password = password
            if netloc is not _absent:
                # Raises ValueError on invalid port or malformed IP.
                self.netloc = netloc
            if origin is not _absent:
                # Raises ValueError on invalid port or malformed IP.
                self.origin = origin
            if scheme is not _absent:
                self.scheme = scheme
            if host is not _absent:
                # Raises ValueError on invalid host or malformed IP.
                self.host = host
            if port is not _absent:
                self.port = port  # Raises ValueError on invalid port.

            if path is not _absent:
                self.path.load(path)
            if query is not _absent:
                self.query.load(query)
            if args is not _absent:
                self.query.load(args)
            if query_params is not _absent:
                self.query.load(query_params)
            if fragment is not _absent:
                self.fragment.load(fragment)
            if fragment_path is not _absent:
                self.fragment.path.load(fragment_path)
            if fragment_args is not _absent:
                self.fragment.query.load(fragment_args)
            if fragment_separator is not _absent:
                self.fragment.separator = fragment_separator
        except Exception:
            # Restore the original URL so a failed set() leaves <self>
            # unchanged, then re-raise.
            self.load(original_url)
            raise
        return self

    def remove(self, args=_absent, path=_absent, fragment=_absent,
               query=_absent, scheme=False, username=False, password=False,
               host=False, port=False, netloc=False, origin=False,
               query_params=_absent, fragment_path=_absent,
               fragment_args=_absent):
        """
        Remove components of this furl's URL and return this furl
        instance, <self>.

        Parameters:
          args: Shortcut for query_params.
          path: A list of path segments to remove from the end of the
            existing path segments list, or a path string to remove from
            the end of the existing path string, or True to remove the
            path portion of the URL entirely.
          query: A list of query keys to remove from the query, if they
            exist, or True to remove the query portion of the URL
            entirely.
          query_params: A list of query keys to remove from the query,
            if they exist.
          port: If True, remove the port from the network location
            string, if it exists.
          fragment: If True, remove the fragment portion of the URL
            entirely.
          fragment_path: A list of path segments to remove from the end
            of the fragment's path segments or a path string to remove
            from the end of the fragment's path string.
          fragment_args: A list of query keys to remove from the
            fragment's query, if they exist.
          username: If True, remove the username, if it exists.
          password: If True, remove the password, if it exists.

        Returns: <self>.
        """
        if scheme is True:
            self.scheme = None
        if username is True:
            self.username = None
        if password is True:
            self.password = None
        if host is True:
            self.host = None
        if port is True:
            self.port = None
        if netloc is True:
            self.netloc = None
        if origin is True:
            self.origin = None

        if path is not _absent:
            self.path.remove(path)
        if args is not _absent:
            self.query.remove(args)
        if query is not _absent:
            self.query.remove(query)
        if query_params is not _absent:
            self.query.remove(query_params)
        if fragment is not _absent:
            self.fragment.remove(fragment)
        if fragment_path is not _absent:
            self.fragment.path.remove(fragment_path)
        if fragment_args is not _absent:
            self.fragment.query.remove(fragment_args)
        return self

    def tostr(self, query_delimiter='&', query_quote_plus=True,
              query_dont_quote=''):
        """Serialize this furl to a URL string, delegating query encoding
        options to Query.encode()."""
        encoded_query = self.query.encode(
            query_delimiter, query_quote_plus, query_dont_quote)
        url = urllib.parse.urlunsplit((
            self.scheme or '',  # Must be text type in Python 3.
            self.netloc,
            str(self.path),
            encoded_query,
            str(self.fragment),
        ))

        # Differentiate between '' and None values for scheme and netloc.
        if self.scheme == '':
            url = ':' + url
        if self.netloc == '':
            if self.scheme is None:
                url = '//' + url
            elif strip_scheme(url) == '':
                url = url + '//'

        return str(url)

    def join(self, *urls):
        # Resolve each URL against the current URL, RFC 3986 style, and
        # load the result. Returns <self>.
        for url in urls:
            if not isinstance(url, six.string_types):
                url = str(url)
            newurl = urljoin(self.url, url)
            self.load(newurl)
        return self

    def copy(self):
        return self.__class__(self)

    def asdict(self):
        # Nested dict representation of every URL component.
        return {
            'url': self.url,
            'scheme': self.scheme,
            'username': self.username,
            'password': self.password,
            'host': self.host,
            'host_encoded': idna_encode(self.host),
            'port': self.port,
            'netloc': self.netloc,
            'origin': self.origin,
            'path': self.path.asdict(),
            'query': self.query.asdict(),
            'fragment': self.fragment.asdict(),
        }

    def __truediv__(self, path):
        # Support `furl_obj / 'segment'` as shorthand for add(path=...).
        return self.copy().add(path=path)

    def __eq__(self, other):
        try:
            return self.url == other.url
        except AttributeError:
            # Bugfix: return NotImplemented instead of None so Python
            # falls back to its default comparison machinery and the
            # `==` expression always evaluates to a boolean.
            return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __setattr__(self, attr, value):
        # Chain the composed interfaces' cooperative __setattr__s; fall
        # back to normal attribute assignment if none handled it.
        if (not PathCompositionInterface.__setattr__(self, attr, value) and
            not QueryCompositionInterface.__setattr__(self, attr, value) and
                not FragmentCompositionInterface.__setattr__(self, attr, value)):
            object.__setattr__(self, attr, value)

    def __unicode__(self):
        return self.tostr()

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, str(self))
| 33.662447 | 79 | 0.590468 |
ace43bdb58ea64ce6b5c3d7f481769073eb17008 | 9,275 | py | Python | accelbyte_py_sdk/api/ugc/operations/public_group/update_group.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/ugc/operations/public_group/update_group.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/ugc/operations/public_group/update_group.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-ugc-service (2.1.0)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ModelsCreateGroupRequest
from ...models import ModelsCreateGroupResponse
from ...models import ResponseError
class UpdateGroup(Operation):
    """Update group (UpdateGroup)

    Required permission NAMESPACE:{namespace}:USER:{userId}:CONTENTGROUP [UPDATE]
    replace group name and contents with new ones

    Required Permission(s):
        - NAMESPACE:{namespace}:USER:{userId}:CONTENTGROUP [UPDATE]

    Properties:
        url: /ugc/v1/public/namespaces/{namespace}/users/{userId}/groups/{groupId}
        method: PUT
        tags: ["Public Group"]
        consumes: ["application/json", "application/octet-stream"]
        produces: ["application/json"]
        securities: [BEARER_AUTH]
        body: (body) REQUIRED ModelsCreateGroupRequest in body
        group_id: (groupId) REQUIRED str in path
        namespace: (namespace) REQUIRED str in path
        user_id: (userId) REQUIRED str in path

    Responses:
        200: OK - ModelsCreateGroupResponse (OK)
        400: Bad Request - ResponseError (Bad Request)
        401: Unauthorized - ResponseError (Unauthorized)
        404: Not Found - ResponseError (Not Found)
        500: Internal Server Error - ResponseError (Internal Server Error)
    """

    # NOTE(review): this class is code-generated (see the file header's
    # "DO NOT EDIT!"); change the generator template rather than editing
    # this class by hand.

    # region fields

    _url: str = "/ugc/v1/public/namespaces/{namespace}/users/{userId}/groups/{groupId}"
    _method: str = "PUT"
    _consumes: List[str] = ["application/json", "application/octet-stream"]
    _produces: List[str] = ["application/json"]
    _securities: List[List[str]] = [["BEARER_AUTH"]]
    _location_query: str = None

    body: ModelsCreateGroupRequest  # REQUIRED in [body]
    group_id: str  # REQUIRED in [path]
    namespace: str  # REQUIRED in [path]
    user_id: str  # REQUIRED in [path]

    # endregion fields

    # region properties

    @property
    def url(self) -> str:
        return self._url

    @property
    def method(self) -> str:
        return self._method

    @property
    def consumes(self) -> List[str]:
        return self._consumes

    @property
    def produces(self) -> List[str]:
        return self._produces

    @property
    def securities(self) -> List[List[str]]:
        return self._securities

    @property
    def location_query(self) -> str:
        return self._location_query

    # endregion properties

    # region get methods

    # endregion get methods

    # region get_x_params methods

    def get_all_params(self) -> dict:
        # Collect request parameters grouped by their wire location.
        return {
            "body": self.get_body_params(),
            "path": self.get_path_params(),
        }

    def get_body_params(self) -> Any:
        if not hasattr(self, "body") or self.body is None:
            return None
        return self.body.to_dict()

    def get_path_params(self) -> dict:
        result = {}
        if hasattr(self, "group_id"):
            result["groupId"] = self.group_id
        if hasattr(self, "namespace"):
            result["namespace"] = self.namespace
        if hasattr(self, "user_id"):
            result["userId"] = self.user_id
        return result

    # endregion get_x_params methods

    # region is/has methods

    # endregion is/has methods

    # region with_x methods

    def with_body(self, value: ModelsCreateGroupRequest) -> UpdateGroup:
        self.body = value
        return self

    def with_group_id(self, value: str) -> UpdateGroup:
        self.group_id = value
        return self

    def with_namespace(self, value: str) -> UpdateGroup:
        self.namespace = value
        return self

    def with_user_id(self, value: str) -> UpdateGroup:
        self.user_id = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        result: dict = {}
        if hasattr(self, "body") and self.body:
            result["body"] = self.body.to_dict(include_empty=include_empty)
        elif include_empty:
            result["body"] = ModelsCreateGroupRequest()
        if hasattr(self, "group_id") and self.group_id:
            result["groupId"] = str(self.group_id)
        elif include_empty:
            result["groupId"] = ""
        if hasattr(self, "namespace") and self.namespace:
            result["namespace"] = str(self.namespace)
        elif include_empty:
            result["namespace"] = ""
        if hasattr(self, "user_id") and self.user_id:
            result["userId"] = str(self.user_id)
        elif include_empty:
            result["userId"] = ""
        return result

    # endregion to methods

    # region response methods

    # noinspection PyMethodMayBeStatic
    def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, ModelsCreateGroupResponse], Union[None, HttpResponse, ResponseError]]:
        """Parse the given response.

        200: OK - ModelsCreateGroupResponse (OK)

        400: Bad Request - ResponseError (Bad Request)

        401: Unauthorized - ResponseError (Unauthorized)

        404: Not Found - ResponseError (Not Found)

        500: Internal Server Error - ResponseError (Internal Server Error)

        ---: HttpResponse (Undocumented Response)

        ---: HttpResponse (Unexpected Content-Type Error)

        ---: HttpResponse (Unhandled Error)
        """
        # Normalize the raw response first; a pre-processing error short
        # circuits parsing (no-content "errors" are treated as success).
        pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
        if error is not None:
            return None, None if error.is_no_content() else error
        code, content_type, content = pre_processed_response

        if code == 200:
            return ModelsCreateGroupResponse.create_from_dict(content), None
        if code == 400:
            return None, ResponseError.create_from_dict(content)
        if code == 401:
            return None, ResponseError.create_from_dict(content)
        if code == 404:
            return None, ResponseError.create_from_dict(content)
        if code == 500:
            return None, ResponseError.create_from_dict(content)

        return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)

    # endregion response methods

    # region static methods

    @classmethod
    def create(
        cls,
        body: ModelsCreateGroupRequest,
        group_id: str,
        namespace: str,
        user_id: str,
    ) -> UpdateGroup:
        instance = cls()
        instance.body = body
        instance.group_id = group_id
        instance.namespace = namespace
        instance.user_id = user_id
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> UpdateGroup:
        instance = cls()
        if "body" in dict_ and dict_["body"] is not None:
            instance.body = ModelsCreateGroupRequest.create_from_dict(dict_["body"], include_empty=include_empty)
        elif include_empty:
            instance.body = ModelsCreateGroupRequest()
        if "groupId" in dict_ and dict_["groupId"] is not None:
            instance.group_id = str(dict_["groupId"])
        elif include_empty:
            instance.group_id = ""
        if "namespace" in dict_ and dict_["namespace"] is not None:
            instance.namespace = str(dict_["namespace"])
        elif include_empty:
            instance.namespace = ""
        if "userId" in dict_ and dict_["userId"] is not None:
            instance.user_id = str(dict_["userId"])
        elif include_empty:
            instance.user_id = ""
        return instance

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        # Maps wire (JSON) field names to Python attribute names.
        return {
            "body": "body",
            "groupId": "group_id",
            "namespace": "namespace",
            "userId": "user_id",
        }

    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        # Every parameter of this operation is required.
        return {
            "body": True,
            "groupId": True,
            "namespace": True,
            "userId": True,
        }

    # endregion static methods
| 31.020067 | 164 | 0.616819 |
ace43c88a8d1b6673f31376f17aed1731a60b6f7 | 10,827 | py | Python | cinder/openstack/common/processutils.py | adelina-t/cinder | 238522098eabe6dafac3dc3cafd50e57d0339479 | [
"Apache-2.0"
] | null | null | null | cinder/openstack/common/processutils.py | adelina-t/cinder | 238522098eabe6dafac3dc3cafd50e57d0339479 | [
"Apache-2.0"
] | null | null | null | cinder/openstack/common/processutils.py | adelina-t/cinder | 238522098eabe6dafac3dc3cafd50e57d0339479 | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
import logging as stdlib_logging
import multiprocessing
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
import six
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
    """Raised when a caller supplies an argument that cannot be honored."""

    def __init__(self, message=None):
        # Direct base-class call; equivalent to the two-argument super()
        # form and keeps args == (message,) even when message is None.
        Exception.__init__(self, message)
class UnknownArgumentError(Exception):
    """Raised when unrecognized keyword arguments reach execute()."""

    def __init__(self, message=None):
        # Direct base-class call; same effect as the super() form and
        # keeps args == (message,) even when message is None.
        Exception.__init__(self, message)
class ProcessExecutionError(Exception):
    """Raised when a spawned command exits with an unexpected status.

    The raw ``stdout``/``stderr``/``exit_code``/``cmd``/``description``
    values are preserved as attributes exactly as the caller passed them
    (possibly None); display defaults are substituted only into the
    formatted exception message.
    """

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        # Keep the caller's values verbatim before defaulting for display.
        self.stdout = stdout
        self.stderr = stderr
        self.exit_code = exit_code
        self.cmd = cmd
        self.description = description

        if description is None:
            description = _("Unexpected error while running command.")
        if exit_code is None:
            exit_code = '-'
        details = {'description': description,
                   'cmd': cmd,
                   'exit_code': exit_code,
                   'stdout': stdout,
                   'stderr': stderr}
        message = _('%(description)s\n'
                    'Command: %(cmd)s\n'
                    'Exit code: %(exit_code)s\n'
                    'Stdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % details
        Exception.__init__(self, message)
class NoRootWrapSpecified(Exception):
    """Raised when run_as_root is requested without a root_helper command."""

    def __init__(self, message=None):
        # Direct base-class call; same effect as the super() form and
        # keeps args == (message,) even when message is None.
        Exception.__init__(self, message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param env_variables: Environment variables and their values that
                          will be set for the process.
    :type env_variables: dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes. Defaults to [0]. Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True. If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :param loglevel: log level for execute commands.
    :type loglevel: int.  (Should be stdlib_logging.DEBUG or
                    stdlib_logging.INFO)
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """

    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)

    # Normalize check_exit_code to a list of allowed codes; a bare bool
    # means "ignore the exit code entirely" when False.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    # BUG FIX: previously `cmd = map(str, cmd)`. On Python 3 map() returns a
    # one-shot iterator that the `' '.join(cmd)` logging call below exhausts,
    # leaving Popen an empty argument list and breaking retries. Materialize
    # a real list instead.
    cmd = [str(c) for c in cmd]

    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, 'Running cmd (subprocess): %s',
                    logging.mask_password(' '.join(cmd)))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                # preexec_fn and close_fds are POSIX-only features.
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell,
                                   env=env_variables)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except OSError as e:
                    # Retry only on transient errnos; re-raise anything else.
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            # Lazy %-style args so formatting is skipped when the level is off.
            LOG.log(loglevel, 'Result was %s', _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, '%r failed. Retrying.', cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def trycmd(*args, **kwargs):
    """A wrapper around execute() to more easily handle warnings and errors.

    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr. If 'err' is not empty then the
    command can be considered to have failed.

    :discard_warnings True | False. Defaults to False. If set to True,
                      then for succeeding commands, stderr is cleared
    """
    discard_warnings = kwargs.pop('discard_warnings', False)

    failed = False
    try:
        out, err = execute(*args, **kwargs)
    except ProcessExecutionError as exn:
        out = ''
        err = six.text_type(exn)
        failed = True

    if discard_warnings and err and not failed:
        # The command succeeded but wrote to stderr: treat the noise as clean.
        err = ''

    return out, err
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run *cmd* over an established SSH connection; return (stdout, stderr)."""
    LOG.debug('Running cmd (SSH): %s', cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))
    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin, stdout_pipe, stderr_pipe = ssh.exec_command(cmd)
    channel = stdout_pipe.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_pipe.read()
    stderr = stderr_pipe.read()
    stdin.close()

    exit_status = channel.recv_exit_status()
    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug('Result was %s' % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
def get_worker_count():
    """Utility to get the default worker count.

    @return: The number of CPUs if that can be determined, else a default
    worker count of 1 is returned.
    """
    try:
        workers = multiprocessing.cpu_count()
    except NotImplementedError:
        # cpu_count() can't always be determined; fall back to one worker.
        workers = 1
    return workers
| 37.856643 | 78 | 0.578738 |
ace43ed1ce16241dd7bb52c66bdc2a0f44be2da7 | 36 | py | Python | webnotifier/__init__.py | polmp/webnotifier | 92bdf4e7a1744e78a1eeff2c2cd9cc6b93052f2e | [
"MIT"
] | null | null | null | webnotifier/__init__.py | polmp/webnotifier | 92bdf4e7a1744e78a1eeff2c2cd9cc6b93052f2e | [
"MIT"
] | null | null | null | webnotifier/__init__.py | polmp/webnotifier | 92bdf4e7a1744e78a1eeff2c2cd9cc6b93052f2e | [
"MIT"
] | null | null | null | from .webnotifier import WebNotifier | 36 | 36 | 0.888889 |
ace43f4c115c9ff8651b3ba3d1d9c2b22846e92d | 933 | py | Python | rally_openstack/exceptions.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | rally_openstack/exceptions.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | rally_openstack/exceptions.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | 1 | 2021-08-10T03:11:51.000Z | 2021-08-10T03:11:51.000Z | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import exceptions as rally_exceptions
RallyException = rally_exceptions.RallyException
class AuthenticationFailed(rally_exceptions.InvalidArgumentsException):
error_code = 220
msg_fmt = ("Failed to authenticate to %(url)s for user '%(username)s'"
" in project '%(project)s': %(etype)s: %(error)s")
| 38.875 | 78 | 0.725616 |
ace43fef27635484633e6257ae378a35ce4bcb16 | 91,862 | py | Python | packages/python/plotly/plotly/graph_objs/heatmap/__init__.py | lucasiscovici/plotly.py | 72d30946fbcb238a0b88cff4d087072a781c903a | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/heatmap/__init__.py | lucasiscovici/plotly.py | 72d30946fbcb238a0b88cff4d087072a781c903a | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/heatmap/__init__.py | lucasiscovici/plotly.py | 72d30946fbcb238a0b88cff4d087072a781c903a | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
    """Streaming options for a heatmap trace (the ``heatmap.stream`` node).

    NOTE(review): this class follows plotly's code-generated model pattern;
    keep structure in sync with ``plotly.validators.heatmap.stream``.
    """

    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Sets the maximum number of points to keep on the plots from an
        incoming stream. If `maxpoints` is set to 50, only the newest
        50 points will be displayed on the plot.

        The 'maxpoints' property is a number and may be specified as:
          - An int or float in the interval [0, 10000]

        Returns
        -------
        int|float
        """
        return self["maxpoints"]

    @maxpoints.setter
    def maxpoints(self, val):
        self["maxpoints"] = val

    # token
    # -----
    @property
    def token(self):
        """
        The stream id number links a data trace on a plot with a
        stream. See https://plot.ly/settings for more details.

        The 'token' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["token"]

    @token.setter
    def token(self, val):
        self["token"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of the parent node in the figure schema.
        return "heatmap"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://plot.ly/settings for more
            details.
        """

    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.heatmap.Stream
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://plot.ly/settings for more
            details.

        Returns
        -------
        Stream
        """
        super(Stream, self).__init__("stream")

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.heatmap.Stream
constructor must be a dict or
an instance of plotly.graph_objs.heatmap.Stream"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Import validators
        # -----------------
        from plotly.validators.heatmap import stream as v_stream

        # Initialize validators
        # ---------------------
        self._validators["maxpoints"] = v_stream.MaxpointsValidator()
        self._validators["token"] = v_stream.TokenValidator()

        # Populate data dict with properties
        # ----------------------------------
        # NOTE: arg.pop must run unconditionally (even when the keyword wins)
        # so _process_kwargs below does not re-apply the key from `arg`.
        _v = arg.pop("maxpoints", None)
        self["maxpoints"] = maxpoints if maxpoints is not None else _v
        _v = arg.pop("token", None)
        self["token"] = token if token is not None else _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    """Hover-label styling for a heatmap trace (the ``heatmap.hoverlabel`` node).

    NOTE(review): this class follows plotly's code-generated model pattern;
    keep structure in sync with ``plotly.validators.heatmap.hoverlabel``.
    """

    # align
    # -----
    @property
    def align(self):
        """
        Sets the horizontal alignment of the text content within hover
        label box. Has an effect only if the hover label text spans
        more two or more lines

        The 'align' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'right', 'auto']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["align"]

    @align.setter
    def align(self, val):
        self["align"] = val

    # alignsrc
    # --------
    @property
    def alignsrc(self):
        """
        Sets the source reference on plot.ly for  align .

        The 'alignsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["alignsrc"]

    @alignsrc.setter
    def alignsrc(self, val):
        self["alignsrc"] = val

    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        Sets the background color of the hover labels for this trace

        The 'bgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bgcolor"]

    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val

    # bgcolorsrc
    # ----------
    @property
    def bgcolorsrc(self):
        """
        Sets the source reference on plot.ly for  bgcolor .

        The 'bgcolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bgcolorsrc"]

    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self["bgcolorsrc"] = val

    # bordercolor
    # -----------
    @property
    def bordercolor(self):
        """
        Sets the border color of the hover labels for this trace.

        The 'bordercolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bordercolor"]

    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val

    # bordercolorsrc
    # --------------
    @property
    def bordercolorsrc(self):
        """
        Sets the source reference on plot.ly for  bordercolor .

        The 'bordercolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bordercolorsrc"]

    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        self["bordercolorsrc"] = val

    # font
    # ----
    @property
    def font(self):
        """
        Sets the font used in hover labels.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of plotly.graph_objs.heatmap.hoverlabel.Font
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color

                colorsrc
                    Sets the source reference on plot.ly for  color
                    .
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The plotly service (at https://plot.ly
                    or on-premise) generates images on a server,
                    where only a select number of fonts are
                    installed and supported. These include "Arial",
                    "Balto", "Courier New", "Droid Sans",, "Droid
                    Serif", "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                familysrc
                    Sets the source reference on plot.ly for
                    family .
                size

                sizesrc
                    Sets the source reference on plot.ly for  size
                    .

        Returns
        -------
        plotly.graph_objs.heatmap.hoverlabel.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # namelength
    # ----------
    @property
    def namelength(self):
        """
        Sets the default length (in number of characters) of the trace
        name in the hover labels for all traces. -1 shows the whole
        name regardless of length. 0-3 shows the first 0-3 characters,
        and an integer >3 will show the whole name if it is less than
        that many characters, but if it is longer, will truncate to
        `namelength - 3` characters and add an ellipsis.

        The 'namelength' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [-1, 9223372036854775807]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|numpy.ndarray
        """
        return self["namelength"]

    @namelength.setter
    def namelength(self, val):
        self["namelength"] = val

    # namelengthsrc
    # -------------
    @property
    def namelengthsrc(self):
        """
        Sets the source reference on plot.ly for  namelength .

        The 'namelengthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["namelengthsrc"]

    @namelengthsrc.setter
    def namelengthsrc(self, val):
        self["namelengthsrc"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of the parent node in the figure schema.
        return "heatmap"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on plot.ly for align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on plot.ly for bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on plot.ly for bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on plot.ly for namelength .
        """

    def __init__(
        self,
        arg=None,
        align=None,
        alignsrc=None,
        bgcolor=None,
        bgcolorsrc=None,
        bordercolor=None,
        bordercolorsrc=None,
        font=None,
        namelength=None,
        namelengthsrc=None,
        **kwargs
    ):
        """
        Construct a new Hoverlabel object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.heatmap.Hoverlabel
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on plot.ly for align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on plot.ly for bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on plot.ly for bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on plot.ly for namelength .

        Returns
        -------
        Hoverlabel
        """
        super(Hoverlabel, self).__init__("hoverlabel")

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.heatmap.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.heatmap.Hoverlabel"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Import validators
        # -----------------
        from plotly.validators.heatmap import hoverlabel as v_hoverlabel

        # Initialize validators
        # ---------------------
        self._validators["align"] = v_hoverlabel.AlignValidator()
        self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
        self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
        self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
        self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
        self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
        self._validators["font"] = v_hoverlabel.FontValidator()
        self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
        self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()

        # Populate data dict with properties
        # ----------------------------------
        # NOTE: arg.pop must run unconditionally (even when the keyword wins)
        # so _process_kwargs below does not re-apply the key from `arg`.
        _v = arg.pop("align", None)
        self["align"] = align if align is not None else _v
        _v = arg.pop("alignsrc", None)
        self["alignsrc"] = alignsrc if alignsrc is not None else _v
        _v = arg.pop("bgcolor", None)
        self["bgcolor"] = bgcolor if bgcolor is not None else _v
        _v = arg.pop("bgcolorsrc", None)
        self["bgcolorsrc"] = bgcolorsrc if bgcolorsrc is not None else _v
        _v = arg.pop("bordercolor", None)
        self["bordercolor"] = bordercolor if bordercolor is not None else _v
        _v = arg.pop("bordercolorsrc", None)
        self["bordercolorsrc"] = bordercolorsrc if bordercolorsrc is not None else _v
        _v = arg.pop("font", None)
        self["font"] = font if font is not None else _v
        _v = arg.pop("namelength", None)
        self["namelength"] = namelength if namelength is not None else _v
        _v = arg.pop("namelengthsrc", None)
        self["namelengthsrc"] = namelengthsrc if namelengthsrc is not None else _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of plotly.graph_objs.heatmap.colorbar.Tickfont
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.heatmap.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.heatmap.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.heatmap.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.data.heatmap.colorbar.tickformatstopdefaults),
sets the default property values to use for elements of
heatmap.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of plotly.graph_objs.heatmap.colorbar.Tickformatstop
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.heatmap.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on plot.ly for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on plot.ly for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of plotly.graph_objs.heatmap.colorbar.Title
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.heatmap.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use heatmap.colorbar.title.font instead.
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.heatmap.colorbar.title.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use heatmap.colorbar.title.side instead.
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@tick.setter
def yanchor(self, val):
self["yanchor"] = val
# yanchor
# -------
@property
def ticktextside(self):
"""
Sets the side of ticktext
The 'ticktextside' property is an enumeration that may be specified as:
- One of the following enumeration values:
['lefr', 'right']
Returns
-------
Any
"""
return self["ticktextside"]
@ticktextside.setter
def ticktextside(self, val):
self["ticktextside"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of this object's parent in the figure hierarchy;
        # used by the plotly base classes to locate validators/defaults.
        return "heatmap"
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text reference for every colorbar property; the plotly
        # base class splices this into the generated constructor docstring.
        # NOTE: the returned string is runtime data, not a docstring —
        # it must be kept verbatim.
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format And for
            dates see: https://github.com/d3/d3-3.x-api-
            reference/blob/master/Time-Formatting.md#format We add
            one item to d3's date formatter: "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of
            plotly.graph_objects.heatmap.colorbar.Tickformatstop
            instances or dicts with compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.heatma
            p.colorbar.tickformatstopdefaults), sets the default
            property values to use for elements of
            heatmap.colorbar.tickformatstops
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on plot.ly for ticktext .
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on plot.ly for tickvals .
        tickwidth
            Sets the tick width (in px).
        title
            plotly.graph_objects.heatmap.colorbar.Title instance or
            dict with compatible properties
        titlefont
            Deprecated: Please use heatmap.colorbar.title.font
            instead. Sets this color bar's title font. Note that
            the title's font used to be set by the now deprecated
            `titlefont` attribute.
        titleside
            Deprecated: Please use heatmap.colorbar.title.side
            instead. Determines the location of color bar's title
            with respect to the color bar. Note that the title's
            location used to be set by the now deprecated
            `titleside` attribute.
        x
            Sets the x position of the color bar (in plot
            fraction).
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar.
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        y
            Sets the y position of the color bar (in plot
            fraction).
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar.
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        """
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
ticktextside=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.heatmap.ColorBar
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of
plotly.graph_objects.heatmap.colorbar.Tickformatstop
instances or dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.heatma
p.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
heatmap.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on plot.ly for ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objects.heatmap.colorbar.Title instance or
dict with compatible properties
titlefont
Deprecated: Please use heatmap.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use heatmap.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmap.ColorBar
constructor must be a dict or
an instance of plotly.graph_objs.heatmap.ColorBar"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.heatmap import colorbar as v_colorbar
# Initialize validators
# ---------------------
self._validators["bgcolor"] = v_colorbar.BgcolorValidator()
self._validators["bordercolor"] = v_colorbar.BordercolorValidator()
self._validators["borderwidth"] = v_colorbar.BorderwidthValidator()
self._validators["dtick"] = v_colorbar.DtickValidator()
self._validators["exponentformat"] = v_colorbar.ExponentformatValidator()
self._validators["len"] = v_colorbar.LenValidator()
self._validators["lenmode"] = v_colorbar.LenmodeValidator()
self._validators["nticks"] = v_colorbar.NticksValidator()
self._validators["outlinecolor"] = v_colorbar.OutlinecolorValidator()
self._validators["outlinewidth"] = v_colorbar.OutlinewidthValidator()
self._validators["separatethousands"] = v_colorbar.SeparatethousandsValidator()
self._validators["showexponent"] = v_colorbar.ShowexponentValidator()
self._validators["showticklabels"] = v_colorbar.ShowticklabelsValidator()
self._validators["showtickprefix"] = v_colorbar.ShowtickprefixValidator()
self._validators["showticksuffix"] = v_colorbar.ShowticksuffixValidator()
self._validators["thickness"] = v_colorbar.ThicknessValidator()
self._validators["thicknessmode"] = v_colorbar.ThicknessmodeValidator()
self._validators["tick0"] = v_colorbar.Tick0Validator()
self._validators["tickangle"] = v_colorbar.TickangleValidator()
self._validators["tickcolor"] = v_colorbar.TickcolorValidator()
self._validators["tickfont"] = v_colorbar.TickfontValidator()
self._validators["tickformat"] = v_colorbar.TickformatValidator()
self._validators["tickformatstops"] = v_colorbar.TickformatstopsValidator()
self._validators[
"tickformatstopdefaults"
] = v_colorbar.TickformatstopValidator()
self._validators["ticklen"] = v_colorbar.TicklenValidator()
self._validators["tickmode"] = v_colorbar.TickmodeValidator()
self._validators["tickprefix"] = v_colorbar.TickprefixValidator()
self._validators["ticks"] = v_colorbar.TicksValidator()
self._validators["ticksuffix"] = v_colorbar.TicksuffixValidator()
self._validators["ticktext"] = v_colorbar.TicktextValidator()
self._validators["ticktextsrc"] = v_colorbar.TicktextsrcValidator()
self._validators["tickvals"] = v_colorbar.TickvalsValidator()
self._validators["tickvalssrc"] = v_colorbar.TickvalssrcValidator()
self._validators["tickwidth"] = v_colorbar.TickwidthValidator()
self._validators["title"] = v_colorbar.TitleValidator()
self._validators["x"] = v_colorbar.XValidator()
self._validators["xanchor"] = v_colorbar.XanchorValidator()
self._validators["xpad"] = v_colorbar.XpadValidator()
self._validators["y"] = v_colorbar.YValidator()
self._validators["yanchor"] = v_colorbar.YanchorValidator()
self._validators["ypad"] = v_colorbar.YpadValidator()
self._validators["ticktextside"]=v_colorbar.TickTextSideValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("borderwidth", None)
self["borderwidth"] = borderwidth if borderwidth is not None else _v
_v = arg.pop("dtick", None)
self["dtick"] = dtick if dtick is not None else _v
_v = arg.pop("exponentformat", None)
self["exponentformat"] = exponentformat if exponentformat is not None else _v
_v = arg.pop("len", None)
self["len"] = len if len is not None else _v
_v = arg.pop("lenmode", None)
self["lenmode"] = lenmode if lenmode is not None else _v
_v = arg.pop("nticks", None)
self["nticks"] = nticks if nticks is not None else _v
_v = arg.pop("outlinecolor", None)
self["outlinecolor"] = outlinecolor if outlinecolor is not None else _v
_v = arg.pop("outlinewidth", None)
self["outlinewidth"] = outlinewidth if outlinewidth is not None else _v
_v = arg.pop("separatethousands", None)
self["separatethousands"] = (
separatethousands if separatethousands is not None else _v
)
_v = arg.pop("showexponent", None)
self["showexponent"] = showexponent if showexponent is not None else _v
_v = arg.pop("showticklabels", None)
self["showticklabels"] = showticklabels if showticklabels is not None else _v
_v = arg.pop("showtickprefix", None)
self["showtickprefix"] = showtickprefix if showtickprefix is not None else _v
_v = arg.pop("showticksuffix", None)
self["showticksuffix"] = showticksuffix if showticksuffix is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("thicknessmode", None)
self["thicknessmode"] = thicknessmode if thicknessmode is not None else _v
_v = arg.pop("tick0", None)
self["tick0"] = tick0 if tick0 is not None else _v
_v = arg.pop("tickangle", None)
self["tickangle"] = tickangle if tickangle is not None else _v
_v = arg.pop("tickcolor", None)
self["tickcolor"] = tickcolor if tickcolor is not None else _v
_v = arg.pop("tickfont", None)
self["tickfont"] = tickfont if tickfont is not None else _v
_v = arg.pop("tickformat", None)
self["tickformat"] = tickformat if tickformat is not None else _v
_v = arg.pop("tickformatstops", None)
self["tickformatstops"] = tickformatstops if tickformatstops is not None else _v
_v = arg.pop("tickformatstopdefaults", None)
self["tickformatstopdefaults"] = (
tickformatstopdefaults if tickformatstopdefaults is not None else _v
)
_v = arg.pop("ticklen", None)
self["ticklen"] = ticklen if ticklen is not None else _v
_v = arg.pop("tickmode", None)
self["tickmode"] = tickmode if tickmode is not None else _v
_v = arg.pop("tickprefix", None)
self["tickprefix"] = tickprefix if tickprefix is not None else _v
_v = arg.pop("ticks", None)
self["ticks"] = ticks if ticks is not None else _v
_v = arg.pop("ticksuffix", None)
self["ticksuffix"] = ticksuffix if ticksuffix is not None else _v
_v = arg.pop("ticktext", None)
self["ticktext"] = ticktext if ticktext is not None else _v
_v = arg.pop("ticktextsrc", None)
self["ticktextsrc"] = ticktextsrc if ticktextsrc is not None else _v
_v = arg.pop("tickvals", None)
self["tickvals"] = tickvals if tickvals is not None else _v
_v = arg.pop("tickvalssrc", None)
self["tickvalssrc"] = tickvalssrc if tickvalssrc is not None else _v
_v = arg.pop("tickwidth", None)
self["tickwidth"] = tickwidth if tickwidth is not None else _v
_v = arg.pop("title", None)
self["title"] = title if title is not None else _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("xanchor", None)
self["xanchor"] = xanchor if xanchor is not None else _v
_v = arg.pop("xpad", None)
self["xpad"] = xpad if xpad is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
_v = arg.pop("yanchor", None)
self["yanchor"] = yanchor if yanchor is not None else _v
_v = arg.pop("ypad", None)
self["ypad"] = ypad if ypad is not None else _v
_v = arg.pop("ticktextside", None)
self["ticktextside"] = ticktextside if ticktextside is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["ColorBar", "Hoverlabel", "Stream", "colorbar", "hoverlabel"]
from plotly.graph_objs.heatmap import hoverlabel
from plotly.graph_objs.heatmap import colorbar
| 36.686102 | 93 | 0.569659 |
ace44117dba823be6f53fb8c928e1b912d9d1d16 | 14,469 | py | Python | google/appengine/ext/datastore_admin/main.py | airamrguez/appengine-1.8.0-golang-1.4.3-os-x-64bit | c2034c8ed4d218e1f85831df9bc986285ebf8661 | [
"Apache-2.0"
] | 1 | 2015-01-04T16:58:48.000Z | 2015-01-04T16:58:48.000Z | google/appengine/ext/datastore_admin/main.py | airamrguez/appengine-1.8.0-golang-1.4.3-os-x-64bit | c2034c8ed4d218e1f85831df9bc986285ebf8661 | [
"Apache-2.0"
] | null | null | null | google/appengine/ext/datastore_admin/main.py | airamrguez/appengine-1.8.0-golang-1.4.3-os-x-64bit | c2034c8ed4d218e1f85831df9bc986285ebf8661 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Main module for datastore admin receiver.
To use, add this to app.yaml:
builtins:
- datastore_admin: on
"""
import logging
import operator
import os
import time
from google.appengine.api import app_identity
from google.appengine.api import datastore_errors
from google.appengine.api import users
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.datastore_admin import backup_handler
from google.appengine.ext.datastore_admin import config
from google.appengine.ext.datastore_admin import copy_handler
from google.appengine.ext.datastore_admin import delete_handler
from google.appengine.ext.datastore_admin import utils
from google.appengine.ext.db import stats
from google.appengine.ext.db import metadata
from google.appengine.ext.webapp import util
ENTITY_ACTIONS = {
'Copy to Another App': copy_handler.ConfirmCopyHandler.Render,
'Delete Entities': delete_handler.ConfirmDeleteHandler.Render,
'Backup Entities': backup_handler.ConfirmBackupHandler.Render,
}
BACKUP_ACTIONS = {
'Delete': backup_handler.ConfirmDeleteBackupHandler.Render,
'Restore': backup_handler.ConfirmRestoreFromBackupHandler.Render,
'Info': backup_handler.BackupInformationHandler.Render,
}
PENDING_BACKUP_ACTIONS = {
'Abort': backup_handler.ConfirmAbortBackupHandler.Render,
'Info': backup_handler.BackupInformationHandler.Render,
}
GET_ACTIONS = ENTITY_ACTIONS.copy()
GET_ACTIONS.update(BACKUP_ACTIONS)
GET_ACTIONS.update(PENDING_BACKUP_ACTIONS)
GET_ACTIONS.update({'Import Backup Information':
backup_handler.ConfirmBackupImportHandler.Render})
MAX_RPCS = 10
def _GetDatastoreStats(kinds_list, use_stats_kinds=False):
  """Retrieves stats for kinds.

  Args:
    kinds_list: List of known kinds.
    use_stats_kinds: If stats are available, kinds_list will be ignored and
      all kinds found in stats will be used instead.

  Returns:
    timestamp: records time that statistics were last updated.
    kind_dict: dictionary of kind objects with the following members:
    - kind_name: the name of this kind.
    - count: number of known entities of this type.
    - total_bytes_str: total bytes for this kind as a string.
    - average_bytes_str: average bytes per entity as a string.
  """
  # No GlobalStat entity means datastore statistics have never been computed
  # for this app; fall back to bare kind names with no numbers.
  global_stat = stats.GlobalStat.all().fetch(1)
  if not global_stat:
    return _KindsListToTuple(kinds_list)

  global_ts = global_stat[0].timestamp

  # Only accept per-kind stats from the same snapshot as the global stat so
  # all displayed numbers are mutually consistent.
  kind_stats = stats.KindStat.all().filter('timestamp =', global_ts).fetch(1000)
  if not kind_stats:
    return _KindsListToTuple(kinds_list)

  results = {}
  for kind_ent in kind_stats:
    # Skip internal kinds (double-underscore prefix) and empty kinds; when
    # use_stats_kinds is set, kinds_list is deliberately ignored.
    if (not kind_ent.kind_name.startswith('__')
        and (use_stats_kinds or kind_ent.kind_name in kinds_list)
        and kind_ent.count > 0):
      results[kind_ent.kind_name] = _PresentatableKindStats(kind_ent)

  utils.CacheStats(results.values())

  # Kinds we know about that have no stats row still get a placeholder
  # entry so they show up in the UI.
  for kind_str in kinds_list or []:
    if kind_str not in results:
      results[kind_str] = {'kind_name': kind_str}

  return (global_ts,
          sorted(results.values(), key=lambda x: x['kind_name']))
def _KindsListToTuple(kinds_list):
"""Build default tuple when no datastore statistics are available. """
return '', [{'kind_name': kind} for kind in sorted(kinds_list)]
def _PresentatableKindStats(kind_ent):
  """Generate dict of presentable values for template.

  Args:
    kind_ent: a stats.KindStat entity for a single kind (assumed to have a
      positive count — the caller filters out empty kinds).

  Returns:
    A dict with formatted count/size strings plus the raw byte totals,
    keyed as the list_actions.html template expects.
  """
  count = kind_ent.count
  entity_bytes = kind_ent.entity_bytes
  total_bytes = kind_ent.bytes
  # Explicit floor division: preserves the historical Python 2 `/` integer
  # semantics and keeps the result an int if this file ever runs under
  # Python 3 (or `from __future__ import division`).
  average_bytes = entity_bytes // count
  return {'kind_name': kind_ent.kind_name,
          'count': utils.FormatThousands(count),
          'entity_bytes_str': utils.GetPrettyBytes(entity_bytes),
          'entity_bytes': entity_bytes,
          'total_bytes_str': utils.GetPrettyBytes(total_bytes),
          'total_bytes': total_bytes,
          'average_bytes_str': utils.GetPrettyBytes(average_bytes),
         }
class RouteByActionHandler(webapp.RequestHandler):
  """Route to the appropriate handler based on the action parameter."""

  def ListActions(self, error=None):
    """Handler for get requests to datastore_admin/confirm_delete.

    Renders the landing page: per-kind statistics plus the lists of
    available actions, running/completed operations, and backups.
    """
    use_stats_kinds = False
    kinds = []
    more_kinds = False
    try:
      kinds, more_kinds = self.GetKinds()
      if not kinds:
        use_stats_kinds = True
        logging.warning('Found no kinds. Using datastore stats instead.')
    except datastore_errors.Error, e:
      # Metadata queries can fail or time out; datastore statistics are a
      # usable fallback source of kind names.
      logging.exception(e)
      use_stats_kinds = True

    last_stats_update, kind_stats = _GetDatastoreStats(
        kinds, use_stats_kinds=use_stats_kinds)

    template_params = {
        'run_as_a_service': self.request.get('run_as_a_service'),
        'datastore_admin_home': utils.GenerateHomeUrl(None),
        'offer_service': (self.request.get('service') and not
                          self.request.get('run_as_a_service')),
        'kind_stats': kind_stats,
        'more_kinds': more_kinds,
        'last_stats_update': last_stats_update,
        'app_id': self.request.get('app_id'),
        'hosting_app_id': app_identity.get_application_id(),
        'has_namespace': self.request.get('namespace', None) is not None,
        'namespace': self.request.get('namespace'),
        'action_list': sorted(ENTITY_ACTIONS.keys()),
        'backup_action_list': sorted(BACKUP_ACTIONS.keys()),
        'pending_backup_action_list': sorted(PENDING_BACKUP_ACTIONS.keys()),
        'error': error,
        'completed_operations': self.GetOperations(active=False),
        'active_operations': self.GetOperations(active=True),
        'pending_backups': self.GetPendingBackups(),
        'backups': self.GetBackups(),
        'map_reduce_path': config.MAPREDUCE_PATH + '/detail'
    }
    utils.RenderToResponse(self, 'list_actions.html', template_params)

  def RouteAction(self, action_dict):
    """Dispatch to the handler registered for the 'action' parameter.

    Falls back to the landing page when no action (or an unknown action)
    is supplied.
    """
    action = self.request.get('action')
    if not action:
      self.ListActions(error=self.request.get('error', None))
    elif action not in action_dict:
      error = '%s is not a valid action.' % action
      self.ListActions(error=error)
    else:
      action_dict[action](self)

  def get(self):
    self.RouteAction(GET_ACTIONS)

  def post(self):
    self.RouteAction(GET_ACTIONS)

  def GetKinds(self, all_ns=True, deadline=40):
    """Obtain a list of all kind names from the datastore.

    Args:
      all_ns: If true, list kind names for all namespaces.
        If false, list kind names only for the current namespace.
      deadline: maximum number of seconds to spend getting kinds.

    Returns:
      kinds: an alphabetized list of kinds for the specified namespace(s).
      more_kinds: a boolean indicating whether there may be additional kinds
        not included in 'kinds' (e.g. because the query deadline was reached).
    """
    if all_ns:
      kinds, more_kinds = self.GetKindsForAllNamespaces(deadline)
    else:
      kinds, more_kinds = self.GetKindsForCurrentNamespace(deadline)
    return kinds, more_kinds

  def GetKindsForAllNamespaces(self, deadline):
    """Obtain a list of all kind names from the datastore.

    Pulls kinds from all namespaces. The result is deduped and alphabetized.

    Args:
      deadline: maximum number of seconds to spend getting kinds.

    Returns:
      kinds: an alphabetized list of kinds for the specified namespace(s).
      more_kinds: a boolean indicating whether there may be additional kinds
        not included in 'kinds' (e.g. because the query deadline was reached).
    """
    start = time.time()
    kind_name_set = set()

    def ReadFromKindIters(kind_iter_list):
      """Read kinds from a list of iterators.

      Reads a kind from each iterator in kind_iter_list, adds it to
      kind_name_set, and removes any completed iterators.

      Args:
        kind_iter_list: a list of iterators of kinds.
      """
      completed = []
      for kind_iter in kind_iter_list:
        try:
          kind_name = kind_iter.next().kind_name
          if utils.IsKindNameVisible(kind_name):
            kind_name_set.add(kind_name)
        except StopIteration:
          completed.append(kind_iter)
      # Remove exhausted iterators outside the read loop so we never mutate
      # the list while iterating over it.
      for kind_iter in completed:
        kind_iter_list.remove(kind_iter)

    more_kinds = False
    try:
      namespace_iter = metadata.Namespace.all().run(batch_size=1000,
                                                    deadline=deadline)
      kind_iter_list = []
      for ns in namespace_iter:
        # Shrink per-namespace deadlines so the overall budget is honored.
        remaining = deadline - (time.time() - start)
        if remaining <= 0:
          raise datastore_errors.Timeout
        kind_iter_list.append(metadata.Kind.all(namespace=ns.namespace_name)
                              .run(batch_size=1000, deadline=remaining))
        # Cap the number of concurrently outstanding kind queries at MAX_RPCS
        # by draining before starting another.
        while len(kind_iter_list) == MAX_RPCS:
          ReadFromKindIters(kind_iter_list)
      while kind_iter_list:
        ReadFromKindIters(kind_iter_list)
    except datastore_errors.Timeout:
      # Partial results are still useful; flag them as incomplete.
      more_kinds = True
      logging.warning('Failed to retrieve all kinds within deadline.')
    return sorted(kind_name_set), more_kinds

  def GetKindsForCurrentNamespace(self, deadline):
    """Obtain a list of all kind names from the datastore.

    Pulls kinds from the current namespace only. The result is alphabetized.

    Args:
      deadline: maximum number of seconds to spend getting kinds.

    Returns:
      kinds: an alphabetized list of kinds for the specified namespace(s).
      more_kinds: a boolean indicating whether there may be additional kinds
        not included in 'kinds' (e.g. because the query limit was reached).
    """
    more_kinds = False
    kind_names = []
    try:
      # Ordering by __key__ yields kind names already alphabetized.
      kinds = metadata.Kind.all().order('__key__').run(batch_size=1000,
                                                       deadline=deadline)
      for kind in kinds:
        kind_name = kind.kind_name
        if utils.IsKindNameVisible(kind_name):
          kind_names.append(kind_name)
    except datastore_errors.Timeout:
      more_kinds = True
      logging.warning('Failed to retrieve all kinds within deadline.')
    return kind_names, more_kinds

  def GetOperations(self, active=False, limit=100):
    """Obtain a list of operation, ordered by last_updated."""
    query = utils.DatastoreAdminOperation.all()
    if active:
      query.filter('status = ', utils.DatastoreAdminOperation.STATUS_ACTIVE)
    else:
      query.filter('status IN ', [
          utils.DatastoreAdminOperation.STATUS_COMPLETED,
          utils.DatastoreAdminOperation.STATUS_FAILED,
          utils.DatastoreAdminOperation.STATUS_ABORTED])
    # Over-fetch, then sort in memory by recency and trim to `limit`.
    operations = query.fetch(max(10000, limit) if limit else 1000)
    operations = sorted(operations, key=operator.attrgetter('last_updated'),
                        reverse=True)
    return operations[:limit]

  def GetBackups(self, limit=100):
    """Obtain a list of backups."""
    query = backup_handler.BackupInformation.all()
    # A set complete_time marks a finished backup.
    query.filter('complete_time > ', 0)
    backups = query.fetch(max(10000, limit) if limit else 1000)
    backups = sorted(backups, key=operator.attrgetter('complete_time'),
                     reverse=True)
    return backups[:limit]

  def GetPendingBackups(self, limit=100):
    """Obtain a list of pending backups."""
    query = backup_handler.BackupInformation.all()
    # complete_time is unset while a backup is still running.
    query.filter('complete_time = ', None)
    backups = query.fetch(max(10000, limit) if limit else 1000)
    backups = sorted(backups, key=operator.attrgetter('start_time'),
                     reverse=True)
    return backups[:limit]
class StaticResourceHandler(webapp.RequestHandler):
  """Read static files from disk."""

  _BASE_FILE_PATH = os.path.dirname(__file__)

  # Whitelist of servable resources mapped to their MIME types. Requests for
  # anything not listed here are rejected, which also prevents path
  # traversal via crafted URLs.
  _RESOURCE_MAP = {
      'static/js/compiled.js': 'text/javascript',
      'static/css/compiled.css': 'text/css',
      'static/img/help.gif': 'image/gif',
      'static/img/tip.png': 'image/png',
      'static/img/icn/icn-warning.gif': 'image/gif',
  }

  def get(self):
    """Serve one whitelisted static resource, or 404 if unknown."""
    relative_path = self.request.path.split(config.BASE_PATH + '/')[1]
    if relative_path not in self._RESOURCE_MAP:
      self.response.set_status(404)
      self.response.out.write('Resource not found.')
      return

    path = os.path.join(self._BASE_FILE_PATH, relative_path)
    self.response.headers['Cache-Control'] = 'public; max-age=300'
    self.response.headers['Content-Type'] = self._RESOURCE_MAP[relative_path]
    # Read via a context manager so the file handle is closed promptly
    # (the previous open(path).read() leaked the handle until GC).
    with open(path) as resource_file:
      contents = resource_file.read()
    if relative_path == 'static/css/compiled.css':
      # Rewrite absolute image URLs so the CSS still resolves when served
      # from under the datastore-admin base path.
      contents = contents.replace('url(/img/', 'url(../img/')
    self.response.out.write(contents)
class LoginRequiredHandler(webapp.RequestHandler):
  """Handle federated login identity selector page."""

  def get(self):
    """Redirect to the login page, preserving the 'continue' target URL."""
    destination = self.request.get('continue')
    if destination:
      self.redirect(users.create_login_url(destination))
    else:
      # A login redirect without a continue URL is a malformed request.
      self.error(400)
def CreateApplication():
  """Create new WSGIApplication and register all handlers.

  Returns:
    an instance of webapp.WSGIApplication with all mapreduce handlers
    registered.
  """
  # Route order matters: specific backup/copy/delete routes first, then the
  # static-file and login routes, with the catch-all action router last.
  return webapp.WSGIApplication(
      backup_handler.handlers_list(config.BASE_PATH) +
      copy_handler.handlers_list(config.BASE_PATH) +
      [(r'%s/%s' % (config.BASE_PATH,
                    delete_handler.ConfirmDeleteHandler.SUFFIX),
        delete_handler.ConfirmDeleteHandler),
       (r'%s/%s' % (config.BASE_PATH, delete_handler.DoDeleteHandler.SUFFIX),
        delete_handler.DoDeleteHandler),
       (r'%s/%s' % (config.BASE_PATH, utils.MapreduceDoneHandler.SUFFIX),
        utils.MapreduceDoneHandler),
       (config.DEFERRED_PATH, deferred.TaskHandler),
       (r'%s/static.*' % config.BASE_PATH, StaticResourceHandler),
       (r'/_ah/login_required', LoginRequiredHandler),
       (r'.*', RouteByActionHandler)])
def main():
util.run_wsgi_app(APP)
if __name__ == '__main__':
main()
| 33.648837 | 80 | 0.695971 |
ace4412217ca37aab0fd71c71c345b3b3f6c76ef | 608 | py | Python | binary_search_tree/search.py | sumitsk/leetcode | bb3527b08ca794dea2c9d071efc24b4276bd1c05 | [
"MIT"
] | null | null | null | binary_search_tree/search.py | sumitsk/leetcode | bb3527b08ca794dea2c9d071efc24b4276bd1c05 | [
"MIT"
] | null | null | null | binary_search_tree/search.py | sumitsk/leetcode | bb3527b08ca794dea2c9d071efc24b4276bd1c05 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def searchBST(self, root, val):
        """Return the subtree rooted at the node whose value equals val,
        or None if no such node exists.

        Standard iterative BST search: descend right when the current
        value is smaller than the target, left otherwise.

        :type root: TreeNode
        :type val: int
        :rtype: TreeNode
        """
        current = root
        while current is not None and current.val != val:
            current = current.right if current.val < val else current.left
        return current
| 24.32 | 36 | 0.457237 |
ace4418f20497968097b7146c35900b097a08677 | 46,078 | py | Python | basenji/seqnn.py | JasperSnoek/basenji | 24215ad91920a37d75b139fd5ffac6005b84b4fd | [
"Apache-2.0"
] | 1 | 2021-05-12T08:51:44.000Z | 2021-05-12T08:51:44.000Z | basenji/seqnn.py | JasperSnoek/basenji | 24215ad91920a37d75b139fd5ffac6005b84b4fd | [
"Apache-2.0"
] | null | null | null | basenji/seqnn.py | JasperSnoek/basenji | 24215ad91920a37d75b139fd5ffac6005b84b4fd | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
import gc
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import create_global_step
from basenji.dna_io import hot1_rc, hot1_augment
import basenji.ops
class SeqNN:
def __init__(self):
pass
def build(self, job):
###################################################
# model parameters and placeholders
###################################################
self.set_params(job)
# batches
self.inputs = tf.placeholder(tf.float32, shape=(self.batch_size, self.batch_length, self.seq_depth), name='inputs')
if self.target_classes == 1:
self.targets = tf.placeholder(tf.float32, shape=(self.batch_size, self.batch_length//self.target_pool, self.num_targets), name='targets')
else:
self.targets = tf.placeholder(tf.int32, shape=(self.batch_size, self.batch_length//self.target_pool, self.num_targets), name='targets')
self.targets_na = tf.placeholder(tf.bool, shape=(self.batch_size, self.batch_length//self.target_pool))
print('Targets pooled by %d to length %d' % (self.target_pool, self.batch_length//self.target_pool))
# dropout rates
self.cnn_dropout_ph = []
for li in range(self.cnn_layers):
self.cnn_dropout_ph.append(tf.placeholder(tf.float32))
if self.batch_renorm:
create_global_step()
RMAX_decay = basenji.ops.adjust_max(6000, 60000, 1, 3, name='RMAXDECAY')
DMAX_decay = basenji.ops.adjust_max(6000, 60000, 0, 5, name='DMAXDECAY')
renorm_clipping = {'rmin':1./RMAX_decay, 'rmax':RMAX_decay, 'dmax':DMAX_decay}
else:
renorm_clipping = {}
# training conditional
self.is_training = tf.placeholder(tf.bool)
###################################################
# convolution layers
###################################################
seq_length = self.batch_length
seq_depth = self.seq_depth
weights_regularizers = 0
self.layer_reprs = []
self.filter_weights = []
if self.save_reprs:
self.layer_reprs.append(self.inputs)
# reshape for convolution
# seqs_repr = tf.reshape(self.inputs, [self.batch_size, 1, seq_length, seq_depth])
seqs_repr = self.inputs
for li in range(self.cnn_layers):
with tf.variable_scope('cnn%d' % li) as vs:
seqs_repr_next = tf.layers.conv1d(seqs_repr, filters=self.cnn_filters[li], kernel_size=[self.cnn_filter_sizes[li]], strides=self.cnn_strides[li], padding='same', dilation_rate=[self.cnn_dilation[li]], use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), kernel_regularizer=None)
print('Convolution w/ %d %dx%d filters strided %d, dilated %d' % (self.cnn_filters[li], seq_depth, self.cnn_filter_sizes[li], self.cnn_strides[li], self.cnn_dilation[li]))
# regularize
# if self.cnn_l2[li] > 0:
# weights_regularizers += self.cnn_l2[li]*tf.reduce_mean(tf.nn.l2_loss(kernel))
# maintain a pointer to the weights
# self.filter_weights.append(kernel)
# batch normalization
seqs_repr_next = tf.layers.batch_normalization(seqs_repr_next, momentum=0.9, training=self.is_training, renorm=self.batch_renorm, renorm_clipping=renorm_clipping, renorm_momentum=0.9)
print('Batch normalization')
# ReLU
seqs_repr_next = tf.nn.relu(seqs_repr_next)
print('ReLU')
# pooling
if self.cnn_pool[li] > 1:
seqs_repr_next = tf.layers.max_pooling1d(seqs_repr_next, pool_size=self.cnn_pool[li], strides=self.cnn_pool[li], padding='same')
print('Max pool %d' % self.cnn_pool[li])
# dropout
if self.cnn_dropout[li] > 0:
seqs_repr_next = tf.nn.dropout(seqs_repr_next, 1.0-self.cnn_dropout_ph[li])
# seqs_repr = tf.layers.dropout(seqs_repr, rate=self.cnn_dropout[li], training=self.is_training)
print('Dropout w/ probability %.3f' % self.cnn_dropout[li])
# updates size variables
seq_length = seq_length // self.cnn_pool[li]
if self.cnn_dense[li]:
# concat layer repr
seqs_repr = tf.concat(values=[seqs_repr, seqs_repr_next], axis=2)
# update size variables
seq_depth += self.cnn_filters[li]
else:
# update layer repr
seqs_repr = seqs_repr_next
# update size variables
seq_depth = self.cnn_filters[li]
# save representation (not positive about this one)
if self.save_reprs:
self.layer_reprs.append(seqs_repr)
# update batch buffer to reflect pooling
pool_preds = self.batch_length // seq_length
if self.batch_buffer % pool_preds != 0:
print('Please make the batch_buffer %d divisible by the CNN pooling %d' % (self.batch_buffer, pool_preds), file=sys.stderr)
exit(1)
self.batch_buffer_pool = self.batch_buffer // pool_preds
###################################################
# slice out side buffer
###################################################
# predictions
seqs_repr = seqs_repr[:,self.batch_buffer_pool:seq_length-self.batch_buffer_pool,:]
seq_length -= 2*self.batch_buffer_pool
self.preds_length = seq_length
# targets
tstart = self.batch_buffer // self.target_pool
tend = (self.batch_length - self.batch_buffer) // self.target_pool
self.targets_op = tf.identity(self.targets[:,tstart:tend,:], name='targets_op')
###################################################
# final layer
###################################################
with tf.variable_scope('final'):
final_filters = self.num_targets*self.target_classes
seqs_repr = tf.layers.conv1d(seqs_repr, filters=final_filters, kernel_size=[1], padding='same', use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer(), kernel_regularizer=None)
print('Convolution w/ %d %dx1 filters to final targets' % (final_filters, seq_depth))
# if self.final_l1 > 0:
# weights_regularizers += self.final_l1*tf.reduce_mean(tf.abs(final_weights))
# expand length back out
if self.target_classes > 1:
seqs_repr = tf.reshape(seqs_repr, (self.batch_size, seq_length, self.num_targets, self.target_classes))
###################################################
# loss and optimization
###################################################
# work-around for specifying my own predictions
self.preds_adhoc = tf.placeholder(tf.float32, shape=seqs_repr.get_shape())
# choose link
if self.link in ['identity','linear']:
self.preds_op = tf.identity(seqs_repr, name='preds')
elif self.link == 'relu':
self.preds_op = tf.relu(seqs_repr, name='preds')
elif self.link == 'exp':
self.preds_op = tf.exp(tf.clip_by_value(seqs_repr,-50,50), name='preds')
elif self.link == 'exp_linear':
self.preds_op = tf.where(seqs_repr > 0, seqs_repr + 1, tf.exp(tf.clip_by_value(seqs_repr,-50,50)), name='preds')
elif self.link == 'softplus':
self.preds_op = tf.nn.softplus(seqs_repr, name='preds')
elif self.link == 'softmax':
# performed in the loss function, but saving probabilities
self.preds_prob = tf.nn.softmax(seqs_repr, name='preds')
else:
print('Unknown link function %s' % self.link, file=sys.stderr)
exit(1)
# clip
if self.target_clip is not None:
self.preds_op = tf.clip_by_value(self.preds_op, 0, self.target_clip)
self.targets_op = tf.clip_by_value(self.targets_op, 0, self.target_clip)
# sqrt
if self.target_sqrt:
self.preds_op = tf.sqrt(self.preds_op)
self.targets_op = tf.sqrt(self.targets_op)
# choose loss
if self.loss == 'gaussian':
self.loss_op = tf.squared_difference(self.preds_op, self.targets_op)
self.loss_adhoc = tf.squared_difference(self.preds_adhoc, self.targets_op)
elif self.loss == 'poisson':
self.loss_op = tf.nn.log_poisson_loss(self.targets_op, tf.log(self.preds_op), compute_full_loss=True)
self.loss_adhoc = tf.nn.log_poisson_loss(self.targets_op, tf.log(self.preds_adhoc), compute_full_loss=True)
elif self.loss == 'negative_binomial':
# define overdispersion alphas
self.alphas = tf.get_variable('alphas', shape=[self.num_targets], initializer=tf.constant_initializer(-5), dtype=tf.float32)
self.alphas = tf.nn.softplus(tf.clip_by_value(self.alphas,-50,50))
tf.summary.histogram('alphas', self.alphas)
for ti in np.linspace(0,self.num_targets-1,10).astype('int'):
tf.summary.scalar('alpha_t%d'%ti, self.alphas[ti])
# compute w/ inverse
k = 1. / self.alphas
# expand k
k_expand = tf.tile(k, [self.batch_size*seq_length])
k_expand = tf.reshape(k_expand, (self.batch_size, seq_length, self.num_targets))
# expand lgamma(k)
lgk_expand = tf.tile(tf.lgamma(k), [self.batch_size*seq_length])
lgk_expand = tf.reshape(lgk_expand, (self.batch_size, seq_length, self.num_targets))
# construct loss
loss1 = self.targets_op * tf.log(self.preds_op / (self.preds_op + k_expand))
loss2 = k_expand * tf.log(k_expand / (self.preds_op + k_expand))
loss3 = tf.lgamma(self.targets_op + k_expand) - lgk_expand
self.loss_op = -(loss1 + loss2 + loss3)
# adhoc
loss1 = self.targets_op * tf.log(self.preds_adhoc / (self.preds_adhoc + k_expand))
loss2 = k_expand * tf.log(k_expand / (self.preds_adhoc + k_expand))
self.loss_adhoc = -(loss1 + loss2 + loss3)
elif self.loss == 'negative_binomial_hilbe':
# define overdispersion alphas
self.alphas = tf.get_variable('alphas', shape=[self.num_targets], initializer=tf.constant_initializer(-5), dtype=tf.float32)
self.alphas = tf.exp(tf.clip_by_value(self.alphas,-50,50))
# expand
alphas_expand = tf.tile(self.alphas, [self.batch_size*seq_length])
alphas_expand = tf.reshape(alphas_expand, (self.batch_size, seq_length, self.num_targets))
# construct loss
loss1 = self.targets_op * tf.log(self.preds_op)
loss2 = (alphas_expand * self.targets_op + 1) / alphas_expand
loss3 = tf.log(alphas_expand * self.preds_op + 1)
self.loss_op = -loss1 + loss2*loss3
# adhoc
loss1 = self.targets_op * tf.log(self.preds_adhoc)
loss3 = tf.log(alphas_expand * self.preds_adhoc + 1)
self.loss_adhoc = -loss1 + loss2*loss3
elif self.loss == 'gamma':
# jchan document
self.loss_op = self.targets_op / self.preds_op + tf.log(self.preds_op)
self.loss_adhoc = self.targets_op / self.preds_adhoc + tf.log(self.preds_adhoc)
elif self.loss == 'cross_entropy':
self.loss_op = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=(self.targets_op-1), logits=self.preds_op)
self.loss_adhoc = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=(self.targets_op-1), logits=self.preds_adhoc)
else:
print('Cannot identify loss function %s' % self.loss)
exit(1)
# set NaN's to zero
# self.loss_op = tf.boolean_mask(self.loss_op, tf.logical_not(self.targets_na[:,tstart:tend]))
self.loss_op = tf.check_numerics(self.loss_op, 'Invalid loss', name='loss_check')
# reduce lossses by batch and position
self.loss_op = tf.reduce_mean(self.loss_op, axis=[0,1], name='target_loss')
self.loss_adhoc = tf.reduce_mean(self.loss_adhoc, axis=[0,1], name='target_loss_adhoc')
tf.summary.histogram('target_loss', self.loss_op)
for ti in np.linspace(0,self.num_targets-1,10).astype('int'):
tf.summary.scalar('loss_t%d'%ti, self.loss_op[ti])
self.target_losses = self.loss_op
self.target_losses_adhoc = self.loss_adhoc
# define target sigmas
'''
self.target_sigmas = tf.get_variable('target_sigmas', shape=[self.num_targets], initializer=tf.constant_initializer(2), dtype=tf.float32)
self.target_sigmas = tf.nn.softplus(tf.clip_by_value(self.target_sigmas,-50,50))
tf.summary.histogram('target_sigmas', self.target_sigmas)
for ti in np.linspace(0,self.num_targets-1,10).astype('int'):
tf.summary.scalar('sigma_t%d'%ti, self.target_sigmas[ti])
# self.target_sigmas = tf.ones(self.num_targets) / 2.
'''
# dot losses target sigmas
# self.loss_op = self.loss_op / (2*self.target_sigmas)
# self.loss_adhoc = self.loss_adhoc / (2*self.target_sigmas)
# fully reduce
self.loss_op = tf.reduce_mean(self.loss_op, name='loss')
self.loss_adhoc = tf.reduce_mean(self.loss_adhoc, name='loss_adhoc')
# add extraneous terms
self.loss_op += weights_regularizers # + tf.reduce_mean(tf.log(self.target_sigmas))
self.loss_adhoc += weights_regularizers # + tf.reduce_mean(tf.log(self.target_sigmas))
# track
tf.summary.scalar('loss', self.loss_op)
# define optimization
if self.optimization == 'adam':
self.opt = tf.train.AdamOptimizer(self.learning_rate, beta1=self.adam_beta1, beta2=self.adam_beta2, epsilon=self.adam_eps)
elif self.optimization == 'rmsprop':
self.opt = tf.train.RMSPropOptimizer(self.learning_rate, decay=self.decay, momentum=self.momentum)
elif self.optimization in ['sgd','momentum']:
self.opt = tf.train.MomentumOptimizer(self.learning_rate, momentum=self.momentum)
else:
print('Cannot recognize optimization algorithm %s' % self.optimization)
exit(1)
# compute gradients
self.gvs = self.opt.compute_gradients(self.loss_op, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
# clip gradients
if self.grad_clip is not None:
gradients, variables = zip(*self.gvs)
gradients, _ = tf.clip_by_global_norm(gradients, self.grad_clip)
self.gvs = zip(gradients, variables)
# apply gradients
self.step_op = self.opt.apply_gradients(self.gvs)
# batch norm helper
# if self.batch_renorm:
self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# summary
self.merged_summary = tf.summary.merge_all()
# initialize steps
self.step = 0
def drop_rate(self, drop_mult=0.5):
''' Drop the optimizer learning rate. '''
self.opt._lr *= drop_mult
def gradients(self, sess, batcher, target_indexes=None, layers=None, return_preds=False):
''' Compute predictions on a test set.
In
sess: TensorFlow session
batcher: Batcher class with sequence(s)
target_indexes: Optional target subset list
layers: Optional layer subset list
Out
grads: [S (sequences) x Li (layer i shape) x T (targets) array] * (L layers)
preds:
'''
# initialize target_indexes
if target_indexes is None:
target_indexes = np.array(range(self.num_targets))
elif type(target_indexes) != np.ndarray:
target_indexes = np.array(target_indexes)
# initialize gradients
# (I need a list for layers because the sizes are different within)
# (I'm using a list for targets because I need to compute them individually)
layer_grads = []
for lii in range(len(layers)):
layer_grads.append([])
for tii in range(len(target_indexes)):
layer_grads[lii].append([])
# initialize layers
if layers is None:
layers = range(1+self.cnn_layers)
elif type(layers) != list:
layers = [layers]
# initialize predictions
preds = None
if return_preds:
# determine non-buffer region
buf_start = self.batch_buffer // self.target_pool
buf_end = (self.batch_length - self.batch_buffer) // self.target_pool
buf_len = buf_end - buf_start
# initialize predictions
preds = np.zeros((batcher.num_seqs, buf_len, len(target_indexes)), dtype='float16')
# sequence index
si = 0
# setup feed dict for dropout
fd = self.set_mode('test')
# get first batch
Xb, _, _, Nb = batcher.next()
while Xb is not None:
# update feed dict
fd[self.inputs] = Xb
# predict
preds_batch = sess.run(self.preds_op, feed_dict=fd)
# compute gradients for each target individually
for tii in range(len(target_indexes)):
ti = target_indexes[tii]
# compute gradients over all positions
grads_op = tf.gradients(self.preds_op[:,:,ti], [self.layer_reprs[li] for li in layers])
grads_batch_raw = sess.run(grads_op, feed_dict=fd)
for lii in range(len(layers)):
# clean up
grads_batch = grads_batch_raw[lii][:Nb].astype('float16')
if grads_batch.shape[1] == 1:
grads_batch = grads_batch.squeeze(axis=1)
# save
layer_grads[lii][tii].append(grads_batch)
if return_preds:
# filter for specific targets
if target_indexes is not None:
preds_batch = preds_batch[:,:,target_indexes]
# accumulate predictions
preds[si:si+Nb,:,:] = preds_batch[:Nb,:,:]
# update sequence index
si += Nb
# next batch
Xb, _, _, Nb = batcher.next()
# reset training batcher
batcher.reset()
# stack into arrays
for lii in range(len(layers)):
for tii in range(len(target_indexes)):
# stack sequences
layer_grads[lii][tii] = np.vstack(layer_grads[lii][tii])
# transpose targets to back
layer_grads[lii] = np.array(layer_grads[lii])
if layer_grads[lii].ndim == 4:
# length dimension
layer_grads[lii] = np.transpose(layer_grads[lii], [1,2,3,0])
else:
# no length dimension
layer_grads[lii] = np.transpose(layer_grads[lii], [1,2,0])
if return_preds:
return layer_grads, preds
else:
return layer_grads
def gradients_pos(self, sess, batcher, position_indexes, target_indexes=None, layers=None, return_preds=False):
''' Compute predictions on a test set.
In
sess: TensorFlow session
batcher: Batcher class with sequence(s)
position_indexes: Optional position subset list
target_indexes: Optional target subset list
layers: Optional layer subset list
Out
grads: [S (sequences) x Li (layer i shape) x T (targets) array] * (L layers)
preds:
'''
# initialize target_indexes
if target_indexes is None:
target_indexes = np.array(range(self.num_targets))
elif type(target_indexes) != np.ndarray:
target_indexes = np.array(target_indexes)
# initialize layers
if layers is None:
layers = range(1+self.cnn_layers)
elif type(layers) != list:
layers = [layers]
# initialize gradients
# (I need a list for layers because the sizes are different within)
# (I'm using a list for positions/targets because I don't know the downstream object size)
layer_grads = []
for lii in range(len(layers)):
layer_grads.append([])
for pii in range(len(position_indexes)):
layer_grads[lii].append([])
for tii in range(len(target_indexes)):
layer_grads[lii][pii].append([])
# initialize layer reprs
layer_reprs = []
for lii in range(len(layers)):
layer_reprs.append([])
# initialize predictions
preds = None
if return_preds:
# determine non-buffer region
buf_start = self.batch_buffer // self.target_pool
buf_end = (self.batch_length - self.batch_buffer) // self.target_pool
buf_len = buf_end - buf_start
# initialize predictions
preds = np.zeros((batcher.num_seqs, buf_len, len(target_indexes)), dtype='float16')
# sequence index
si = 0
# setup feed dict for dropout
fd = self.set_mode('test')
# get first batch
Xb, _, _, Nb = batcher.next()
while Xb is not None:
# update feed dict
fd[self.inputs] = Xb
# predict (allegedly takes zero time beyond the first sequence?)
reprs_batch_raw, preds_batch = sess.run([self.layer_reprs, self.preds_op], feed_dict=fd)
# clean up layer repr
reprs_batch = reprs_batch_raw[layers[lii]][:Nb].astype('float16')
if reprs_batch.shape[1] == 1:
reprs_batch = reprs_batch.squeeze(axis=1)
# save repr
layer_reprs[lii].append(reprs_batch)
# for each target
t0 = time.time()
for tii in range(len(target_indexes)):
ti = target_indexes[tii]
# for each position
for pii in range(len(position_indexes)):
pi = position_indexes[pii]
# adjust for buffer
pi -= self.batch_buffer//self.target_pool
# compute gradients
grads_op = tf.gradients(self.preds_op[:,pi,ti], [self.layer_reprs[li] for li in layers])
grads_batch_raw = sess.run(grads_op, feed_dict=fd)
for lii in range(len(layers)):
# clean up
grads_batch = grads_batch_raw[lii][:Nb].astype('float16')
if grads_batch.shape[1] == 1:
grads_batch = grads_batch.squeeze(axis=1)
# save
layer_grads[lii][pii][tii].append(grads_batch)
if return_preds:
# filter for specific targets
if target_indexes is not None:
preds_batch = preds_batch[:,:,target_indexes]
# accumulate predictions
preds[si:si+Nb,:,:] = preds_batch[:Nb,:,:]
# update sequence index
si += Nb
# next batch
Xb, _, _, Nb = batcher.next()
# reset training batcher
batcher.reset()
gc.collect()
# stack into arrays
for lii in range(len(layers)):
layer_reprs[lii] = np.vstack(layer_reprs[lii])
for pii in range(len(position_indexes)):
for tii in range(len(target_indexes)):
# stack sequences
layer_grads[lii][pii][tii] = np.vstack(layer_grads[lii][pii][tii])
# collapse position into arrays
layer_grads[lii] = np.array(layer_grads[lii])
# transpose positions and targets to back
if layer_grads[lii].ndim == 5:
# length dimension
layer_grads[lii] = np.transpose(layer_grads[lii], [2, 3, 4, 0, 1])
else:
# no length dimension
layer_grads[lii] = np.transpose(layer_grads[lii] [2, 3, 0, 1])
if return_preds:
return layer_grads, layer_reprs, preds
else:
return layer_grads, layer_reprs
def hidden(self, sess, batcher, layers=None):
''' Compute hidden representations for a test set. '''
if layers is None:
layers = list(range(self.cnn_layers))
# initialize layer representation data structure
layer_reprs = []
for li in range(1+np.max(layers)):
layer_reprs.append([])
preds = []
# setup feed dict
fd = self.set_mode('test')
# get first batch
Xb, _, _, Nb = batcher.next()
while Xb is not None:
# update feed dict
fd[self.inputs] = Xb
# compute predictions
layer_reprs_batch, preds_batch = sess.run([self.layer_reprs, self.preds_op], feed_dict=fd)
# accumulate representations
for li in layers:
# squeeze (conv_2d-expanded) second dimension
if layer_reprs_batch[li].shape[1] == 1:
layer_reprs_batch[li] = layer_reprs_batch[li].squeeze(axis=1)
# append
layer_reprs[li].append(layer_reprs_batch[li][:Nb].astype('float16'))
# accumualte predictions
preds.append(preds_batch[:Nb])
# next batch
Xb, _, _, Nb = batcher.next()
# reset batcher
batcher.reset()
# accumulate representations
for li in layers:
layer_reprs[li] = np.vstack(layer_reprs[li])
preds = np.vstack(preds)
return layer_reprs, preds
  def _predict_ensemble(self, sess, fd, Xb, ensemble_fwdrc, ensemble_shifts, mc_n, ds_indexes=None, target_indexes=None, return_var=False, return_all=False):
    """Run the model on one batch for every ensemble member (strand x shift
    x Monte Carlo draw) and return the running mean of the predictions.

    Args:
      sess: TensorFlow session.
      fd: feed dict with the mode placeholders already set.
      Xb: batch of one-hot input sequences.
      ensemble_fwdrc: list of bools; True = forward strand, False = rev-comp.
      ensemble_shifts: list of int sequence shifts, parallel to ensemble_fwdrc.
      mc_n: Monte Carlo iterations per (fwdrc, shift) entry.
      ds_indexes: optional down-sampled position indexes.
      target_indexes: optional target subset.
      return_var: also accumulate the running variance numerator.
      return_all: also store each individual ensemble prediction.

    Returns:
      (preds_batch, preds_batch_var or None, preds_all or None)
    """
    # determine predictions length
    preds_length = self.preds_length
    if ds_indexes is not None:
      preds_length = len(ds_indexes)

    # determine num targets
    num_targets = self.num_targets
    if target_indexes is not None:
      num_targets = len(target_indexes)

    # initialize batch predictions (running mean accumulator)
    preds_batch = np.zeros((Xb.shape[0], preds_length, num_targets), dtype='float32')

    if return_var:
      preds_batch_var = np.zeros(preds_batch.shape, dtype='float32')
    else:
      preds_batch_var = None

    if return_all:
      all_n = mc_n * len(ensemble_fwdrc)
      preds_all = np.zeros((Xb.shape[0], preds_length, num_targets, all_n), dtype='float16')
    else:
      preds_all = None

    # index of the ensemble member currently being folded in
    running_i = 0

    for ei in range(len(ensemble_fwdrc)):
      # construct sequence (strand flip and/or shift augmentation)
      Xb_ensemble = hot1_augment(Xb, ensemble_fwdrc[ei], ensemble_shifts[ei])

      # update feed dict
      fd[self.inputs] = Xb_ensemble

      # for each monte carlo (or non-mc single) iteration
      for mi in range(mc_n):
        # print('ei=%d, mi=%d, fwdrc=%d, shifts=%d' % (ei, mi, ensemble_fwdrc[ei], ensemble_shifts[ei]), flush=True)

        # predict
        preds_ei = sess.run(self.preds_op, feed_dict=fd)

        # reverse (rc predictions come out backwards along the sequence axis)
        if ensemble_fwdrc[ei] is False:
          preds_ei = preds_ei[:,::-1,:]

        # down-sample
        if ds_indexes is not None:
          preds_ei = preds_ei[:,ds_indexes,:]
        if target_indexes is not None:
          preds_ei = preds_ei[:,:,target_indexes]

        # save previous mean (the Welford variance update needs both
        # the old and the new mean)
        preds_batch1 = preds_batch

        # update mean
        preds_batch = running_mean(preds_batch1, preds_ei, running_i+1)

        # update variance sum
        if return_var:
          preds_batch_var = running_varsum(preds_batch_var, preds_ei, preds_batch1, preds_batch)

        # save iteration
        if return_all:
          preds_all[:,:,:,running_i] = preds_ei[:,:,:]

        # update running index
        running_i += 1

    return preds_batch, preds_batch_var, preds_all
def predict(self, sess, batcher, rc=False, shifts=[0], mc_n=0, target_indexes=None, return_var=False, return_all=False, down_sample=1):
''' Compute predictions on a test set.
In
sess: TensorFlow session
batcher: Batcher class with transcript-covering sequences.
rc: Average predictions from the forward and reverse complement sequences.
shifts: Average predictions from sequence shifts left/right.
mc_n: Monte Carlo iterations per rc/shift.
target_indexes: Optional target subset list
return_var: Return variance estimates
down_sample: Int specifying to consider uniformly spaced sampled positions
Out
preds: S (sequences) x L (unbuffered length) x T (targets) array
'''
# uniformly sample indexes
ds_indexes = None
preds_length = self.preds_length
if down_sample != 1:
ds_indexes = np.arange(0, self.preds_length, down_sample)
preds_length = len(ds_indexes)
# initialize prediction arrays
num_targets = self.num_targets
if target_indexes is not None:
num_targets = len(target_indexes)
# determine ensemble iteration parameters
ensemble_fwdrc = []
ensemble_shifts = []
for shift in shifts:
ensemble_fwdrc.append(True)
ensemble_shifts.append(shift)
if rc:
ensemble_fwdrc.append(False)
ensemble_shifts.append(shift)
if mc_n > 0:
# setup feed dict
fd = self.set_mode('test_mc')
else:
# setup feed dict
fd = self.set_mode('test')
# co-opt the variable to represent
# iterations per fwdrc/shift.
mc_n = 1
# total ensemble predictions
all_n = mc_n * len(ensemble_fwdrc)
# initialize prediction data structures
preds = np.zeros((batcher.num_seqs, preds_length, num_targets), dtype='float16')
if return_var:
if all_n == 1:
print('Cannot return prediction variance. Add rc, shifts, or mc.', file=sys.stderr)
exit(1)
preds_var = np.zeros((batcher.num_seqs, preds_length, num_targets), dtype='float16')
if return_all:
preds_all = np.zeros((batcher.num_seqs, preds_length, num_targets, all_n), dtype='float16')
# sequence index
si = 0
# get first batch
Xb, _, _, Nb = batcher.next()
while Xb is not None:
# make ensemble predictions
preds_batch, preds_batch_var, preds_all = self._predict_ensemble(sess, fd, Xb, ensemble_fwdrc, ensemble_shifts, mc_n, ds_indexes, target_indexes, return_var, return_all)
# accumulate predictions
preds[si:si+Nb,:,:] = preds_batch[:Nb,:,:]
if return_var:
preds_var[si:si+Nb,:,:] = preds_batch_var[:Nb,:,:] / (all_n-1)
if return_all:
preds_all[si:si+Nb,:,:,:] = preds_all[:Nb,:,:,:]
# update sequence index
si += Nb
# next batch
Xb, _, _, Nb = batcher.next()
# reset batcher
batcher.reset()
if return_var:
if return_all:
return preds, preds_var, preds_all
else:
return preds, preds_var
else:
return preds
  def predict_genes(self, sess, batcher, transcript_map, rc=False, shifts=[0], mc_n=0, target_indexes=None):
    ''' Compute transcript TSS predictions on a test set.

    In
     sess:           TensorFlow session
     batcher:        Batcher class with transcript-covering sequences
     transcript_map: OrderedDict mapping transcript id's to (sequence index, position) tuples marking TSSs.
     rc:             Average predictions from the forward and reverse complement sequences.
     shifts:         Average predictions from sequence shifts left/right.
     mc_n:           Monte Carlo iterations per rc/shift.
     target_indexes: Optional target subset list

    Out
     transcript_preds: G (gene transcripts) X T (targets) array
    '''

    # setup feed dict
    # NOTE(review): this call appears redundant; it's superseded by the
    # mode selection below.
    fd = self.set_mode('test')

    # initialize prediction arrays
    num_targets = self.num_targets
    if target_indexes is not None:
      num_targets = len(target_indexes)

    # determine ensemble iteration parameters
    ensemble_fwdrc = []
    ensemble_shifts = []
    for shift in shifts:
      ensemble_fwdrc.append(True)
      ensemble_shifts.append(shift)
      if rc:
        ensemble_fwdrc.append(False)
        ensemble_shifts.append(shift)

    if mc_n > 0:
      # setup feed dict
      fd = self.set_mode('test_mc')

    else:
      # setup feed dict
      fd = self.set_mode('test')

      # co-opt the variable to represent
      # iterations per fwdrc/shift.
      mc_n = 1

    # total ensemble predictions
    all_n = mc_n * len(ensemble_fwdrc)

    # initialize gene target predictions
    num_genes = len(transcript_map)
    gene_preds = np.zeros((num_genes, num_targets), dtype='float16')

    # construct an inverse map: sequence index -> {position: set of transcript indexes}
    sequence_pos_transcripts = []
    txi = 0
    for transcript in transcript_map:
      si, pos = transcript_map[transcript]

      # extend sequence list
      while len(sequence_pos_transcripts) <= si:
        sequence_pos_transcripts.append({})

      # add gene to position set
      sequence_pos_transcripts[si].setdefault(pos,set()).add(txi)

      txi += 1

    # sequence index
    si = 0

    # get first batch
    Xb, _, _, Nb = batcher.next()

    while Xb is not None:
      # make ensemble predictions
      preds_batch, _, _ = self._predict_ensemble(sess, fd, Xb, ensemble_fwdrc, ensemble_shifts, mc_n, target_indexes=target_indexes)

      # for each sequence in the batch
      for pi in range(Nb):
        # for each position with a gene
        for tpos in sequence_pos_transcripts[si+pi]:
          # for each gene at that position
          for txi in sequence_pos_transcripts[si+pi][tpos]:
            # adjust for the buffer (targets exclude the flanking buffer)
            ppos = tpos - self.batch_buffer//self.target_pool

            # add prediction
            gene_preds[txi,:] += preds_batch[pi,ppos,:]

      # update sequence index
      si += Nb

      # next batch
      Xb, _, _, Nb = batcher.next()

    # reset batcher
    batcher.reset()

    return gene_preds
def set_mode(self, mode):
''' Construct a feed dictionary to specify the model's mode. '''
fd = {}
if mode in ['train', 'training']:
fd[self.is_training] = True
for li in range(self.cnn_layers):
fd[self.cnn_dropout_ph[li]] = self.cnn_dropout[li]
elif mode in ['test', 'testing', 'evaluate']:
fd[self.is_training] = False
for li in range(self.cnn_layers):
fd[self.cnn_dropout_ph[li]] = 0
elif mode in ['test_mc', 'testing_mc', 'evaluate_mc', 'mc_test', 'mc_testing', 'mc_evaluate']:
fd[self.is_training] = False
for li in range(self.cnn_layers):
fd[self.cnn_dropout_ph[li]] = self.cnn_dropout[li]
else:
print('Cannot recognize mode %s' % mode)
exit(1)
return fd
def set_params(self, job):
''' Set model parameters. '''
###################################################
# data attributes
###################################################
self.seq_depth = job.get('seq_depth', 4)
self.num_targets = job['num_targets']
self.target_classes = job.get('target_classes', 1)
self.target_pool = job.get('target_pool', 1)
###################################################
# batching
###################################################
self.batch_size = job.get('batch_size', 64)
self.batch_length = job.get('batch_length', 1024)
self.batch_buffer = job.get('batch_buffer', 64)
###################################################
# training
###################################################
self.learning_rate = job.get('learning_rate', 0.001)
self.adam_beta1 = job.get('adam_beta1', 0.9)
self.adam_beta2 = job.get('adam_beta2', 0.999)
self.adam_eps = job.get('adam_eps', 1e-8)
self.momentum = job.get('momentum', 0)
self.decay = job.get('decay', 0.9)
self.optimization = job.get('optimization', 'adam').lower()
self.grad_clip = job.get('grad_clip', None)
###################################################
# CNN params
###################################################
self.cnn_filters = np.atleast_1d(job.get('cnn_filters', []))
self.cnn_filter_sizes = np.atleast_1d(job.get('cnn_filter_sizes', []))
self.cnn_layers = len(self.cnn_filters)
self.cnn_pool = layer_extend(job.get('cnn_pool', []), 1, self.cnn_layers)
self.cnn_strides = layer_extend(job.get('cnn_strides', []), 1, self.cnn_layers)
self.cnn_dense = layer_extend(job.get('cnn_dense', []), False, self.cnn_layers)
self.cnn_dilation = layer_extend(job.get('cnn_dilation', []), 1, self.cnn_layers)
###################################################
# regularization
###################################################
self.cnn_dropout = layer_extend(job.get('cnn_dropout', []), 0, self.cnn_layers)
self.cnn_l2 = layer_extend(job.get('cnn_l2', []), 0, self.cnn_layers)
self.final_l1 = job.get('final_l1', 0)
self.batch_renorm = bool(job.get('batch_renorm', False))
self.batch_renorm = bool(job.get('renorm', self.batch_renorm))
###################################################
# loss
###################################################
self.link = job.get('link', 'exp_linear')
self.loss = job.get('loss', 'poisson')
self.target_clip = job.get('target_clip', None)
self.target_sqrt = bool(job.get('target_sqrt', False))
###################################################
# other
###################################################
self.save_reprs = job.get('save_reprs', False)
  def test(self, sess, batcher, rc=False, shifts=[0], mc_n=0):
    ''' Compute model accuracy on a test set.

    Args:
      sess:    TensorFlow session
      batcher: Batcher object to provide data
      rc:      Average predictions from the forward and reverse complement sequences.
      shifts:  Average predictions from sequence shifts left/right.
      mc_n:    Monte Carlo iterations per rc/shift.

    Returns:
      acc: Accuracy object
    '''

    # determine ensemble iteration parameters
    ensemble_fwdrc = []
    ensemble_shifts = []
    for shift in shifts:
      ensemble_fwdrc.append(True)
      ensemble_shifts.append(shift)
      if rc:
        ensemble_fwdrc.append(False)
        ensemble_shifts.append(shift)

    if mc_n > 0:
      # setup feed dict
      fd = self.set_mode('test_mc')

    else:
      # setup feed dict
      fd = self.set_mode('test')

      # co-opt the variable to represent
      # iterations per fwdrc/shift.
      mc_n = 1

    # initialize prediction and target arrays
    preds = np.zeros((batcher.num_seqs, self.preds_length, self.num_targets), dtype='float16')

    targets = np.zeros((batcher.num_seqs, self.preds_length, self.num_targets), dtype='float16')
    targets_na = np.zeros((batcher.num_seqs, self.preds_length), dtype='bool')

    batch_losses = []
    batch_target_losses = []

    # sequence index
    si = 0

    # get first batch
    Xb, Yb, NAb, Nb = batcher.next()

    while Xb is not None:
      # make ensemble predictions
      preds_batch, preds_batch_var, preds_all = self._predict_ensemble(sess, fd, Xb, ensemble_fwdrc, ensemble_shifts, mc_n)

      # add target info
      fd[self.targets] = Yb
      fd[self.targets_na] = NAb

      # recompute loss w/ ensembled prediction via the preds_adhoc placeholder
      fd[self.preds_adhoc] = preds_batch
      targets_batch, loss_batch, target_losses_batch = sess.run([self.targets_op, self.loss_adhoc, self.target_losses_adhoc], feed_dict=fd)

      # accumulate predictions and targets
      if preds_batch.ndim == 3:
        preds[si:si+Nb,:,:] = preds_batch[:Nb,:,:]
        targets[si:si+Nb,:,:] = targets_batch[:Nb,:,:]

      else:
        # classification case: collapse class probabilities to expectations
        # NOTE(review): qi is unused, so each pass of this loop recomputes
        # and overwrites the same assignments — confirm intent.
        for qi in range(preds_batch.shape[3]):
          # TEMP, ideally this will be in the HDF5 and set previously
          self.quantile_means = np.geomspace(0.1, 256, 16)

          # softmax
          preds_batch_norm = np.expand_dims(np.sum(np.exp(preds_batch[:Nb,:,:,:]),axis=3),axis=3)
          pred_probs_batch = np.exp(preds_batch[:Nb,:,:,:]) / preds_batch_norm

          # expectation over quantile medians
          preds[si:si+Nb,:,:] = np.dot(pred_probs_batch, self.quantile_means)

          # compare to quantile median
          targets[si:si+Nb,:,:] = self.quantile_means[targets_batch[:Nb,:,:]-1]

      # accumulate loss
      batch_losses.append(loss_batch)
      batch_target_losses.append(target_losses_batch)

      # update sequence index
      si += Nb

      # next batch
      Xb, Yb, NAb, Nb = batcher.next()

    # reset batcher
    batcher.reset()

    # mean across batches
    batch_losses = np.mean(batch_losses)
    batch_target_losses = np.array(batch_target_losses).mean(axis=0)

    # instantiate accuracy object
    # NOTE(review): relies on basenji.accuracy being reachable as an
    # attribute of the basenji package; only basenji.ops and
    # basenji.dna_io are imported at the top of this file — confirm the
    # package __init__ makes accuracy available.
    acc = basenji.accuracy.Accuracy(targets, preds, targets_na, batch_losses, batch_target_losses)

    return acc
def train_epoch(self, sess, batcher, fwdrc=True, shift=0, sum_writer=None):
    ''' Execute one training epoch.

    Args:
      sess: TensorFlow session.
      batcher: batch provider with next()/reset().
      fwdrc: forward/reverse-complement flag passed through to the batcher.
      shift: sequence shift passed through to the batcher.
      sum_writer: optional tf.summary writer; receives one summary per step.

    Returns:
      Mean training loss across all full batches in the epoch.
    '''
    # initialize training loss
    train_loss = []

    # setup feed dict
    fd = self.set_mode('train')

    # get first batch
    Xb, Yb, NAb, Nb = batcher.next(fwdrc, shift)

    # only full-sized batches are trained on; a short trailing batch ends the epoch
    while Xb is not None and Nb == self.batch_size:
        # update feed dict
        fd[self.inputs] = Xb
        fd[self.targets] = Yb
        fd[self.targets_na] = NAb

        # run the train step together with the summary op; update_ops are
        # appended so they execute in the same session call
        run_returns = sess.run([self.merged_summary, self.loss_op, self.step_op]+self.update_ops, feed_dict=fd)
        summary, loss_batch = run_returns[:2]

        # pull gradients
        # gvs_batch = sess.run([g for (g,v) in self.gvs if g is not None], feed_dict=fd)

        # add summary
        if sum_writer is not None:
            sum_writer.add_summary(summary, self.step)

        # accumulate loss
        # avail_sum = np.logical_not(NAb[:Nb,:]).sum()
        # train_loss.append(loss_batch / avail_sum)
        train_loss.append(loss_batch)

        # next batch
        Xb, Yb, NAb, Nb = batcher.next(fwdrc, shift)
        self.step += 1

    # reset training batcher
    batcher.reset()

    return np.mean(train_loss)
def layer_extend(var, default, layers):
    '''Normalize a job input to a per-layer list of length >= `layers`.

    Args:
      var: either a scalar (applied to every layer) or a list of
        per-layer values.
      default: pad value for short lists; when `var` is a scalar, the
        scalar itself becomes the pad value.
      layers: required number of layers.

    Returns:
      A new list of length at least `layers` (longer inputs are kept as-is).
    '''
    if not isinstance(var, list):
        # a scalar applies to every layer
        default = var
        var = [var]
    else:
        # copy so the caller's list is not mutated by the padding below
        var = list(var)

    # extend for each layer
    while len(var) < layers:
        var.append(default)

    return var
def running_mean(u_k1, x_k, k):
    '''Incrementally update a running mean with the k-th sample.

    u_k1 is the mean of the first k-1 samples; the new mean moves toward
    x_k by the deviation divided by the new sample count.
    '''
    delta = x_k - u_k1
    return u_k1 + delta / k
def running_varsum(v_k1, x_k, m_k1, m_k):
    '''Incrementally update the running variance numerator.

    Welford-style update: v_k1 is the previous numerator, m_k1/m_k are the
    means before and after incorporating sample x_k.
    Ref: https://www.johndcook.com/blog/standard_deviation/
    '''
    correction = (x_k - m_k1) * (x_k - m_k)
    return v_k1 + correction
| 37.645425 | 316 | 0.568731 |
ace441b38a7c465a984880ff2a7c5226c37d4522 | 2,069 | py | Python | salt/_modules/tests/test_caasp_filters.py | mjura/salt | e6b9806e061306075ceb860aa6f3363f7a2cd7bc | [
"Apache-2.0"
] | 69 | 2017-03-01T14:09:44.000Z | 2019-10-06T14:06:55.000Z | salt/_modules/tests/test_caasp_filters.py | mjura/salt | e6b9806e061306075ceb860aa6f3363f7a2cd7bc | [
"Apache-2.0"
] | 443 | 2017-03-01T14:39:19.000Z | 2019-08-30T08:34:51.000Z | salt/_modules/tests/test_caasp_filters.py | mjura/salt | e6b9806e061306075ceb860aa6f3363f7a2cd7bc | [
"Apache-2.0"
] | 46 | 2017-03-01T14:11:31.000Z | 2019-08-06T12:46:22.000Z | from __future__ import absolute_import
import unittest
import caasp_filters
class TestIsIP(unittest.TestCase):
    """Exercise the IPv4/IPv6 recognizers in caasp_filters."""

    def test_is_ipv4(self):
        # Well-formed dotted-quad addresses must be accepted.
        for addr in ("127.0.0.1", "192.168.23.1", "192.168.23.255",
                     "255.255.255.255", "0.0.0.0"):
            self.assertTrue(caasp_filters.is_ipv4(addr))
        # Malformed addresses and hostnames must be rejected
        # ("master85.test.net" covers bsc#1123291).
        for addr in ("30.168.1.255.1", "127.1", "-1.0.2.3", "3...3",
                     "whatever", "master85.test.net"):
            self.assertFalse(caasp_filters.is_ipv4(addr))

    def test_is_ipv6(self):
        for addr in ("1111:2222:3333:4444:5555:6666:7777:8888",
                     "1111:2222:3333:4444:5555:6666:7777::",
                     "::",
                     "::8888"):
            self.assertTrue(caasp_filters.is_ipv6(addr))
        for addr in ("11112222:3333:4444:5555:6666:7777:8888",
                     "1111:",
                     "::."):
            self.assertFalse(caasp_filters.is_ipv6(addr))
class TestCaaspFilters(unittest.TestCase):
    """Basic checks for caasp_filters.basename()."""

    def test_basename(self):
        cases = [
            ("hello", "../hello"),
            ("world", "../hello/world"),
            ("", "./"),
            ("", "../"),
            ("", "/"),
            (".", "."),
        ]
        for expected, path in cases:
            self.assertEqual(expected, caasp_filters.basename(path))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 34.483333 | 76 | 0.653456 |
ace4421c2e1a3496a7ae3cfe5f4b3ed9e0a5ae2b | 4,315 | py | Python | ml-project/bert_string_GPU/app_shubham_test_windows.py | mohit-bags/BERT-Notebooks | 71971a57d2e1eda701a93fa3bfcca75c2ec95ca8 | [
"MIT"
] | null | null | null | ml-project/bert_string_GPU/app_shubham_test_windows.py | mohit-bags/BERT-Notebooks | 71971a57d2e1eda701a93fa3bfcca75c2ec95ca8 | [
"MIT"
] | null | null | null | ml-project/bert_string_GPU/app_shubham_test_windows.py | mohit-bags/BERT-Notebooks | 71971a57d2e1eda701a93fa3bfcca75c2ec95ca8 | [
"MIT"
] | null | null | null | '''
File: app_shubham_test.py
Project: src
File Created: Saturday, 26th June 2021 03:38:47 am
Author: Shubham Sunwalka (shubham.kumar@slintel.com)
-----
Last Modified: Saturday, 26th June 2021 03:38:47 am
Modified By: Shubham Sunwalka (shubham.kumar@slintel.com)
-----
Copyright 2021 Shubham
'''
from simpletransformers.ner import NERModel, NERArgs
import json
import pandas as pd
import streamlit as st
# --- page layout -----------------------------------------------------------
st.title("Named Entity Recognition")
st.write("""
# Explore Us!
Which model is the best?
""")
# NOTE(review): the sidebar choice is echoed but never used -- load_model()
# below always builds a BERT model regardless of this selection.
model_name = st.sidebar.selectbox("Select Model",("bert","roberta"))
st.write(model_name)
# Free-text input that is fed to the NER model below.
sentence = st.text_input("Sentence")
st.write(sentence)
# Cached by Streamlit so the heavy model construction runs once per session.
@st.cache(suppress_st_warning=True)
def load_model():
    args = NERArgs()
    # multiprocessed decoding disabled -- presumably for Windows
    # compatibility (see filename); TODO confirm
    args.use_multiprocessed_decoding = False
    model1 = NERModel('bert', 'bert-base-uncased', args=args, use_cuda=False)
    return model1
def predict(sentence):
    """Run NER over a single sentence.

    Returns the model's predictions for the sentence serialized as a JSON
    string, or None when the sentence is empty (nothing to tag).
    """
    # Guard clause replaces the original `if sentence:` wrapper (which fell
    # through to an implicit None); the large commented-out model
    # configuration block was dead code and has been removed.
    if not sentence:
        return None
    model1 = load_model()
    predictions, raw_outputs = model1.predict([sentence])
    return json.dumps(predictions[0])
# Run and display a prediction as soon as the user has entered a sentence.
if sentence :
    result= predict(sentence)
    #result=pd.DataFrame(result)
    st.write(result)
# """Streamlit v. 0.52 ships with a first version of a **file uploader** widget. You can find the
# **documentation**
# [here](https://streamlit.io/docs/api.html?highlight=file%20upload#streamlit.file_uploader).
# For reference I've implemented an example of file upload here. It's available in the gallery at
# [awesome-streamlit.org](https://awesome-streamlit.org).
# """
# from enum import Enum
# from io import BytesIO, StringIO
# from typing import Union
# import pandas as pd
# import streamlit as st
# STYLE = """
# <style>
# img {
# max-width: 100%;
# }
# </style>
# """
# FILE_TYPES = ["csv", "py", "png", "jpg"]
# class FileType(Enum):
# """Used to distinguish between file types"""
# IMAGE = "Image"
# CSV = "csv"
# PYTHON = "Python"
# def get_file_type(file: Union[BytesIO, StringIO]) -> FileType:
# """The file uploader widget does not provide information on the type of file uploaded so we have
# to guess using rules or ML. See
# [Issue 896](https://github.com/streamlit/streamlit/issues/896)
# I've implemented rules for now :-)
# Arguments:
# file {Union[BytesIO, StringIO]} -- The file uploaded
# Returns:
# FileType -- A best guess of the file type
# """
# if isinstance(file, BytesIO):
# return FileType.IMAGE
# content = file.getvalue()
# if (
# content.startswith('"""')
# or "import" in content
# or "from " in content
# or "def " in content
# or "class " in content
# or "print(" in content
# ):
# return FileType.PYTHON
# return FileType.CSV
# def main():
# """Run this function to display the Streamlit app"""
# st.write("""
# # Simple Iris Flower Prediction App
# This app predicts the **Iris flower** type!
# """)
# #st.info(__doc__)
# st.markdown(STYLE, unsafe_allow_html=True)
# file = st.file_uploader("Upload file", type=FILE_TYPES)
# show_file = st.empty()
# if not file:
# show_file.info("Please upload a file of type: " + ", ".join(FILE_TYPES))
# return
# file_type = get_file_type(file)
# if file_type == FileType.IMAGE:
# show_file.image(file)
# elif file_type == FileType.PYTHON:
# st.code(file.getvalue())
# else:
# data = pd.read_csv(file)
# st.dataframe(data.head(10))
# file.close()
# # main() | 25.532544 | 154 | 0.621553 |
ace4427c131648ce1814a9cb96a9a437c3888d0f | 670 | py | Python | packaging_utils/common/config.py | sebix/packaging-utils | 8e2b21220c18bbd396ffb84c80844cba0c954282 | [
"0BSD"
] | 4 | 2018-07-27T21:13:38.000Z | 2022-03-31T14:31:59.000Z | packaging_utils/common/config.py | sebix/packaging-utils | 8e2b21220c18bbd396ffb84c80844cba0c954282 | [
"0BSD"
] | 10 | 2019-07-06T15:46:46.000Z | 2021-08-08T14:04:06.000Z | packaging_utils/common/config.py | sebix/packaging-utils | 8e2b21220c18bbd396ffb84c80844cba0c954282 | [
"0BSD"
] | 1 | 2019-09-04T05:06:36.000Z | 2019-09-04T05:06:36.000Z | """
Common methods to deal with configs
"""
import configparser
import os.path
from typing import Optional, Union
CONFIG_FILE = os.path.expanduser("~/.config/packaging_utils.ini")
def read_config(section: str) -> Optional[Union[configparser.SectionProxy,
                                                configparser.ConfigParser]]:
    """Read the packaging_utils configuration file.

    Returns the requested section (a SectionProxy), the whole parser when
    `section` is falsy, or None when the config file does not exist.
    (The previous annotation claimed `Optional[str]`, which was wrong.)

    Raises:
        KeyError: if the file exists but `section` is not present in it.
    """
    if not os.path.exists(CONFIG_FILE):
        return None
    config = configparser.ConfigParser()
    config.read(CONFIG_FILE)
    if section:
        return config[section]
    return config
def get_librariesio_api_key() -> Optional[str]:
    """Return the libraries.io API key, or None when no config file exists.

    Guards against read_config() returning None (missing config file),
    which previously raised TypeError when subscripted.
    """
    config = read_config('libraries.io')
    if config is None:
        return None
    return config['api_key']
| 21.612903 | 71 | 0.676119 |
ace445675c41092588f8d631f1be46d87df14bb8 | 7,149 | py | Python | matchability/datasets/vkitti.py | utiasSTARS/matchable-image-transforms | 2c723872ef82e51dfa32abd3bbcec8410cb7dd2d | [
"MIT"
] | 11 | 2020-07-31T00:39:26.000Z | 2022-02-10T05:46:52.000Z | matchability/datasets/vkitti.py | utiasSTARS/matchable-image-transforms | 2c723872ef82e51dfa32abd3bbcec8410cb7dd2d | [
"MIT"
] | 1 | 2021-02-08T06:14:07.000Z | 2021-02-22T02:33:30.000Z | matchability/datasets/vkitti.py | utiasSTARS/matchable-image-transforms | 2c723872ef82e51dfa32abd3bbcec8410cb7dd2d | [
"MIT"
] | 5 | 2019-12-31T11:03:09.000Z | 2022-01-30T02:16:02.000Z | import torch.utils.data
from torchvision import transforms
import os.path
import glob
import numpy as np
from collections import namedtuple
from PIL import Image
import viso2
from .. import transforms as custom_transforms
CameraIntrinsics = namedtuple('CameraIntrinsics', 'fu, fv, cu, cv')
intrinsics_full = CameraIntrinsics(725.0, 725.0, 620.5, 187.0)
# 1242x375 --> 636x192
intrinsics_636x192 = CameraIntrinsics(371.2, 371.2, 317.5, 95.5)
# 1242x375 --> 256x192
intrinsics_centrecrop_256x192 = CameraIntrinsics(371.2, 371.2, 127.5, 95.5)
class Dataset:
    """Load and parse data from the Virtual KITTI dataset.

    One instance represents a single (sequence, rendering condition) pair
    and exposes indexed access to RGB/grayscale/depth frames plus ground
    truth camera poses and timestamps.
    """

    def __init__(self, base_path, sequence, condition, **kwargs):
        self.base_path = base_path
        self.sequence = sequence
        self.condition = condition
        # Optional subset of frame indices; None means use every frame.
        self.frames = kwargs.get('frames', None)
        self.rgb_dir = kwargs.get('rgb_dir', 'vkitti_1.3.1_rgb')
        self.depth_dir = kwargs.get('depth_dir', 'vkitti_1.3.1_depthgt')
        self.gt_dir = kwargs.get('gt_dir', 'vkitti_1.3.1_extrinsicsgt')

        self._load_timestamps_and_poses()

        self.num_frames = len(self.timestamps)

        self.rgb_files = sorted(glob.glob(
            os.path.join(self.base_path, self.rgb_dir,
                         self.sequence, self.condition, '*.png')))
        self.depth_files = sorted(glob.glob(
            os.path.join(self.base_path, self.depth_dir,
                         self.sequence, self.condition, '*.png')))

        if self.frames is not None:
            self.rgb_files = [self.rgb_files[i] for i in self.frames]
            self.depth_files = [self.depth_files[i] for i in self.frames]

    def __len__(self):
        return self.num_frames

    def get_rgb(self, idx, size=None):
        """Load RGB image from file."""
        return self._load_image(self.rgb_files[idx], size=size,
                                mode='RGB', dtype=np.uint8)

    def get_gray(self, idx, size=None):
        """Load grayscale image from file."""
        return self._load_image(self.rgb_files[idx], size=size,
                                mode='L', dtype=np.uint8)

    def get_depth(self, idx, size=None):
        """Load depth image from file.

        The stored PNG values are divided by 100 -- presumably converting
        centimetres to metres; TODO confirm against the VKITTI docs.
        """
        # FIX: the original passed dtype=np.float, an alias deprecated in
        # NumPy 1.20 and removed in 1.24; the builtin float is equivalent.
        return self._load_image(self.depth_files[idx], size=size,
                                mode='F', dtype=float, factor=100.)

    def _load_image(self, impath, size=None, mode='RGB',
                    dtype=float, factor=1):
        """Load an image, optionally resize it, and scale values by 1/factor."""
        im = Image.open(impath).convert(mode)
        if size:
            im = im.resize(size, resample=Image.BILINEAR)
        return (np.array(im) / factor).astype(dtype)

    def _load_timestamps_and_poses(self):
        """Load ground truth poses (T_w_cam) and timestamps from file."""
        pose_file = os.path.join(self.base_path, self.gt_dir,
                                 '{}_{}.txt'.format(
                                     self.sequence, self.condition))

        self.timestamps = []
        self.poses = []

        # Read and parse the poses
        with open(pose_file, 'r') as f:
            for line in f.readlines():
                line = line.split()
                if line[0] == 'frame':  # this is the header
                    continue
                self.timestamps.append(float(line[0]))
                # file stores world-to-camera; invert to get camera-to-world
                Tmatrix = np.array([float(x)
                                    for x in line[1:17]]).reshape((4, 4))
                self.poses.append(np.linalg.inv(Tmatrix))

        if self.frames is not None:
            self.timestamps = [self.timestamps[i] for i in self.frames]
            self.poses = [self.poses[i] for i in self.frames]
class TorchDataset(torch.utils.data.Dataset):
    """Pairs frames of one VKITTI sequence under two rendering conditions
    and returns normalized tensors (plus optional VISO2 match counts)."""

    def __init__(self, opts, sequence, cond1, cond2, random_crop, **kwargs):
        self.opts = opts
        self.random_crop = random_crop

        self.dataset1 = Dataset(self.opts.data_dir, sequence, cond1, **kwargs)
        self.dataset2 = Dataset(self.opts.data_dir, sequence, cond2, **kwargs)

        self.vo_params = viso2.Mono_parameters()  # Use ransac
        self.vo_params.ransac_iters = 400
        self.vo = viso2.VisualOdometryMono(self.vo_params)

    def __len__(self):
        return len(self.dataset1)

    def __getitem__(self, idx1):
        # Pick a second frame within +/- max_interval of the first, clamped
        # to the valid index range of dataset2.
        interval = np.random.randint(
            low=-self.opts.max_interval, high=self.opts.max_interval+1)
        idx2 = idx1 + interval
        if idx2 >= len(self.dataset2):
            idx2 = len(self.dataset2) - 1
        elif idx2 < 0:
            idx2 = 0

        # Get images
        rgb1 = Image.fromarray(self.dataset1.get_rgb(idx1))
        rgb2 = Image.fromarray(self.dataset2.get_rgb(idx2))

        resize_scale = min(self.opts.image_load_size) / min(rgb1.size)
        # NOTE(review): resize_offset is computed but never used below.
        resize_offset = 0.5 * (max(rgb1.size) * resize_scale -
                               max(self.opts.image_load_size))
        resize = transforms.Compose([
            transforms.Resize(min(self.opts.image_load_size)),
            transforms.CenterCrop(self.opts.image_load_size),
            custom_transforms.StatefulRandomCrop(
                self.opts.image_final_size) if self.random_crop else transforms.Resize(self.opts.image_final_size)
        ])
        make_grayscale = transforms.Grayscale()
        make_normalized_tensor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(self.opts.image_mean, self.opts.image_std)
        ])
        make_normalized_gray_tensor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                (self.opts.image_mean[0],), (self.opts.image_std[0],))
        ])
        # Clamp to at the minimum to avoid computing log(0) = -inf
        make_log_tensor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda tensor: tensor.clamp(1e-3, 1.)),
            transforms.Lambda(lambda tensor: tensor.log())
        ])

        rgb1 = resize(rgb1)
        rgb2 = resize(rgb2)
        gray1 = make_grayscale(rgb1)
        gray2 = make_grayscale(rgb2)

        # Match counts are computed on the PIL images, before the tensor
        # conversions below rebind the variables.
        if self.opts.compute_matches:
            matches11 = self._get_match_count(gray1, gray1)
            matches12 = self._get_match_count(gray1, gray2)
            matches22 = self._get_match_count(gray2, gray2)

        logrgb1 = make_log_tensor(rgb1)
        logrgb2 = make_log_tensor(rgb2)
        rgb1 = make_normalized_tensor(rgb1)
        rgb2 = make_normalized_tensor(rgb2)
        gray1 = make_normalized_gray_tensor(gray1)
        gray2 = make_normalized_gray_tensor(gray2)

        data = {'rgb1': rgb1, 'rgb2': rgb2,
                'gray1': gray1, 'gray2': gray2,
                'logrgb1': logrgb1, 'logrgb2': logrgb2}

        if self.opts.compute_matches:
            data.update({'matches11': matches11,
                         'matches12': matches12,
                         'matches22': matches22})

        return data

    def _get_match_count(self, im1, im2):
        # Feed the pair to monocular VISO2; the inlier count serves as the
        # matchability score.
        self.vo.process_frame(np.array(im1), np.array(im2))
        return np.float32(self.vo.getNumberOfInliers())
| 37.626316 | 114 | 0.598825 |
ace44592a537f0ab6ecbaf7bf823e88f4b196412 | 562 | py | Python | EM/EM_demo.py | RushabhP29/Expectation-maximization-algorithm | b2fe34fc36156b2180e7dd09ece77863694ee252 | [
"MIT"
] | 1 | 2020-09-29T21:55:47.000Z | 2020-09-29T21:55:47.000Z | EM/EM_demo.py | RushabhP29/Expectation-maximization-algorithm | b2fe34fc36156b2180e7dd09ece77863694ee252 | [
"MIT"
] | null | null | null | EM/EM_demo.py | RushabhP29/Expectation-maximization-algorithm | b2fe34fc36156b2180e7dd09ece77863694ee252 | [
"MIT"
] | null | null | null |
from pyspark import SparkConf, SparkContext
from EMN import EMN
from Kmeans import Kmeans
def main():
    # Local Spark context for the EM clustering demo.
    sc = SparkContext(master="local", appName="EM")
    try:
        csv = sc.textFile("kmeans_data.csv") #csv =sc.textFile(sys.argv[1]) if input via cmd
    except IOError:
        # NOTE(review): textFile() is lazy, so a missing file usually
        # surfaces at the first action rather than as IOError here -- confirm.
        print('No such file')
        exit(1)
    # number of clusters and EM iteration cap
    K=2
    maxIteration = 2
    myEM = EMN()
    # cluster, then assign labels and write them to the results file
    myEM.EMClustering(csv, K, maxIteration)
    outfile = "EMresults.txt"
    myEM.assigningLabels(csv, outfile)
    sc.stop()

if __name__ == "__main__":
    main()
main()
| 22.48 | 93 | 0.6121 |
ace4467db8a6ae590bb4dd5a69603d8b21cfb8be | 6,028 | py | Python | pipeline/bulk-extract-sequence-library-features.py | WEHI-Proteomics/tfde | 10f6d9e89cb14a12655ce2378089abce28de9db6 | [
"MIT"
] | null | null | null | pipeline/bulk-extract-sequence-library-features.py | WEHI-Proteomics/tfde | 10f6d9e89cb14a12655ce2378089abce28de9db6 | [
"MIT"
] | null | null | null | pipeline/bulk-extract-sequence-library-features.py | WEHI-Proteomics/tfde | 10f6d9e89cb14a12655ce2378089abce28de9db6 | [
"MIT"
] | null | null | null | import glob
import os
import shutil
import time
import argparse
import sys
import pandas as pd
import numpy as np
import sqlite3
import json
from multiprocessing import Pool
def run_process(process):
    """Echo a shell command to stdout, then run it (blocks until it exits)."""
    print(f"Executing: {process}")
    os.system(process)
class NpEncoder(json.JSONEncoder):
    """json.JSONEncoder that understands NumPy scalars and arrays."""

    def default(self, obj):
        # NumPy scalar types -> equivalent builtin Python scalars.
        if isinstance(obj, (np.integer, np.floating)):
            return obj.item()
        # Arrays -> (nested) Python lists.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else: defer to the base class (which raises TypeError).
        return super().default(obj)
# nohup python -u ./open-path/pda/bulk-cuboid-extract.py -en dwm-test > bulk-cuboid-extract.log 2>&1 &
parser = argparse.ArgumentParser(description='Orchestrate the feature extraction of sequence library features from all runs.')
parser.add_argument('-eb','--experiment_base_dir', type=str, default='./experiments', help='Path to the experiments directory.', required=False)
parser.add_argument('-en','--experiment_name', type=str, help='Name of the experiment.', required=True)
parser.add_argument('-rn','--run_names', type=str, help='Comma-separated names of runs to process.', required=True)
parser.add_argument('-ssm','--small_set_mode', action='store_true', help='A small subset of the data for testing purposes.', required=False)
parser.add_argument('-ssms','--small_set_mode_size', type=int, default='100', help='The number of identifications to sample for small set mode.', required=False)
parser.add_argument('-mpwrt','--max_peak_width_rt', type=int, default=10, help='Maximum peak width tolerance for the extraction from the estimated coordinate in RT.', required=False)
parser.add_argument('-mpwccs','--max_peak_width_ccs', type=int, default=20, help='Maximum peak width tolerance for the extraction from the estimated coordinate in CCS.', required=False)
parser.add_argument('-ini','--ini_file', type=str, default='./tfde/pipeline/pasef-process-short-gradient.ini', help='Path to the config file.', required=False)
parser.add_argument('-d','--denoised', action='store_true', help='Use the denoised version of the raw database.')
args = parser.parse_args()

# print the arguments for the log
info = []
for arg in vars(args):
    info.append((arg, getattr(args, arg)))
print(info)

start_run = time.time()

# check the experiment directory exists
EXPERIMENT_DIR = "{}/{}".format(args.experiment_base_dir, args.experiment_name)
if not os.path.exists(EXPERIMENT_DIR):
    print("The experiment directory is required but doesn't exist: {}".format(EXPERIMENT_DIR))
    sys.exit(1)

# check the log directory exists
LOG_DIR = "{}/logs".format(EXPERIMENT_DIR)
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

# set up the target decoy classifier directory (recreated from scratch)
TARGET_DECOY_MODEL_DIR = "{}/target-decoy-models".format(EXPERIMENT_DIR)
if os.path.exists(TARGET_DECOY_MODEL_DIR):
    shutil.rmtree(TARGET_DECOY_MODEL_DIR)
os.makedirs(TARGET_DECOY_MODEL_DIR)
print("The target-decoy classifier directory was deleted and re-created: {}".format(TARGET_DECOY_MODEL_DIR))

# the experiment metrics file
METRICS_DB_NAME = "{}/experiment-metrics-for-library-sequences.sqlite".format(TARGET_DECOY_MODEL_DIR)
if os.path.isfile(METRICS_DB_NAME):
    os.remove(METRICS_DB_NAME)

# translate CLI switches into flags forwarded to the per-run subprocesses
if args.small_set_mode:
    small_set_flags = "-ssm -ssms {}".format(args.small_set_mode_size)
else:
    small_set_flags = ""

if args.denoised:
    denoised_flag = "-d"
else:
    denoised_flag = ""

# set up the processing pool
pool = Pool(processes=4)

run_names_l = args.run_names.split(',')
print("{} runs to process: {}".format(len(run_names_l), run_names_l))

# build one extraction command per run, then execute them in parallel
extract_cmd_l = []
for run_name in run_names_l:
    print("processing {}".format(run_name))
    LOG_FILE_NAME = "{}/extract-library-sequence-features-for-run-{}.log".format(LOG_DIR, run_name)
    current_directory = os.path.abspath(os.path.dirname(__file__))
    cmd = "python -u {}/extract-library-sequence-features-for-run.py -eb {} -en {} -rn {} -ini {} -mpwrt {} -mpwccs {} {} {} > {} 2>&1".format(current_directory, args.experiment_base_dir, args.experiment_name, run_name, args.ini_file, args.max_peak_width_rt, args.max_peak_width_ccs, small_set_flags, denoised_flag, LOG_FILE_NAME)
    extract_cmd_l.append(cmd)
pool.map(run_process, extract_cmd_l)

# load the run-based metrics into a single experiment-based dataframe
run_sequence_files = glob.glob('{}/library-sequences-in-run-*.pkl'.format(TARGET_DECOY_MODEL_DIR))
print("found {} sequence files to consolidate into an experiment set and stored in {}.".format(len(run_sequence_files), METRICS_DB_NAME))

# load the run-level metrics into a database
db_conn = sqlite3.connect(METRICS_DB_NAME)
for file in run_sequence_files:
    df = pd.read_pickle(file)
    # convert the lists and dictionaries to strings
    df.target_coords = df.apply(lambda row: json.dumps(row.target_coords, cls=NpEncoder), axis=1)
    df.decoy_coords = df.apply(lambda row: json.dumps(row.decoy_coords, cls=NpEncoder), axis=1)
    df.target_metrics = df.apply(lambda row: json.dumps(row.target_metrics, cls=NpEncoder), axis=1)
    df.decoy_metrics = df.apply(lambda row: json.dumps(row.decoy_metrics, cls=NpEncoder), axis=1)
    df.attributes = df.apply(lambda row: json.dumps(row.attributes, cls=NpEncoder), axis=1)
    # count the sequence peak instances
    peak_counts_l = []
    for group_name,group_df in df.groupby(['sequence','charge','run_name'], as_index=False):
        peak_counts_l.append(tuple(group_name) + (len(group_df),))
    peak_counts_df = pd.DataFrame(peak_counts_l, columns=['sequence','charge','run_name','peak_count'])
    df = pd.merge(df, peak_counts_df, how='left', left_on=['sequence','charge','run_name'], right_on=['sequence','charge','run_name'])
    # store the metrics in the database
    df.to_sql(name='extracted_metrics', con=db_conn, if_exists='append', index=False)
db_conn.close()

stop_run = time.time()
print("total running time ({}): {} seconds".format(parser.prog, round(stop_run-start_run,1)))
| 49.409836 | 330 | 0.739881 |
ace446f023df3f58d7e3925dee679805aab4178a | 2,166 | py | Python | ColourTool.py | MarcAlx/ColorTool | 233bb1f9f809b97841b144e80f8acb8ee92bcbd3 | [
"Apache-2.0"
] | null | null | null | ColourTool.py | MarcAlx/ColorTool | 233bb1f9f809b97841b144e80f8acb8ee92bcbd3 | [
"Apache-2.0"
] | null | null | null | ColourTool.py | MarcAlx/ColorTool | 233bb1f9f809b97841b144e80f8acb8ee92bcbd3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Created by Marc-Alexandre Blanchard
from tkinter import *
class Application(object):
    """Simple RGB colour mixer: three sliders drive a colour swatch and
    its hex-code label."""

    def triplet(self, rgb):
        """Return the 6-digit lowercase hex string for an (r, g, b) triple."""
        return format((rgb[0] << 16) | (rgb[1] << 8) | rgb[2], '06x')

    def updateUI(self):
        """Refresh the swatch and hex label from the current slider values."""
        color = "#" + self.triplet((self.varR.get(), self.varG.get(), self.varB.get()))
        # Recolour the existing rectangle instead of stacking a brand-new
        # canvas item on every slider move (the original leaked items).
        self.cadre.itemconfig(self.rect, fill=color)
        self.textH.config(text=color.upper())

    def __init__(self):
        self._tk = Tk()
        self._tk.title("ColourTool")
        self._tk.resizable(width=False, height=False)

        # Colour swatch canvas spanning the rows of the three sliders.
        # (An unused, never-gridded Frame from the original was removed.)
        self.cadre = Canvas(self._tk, width=200, height=200)
        color = "#" + self.triplet((255, 255, 255))
        color = color.upper()
        self.rect = self.cadre.create_rectangle(0, 0, 200, 200, fill=color)
        self.cadre.grid(row=0, column=2, rowspan=7)

        # Hex-code label plus one caption label per channel.
        self.textH = Label(self._tk, text=color)
        self.textH.grid(row=4, column=1, columnspan=1)
        self.textR = Label(self._tk, text="Red")
        self.textR.grid(row=0, column=0)
        self.textG = Label(self._tk, text="Green")
        self.textG.grid(row=1, column=0)
        self.textB = Label(self._tk, text="Blue")
        self.textB.grid(row=2, column=0)

        # One IntVar + slider per channel; any movement redraws the UI.
        self.varR = IntVar()
        self.varG = IntVar()
        self.varB = IntVar()
        self.red = Scale(self._tk, length=300, resolution=1, tickinterval=128,
                         from_=0, to=255, orient=HORIZONTAL, variable=self.varR,
                         command=lambda _=None: self.updateUI())
        self.red.grid(row=0, column=1)
        self.green = Scale(self._tk, length=300, resolution=1, tickinterval=128,
                           from_=0, to=255, orient=HORIZONTAL, variable=self.varG,
                           command=lambda _=None: self.updateUI())
        self.green.grid(row=1, column=1)
        self.blue = Scale(self._tk, length=300, resolution=1, tickinterval=128,
                          from_=0, to=255, orient=HORIZONTAL, variable=self.varB,
                          command=lambda _=None: self.updateUI())
        self.blue.grid(row=2, column=1)

    def mainloop(self):
        self._tk.mainloop()
Application().mainloop()
| 38.678571 | 178 | 0.622807 |
ace44703fd9f09d78c863dd38e8e1d41c29feb48 | 6,128 | py | Python | datasets/datafeeder.py | zldzmfoq12/Tacotron | 323c00c559327be14dc393f1eeefc6eb88e9f05b | [
"MIT"
] | 2 | 2020-01-12T06:11:21.000Z | 2020-01-22T07:44:29.000Z | datasets/datafeeder.py | zldzmfoq12/Tacotron | 323c00c559327be14dc393f1eeefc6eb88e9f05b | [
"MIT"
] | null | null | null | datasets/datafeeder.py | zldzmfoq12/Tacotron | 323c00c559327be14dc393f1eeefc6eb88e9f05b | [
"MIT"
] | null | null | null | import numpy as np
import os
import random
import tensorflow as tf
import threading
import time
import traceback
from text import text_to_sequence
from util.infolog import log
_batches_per_group = 32
# _p_cmudict = 0.5
_pad = 0
class DataFeeder(threading.Thread):
    '''Feeds batches of data into a queue on a background thread.'''

    def __init__(self, coordinator, metadata_filename, hparams):
        super(DataFeeder, self).__init__()
        self._coord = coordinator
        self._hparams = hparams
        self._cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
        self._offset = 0

        # Load metadata: one pipe-delimited record per line
        # (linear-target file | mel-target file | n-frames | text).
        self._datadir = os.path.dirname(metadata_filename)
        with open(metadata_filename, encoding='utf-8') as f:
            self._metadata = [line.strip().split('|') for line in f]
            hours = sum((int(x[2]) for x in self._metadata)) * hparams.frame_shift_ms / (3600 * 1000)
            log('Loaded metadata for %d examples (%.2f hours)' % (len(self._metadata), hours))

        # Create placeholders for inputs and targets. Don't specify batch size because we want to
        # be able to feed different sized batches at eval time.
        self._placeholders = [
            tf.placeholder(tf.int32, [None, None], 'inputs'),
            tf.placeholder(tf.int32, [None], 'input_lengths'),
            tf.placeholder(tf.float32, [None, None, hparams.num_mels], 'mel_targets'),
            tf.placeholder(tf.float32, [None, None, hparams.num_freq], 'linear_targets')
        ]

        # Create queue for buffering data:
        queue = tf.FIFOQueue(8, [tf.int32, tf.int32, tf.float32, tf.float32], name='input_queue')
        self._enqueue_op = queue.enqueue(self._placeholders)
        self.inputs, self.input_lengths, self.mel_targets, self.linear_targets = queue.dequeue()
        self.inputs.set_shape(self._placeholders[0].shape)
        self.input_lengths.set_shape(self._placeholders[1].shape)
        self.mel_targets.set_shape(self._placeholders[2].shape)
        self.linear_targets.set_shape(self._placeholders[3].shape)
        self._cmudict = None

        # # Load CMUDict: If enabled, this will randomly substitute some words in the training data with
        # # their ARPABet equivalents, which will allow you to also pass ARPABet to the model for
        # # synthesis (useful for proper nouns, etc.)
        # if hparams.use_cmudict:
        #   cmudict_path = os.path.join(self._datadir, 'cmudict-0.7b')
        #   if not os.path.isfile(cmudict_path):
        #     raise Exception('If use_cmudict=True, you must download ' +
        #       'http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b to %s' % cmudict_path)
        #   self._cmudict = cmudict.CMUDict(cmudict_path, keep_ambiguous=False)
        #   log('Loaded CMUDict with %d unambiguous entries' % len(self._cmudict))
        # else:
        #   self._cmudict = None

    def start_in_session(self, session):
        # Keep a handle to the session, then start the feeder thread (run()).
        self._session = session
        self.start()

    def run(self):
        try:
            while not self._coord.should_stop():
                self._enqueue_next_group()
        except Exception as e:
            # Surface the error through the coordinator so training stops.
            traceback.print_exc()
            self._coord.request_stop(e)

    def _enqueue_next_group(self):
        start = time.time()

        # Read a group of examples:
        n = self._hparams.batch_size
        r = self._hparams.outputs_per_step
        examples = [self._get_next_example() for i in range(n * _batches_per_group)]

        # Bucket examples based on similar output sequence length for efficiency:
        examples.sort(key=lambda x: x[-1])
        batches = [examples[i:i + n] for i in range(0, len(examples), n)]
        random.shuffle(batches)

        log('Generated %d batches of size %d in %.03f sec' % (len(batches), n, time.time() - start))
        for batch in batches:
            feed_dict = dict(zip(self._placeholders, _prepare_batch(batch, r)))
            self._session.run(self._enqueue_op, feed_dict=feed_dict)

    def _get_next_example(self):
        '''Loads a single example (input, mel_target, linear_target, cost) from disk'''
        # Wrap around and reshuffle once a full pass over the metadata is done.
        if self._offset >= len(self._metadata):
            self._offset = 0
            random.shuffle(self._metadata)
        meta = self._metadata[self._offset]
        self._offset += 1

        text = meta[3]
        # if self._cmudict and random.random() < _p_cmudict:
        #   text = ' '.join([self._maybe_get_arpabet(word) for word in text.split(' ')])

        input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
        linear_target = np.load(os.path.join(self._datadir, meta[0]))
        mel_target = np.load(os.path.join(self._datadir, meta[1]))
        return (input_data, mel_target, linear_target, len(linear_target))

    def _maybe_get_arpabet(self, word):
        # 50/50 chance of substituting an unambiguous ARPAbet spelling.
        arpabet = self._cmudict.lookup(word)
        return '{%s}' % arpabet[0] if arpabet is not None and random.random() < 0.5 else word
def _prepare_batch(batch, outputs_per_step):
    """Shuffle a batch of (input, mel, linear, length) examples and pad each
    component to a common length."""
    random.shuffle(batch)
    inputs = _prepare_inputs([example[0] for example in batch])
    input_lengths = np.asarray([len(example[0]) for example in batch], dtype=np.int32)
    mel_targets = _prepare_targets([example[1] for example in batch], outputs_per_step)
    linear_targets = _prepare_targets([example[2] for example in batch], outputs_per_step)
    return (inputs, input_lengths, mel_targets, linear_targets)
def _prepare_inputs(inputs):
    """Right-pad every input sequence to the length of the longest one."""
    longest = max(len(seq) for seq in inputs)
    return np.stack([_pad_input(seq, longest) for seq in inputs])
def _prepare_targets(targets, alignment):
    """Pad targets along time to a shared length that is a multiple of `alignment`."""
    longest = max(len(t) for t in targets) + 1
    padded_len = _round_up(longest, alignment)
    return np.stack([_pad_target(t, padded_len) for t in targets])
def _pad_input(x, length):
    """Right-pad a 1-D input sequence with the pad symbol up to `length`."""
    pad_amount = length - x.shape[0]
    return np.pad(x, (0, pad_amount), mode='constant', constant_values=_pad)
def _pad_target(t, length):
    """Pad a (time, features) target along the time axis up to `length`."""
    pad_amount = length - t.shape[0]
    return np.pad(t, [(0, pad_amount), (0, 0)], mode='constant', constant_values=_pad)
def _round_up(x, multiple):
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
| 41.687075 | 124 | 0.651273 |
ace44732a196145d23ee83d99f106702053086a5 | 19 | py | Python | btd6_memory_info/generated/UnityEngine/UIElements/UIR/Utility/utility.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/UnityEngine/UIElements/UIR/Utility/utility.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/UnityEngine/UIElements/UIR/Utility/utility.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | class Utility: pass | 19 | 19 | 0.842105 |
ace447346f2e9c0ac0b4a011a1a7ce08995f561f | 771 | py | Python | HackerRank Solutions/Algorithms/Implementation/Cats and a Mouse.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 13 | 2021-09-02T07:30:02.000Z | 2022-03-22T19:32:03.000Z | HackerRank Solutions/Python/Algorithms/Cats and a Mouse.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | null | null | null | HackerRank Solutions/Python/Algorithms/Cats and a Mouse.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 3 | 2021-08-24T16:06:22.000Z | 2021-09-17T15:39:53.000Z | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the catAndMouse function below.
def catAndMouse(x, y, z):
    """Return "Cat A" or "Cat B" for whichever cat (at x / y) is closer to
    the mouse at z; "Mouse C" escapes when the distances are equal."""
    dist_a = abs(x - z)
    dist_b = abs(y - z)
    if dist_a == dist_b:
        return "Mouse C"
    return "Cat A" if dist_a < dist_b else "Cat B"
if __name__ == '__main__':
    # HackerRank harness: results go to the grader-provided output file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    q = int(input())
    for _ in range(q):
        tokens = input().split()
        x = int(tokens[0])
        y = int(tokens[1])
        z = int(tokens[2])
        fptr.write(catAndMouse(x, y, z) + '\n')

    fptr.close()
| 19.769231 | 67 | 0.618677 |
ace44758d9024e65206ee88b0dba419fc8536c92 | 124 | py | Python | deepatari/learner/__init__.py | cowhi/deepatari | 3b676ca4fc66266d766cd2366226f3e10213bc78 | [
"MIT"
] | 10 | 2016-06-10T01:13:44.000Z | 2017-10-15T10:47:09.000Z | deepatari/learner/__init__.py | cowhi/deepatari | 3b676ca4fc66266d766cd2366226f3e10213bc78 | [
"MIT"
] | null | null | null | deepatari/learner/__init__.py | cowhi/deepatari | 3b676ca4fc66266d766cd2366226f3e10213bc78 | [
"MIT"
] | 2 | 2016-06-10T14:38:08.000Z | 2020-08-29T03:11:06.000Z | from .learner import Learner
from .dqnkeras import DQNKeras
from .dqnlasagne import DQNLasagne
from .dqnneon import DQNNeon
| 24.8 | 34 | 0.83871 |
ace4487bcc109a4e514d46b6c557c068ee15aafc | 12,071 | py | Python | Server/src/virtualenv/Lib/encodings/cp874.py | ppyordanov/HCI_4_Future_Cities | 4dc7dc59acccf30357bde66524c2d64c29908de8 | [
"MIT"
] | null | null | null | Server/src/virtualenv/Lib/encodings/cp874.py | ppyordanov/HCI_4_Future_Cities | 4dc7dc59acccf30357bde66524c2d64c29908de8 | [
"MIT"
] | null | null | null | Server/src/virtualenv/Lib/encodings/cp874.py | ppyordanov/HCI_4_Future_Cities | 4dc7dc59acccf30357bde66524c2d64c29908de8 | [
"MIT"
] | null | null | null | """ Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
""" # "
import codecs
# ## Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions delegate to the C-level charmap
    # helpers, driven by the module-level decoding/encoding tables below.

    def encode(self, input, errors='strict'):
        """Encode str *input* to cp874 bytes; returns (bytes, length consumed)."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode cp874 bytes *input* to str; returns (str, length consumed)."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so *final* needs no special
        # handling; [0] drops the "length consumed" half of the result.
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Stateless charmap decode; [0] drops the "length consumed" half.
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs registry uses for 'cp874'."""
    # One shared Codec instance serves both directions (it is stateless).
    codec = Codec()
    return codecs.CodecInfo(
        name='cp874',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\ufffe' # 0x82 -> UNDEFINED
u'\ufffe' # 0x83 -> UNDEFINED
u'\ufffe' # 0x84 -> UNDEFINED
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\ufffe' # 0x86 -> UNDEFINED
u'\ufffe' # 0x87 -> UNDEFINED
u'\ufffe' # 0x88 -> UNDEFINED
u'\ufffe' # 0x89 -> UNDEFINED
u'\ufffe' # 0x8A -> UNDEFINED
u'\ufffe' # 0x8B -> UNDEFINED
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\ufffe' # 0x99 -> UNDEFINED
u'\ufffe' # 0x9A -> UNDEFINED
u'\ufffe' # 0x9B -> UNDEFINED
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe' # 0xFC -> UNDEFINED
u'\ufffe' # 0xFD -> UNDEFINED
u'\ufffe' # 0xFE -> UNDEFINED
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
# Inverse mapping (Unicode character -> cp874 byte) derived from decoding_table.
encoding_table = codecs.charmap_build(decoding_table)
| 38.689103 | 117 | 0.572778 |
ace449d7dfcc84eb3a2ef330e256c0d6f4021eb6 | 1,853 | py | Python | modules/signatures/windows/dns_freehosting_domain.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 24 | 2021-06-21T07:35:37.000Z | 2022-03-22T03:33:59.000Z | modules/signatures/windows/dns_freehosting_domain.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 3 | 2021-07-01T08:09:05.000Z | 2022-01-28T03:38:36.000Z | modules/signatures/windows/dns_freehosting_domain.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 6 | 2021-06-22T05:32:57.000Z | 2022-02-11T02:05:45.000Z | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# This signature was contributed by RedSocks - http://redsocks.nl
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class Dns_Freehosting_Domain(Signature):
    """Flags analyses that resolve domains on known free-hosting providers.

    Each resolved domain matching one of the patterns below is marked as
    a "domain" IOC; the signature fires if at least one pattern matched.
    """

    name = "dns_freehosting_domain"
    description = "Resolves Free Hosting Domain, Possibly Malicious"
    severity = 2
    categories = ["freehosting"]
    authors = ["RedSocks"]
    minimum = "2.0"

    # Raw strings so the '\.' regex escapes are not interpreted as Python
    # string escapes (invalid escapes raise SyntaxWarning on modern Python).
    # The string values are unchanged.
    domains_re = [
        r".*\.yzi\.me",
        r".*\.hol\.es",
        r".*\.zxq\.net",
        r".*\.ta4a\.info",
        r".*\.url\.ph",
        r".*\.vacau\.com",
        r".*\.netai\.net",
        r".*\.webege\.com",
        r".*\.6te\.net",
        r".*\.meximas\.com",
        r".*\.ws\.gy",
        r".*\.comuv\.com",
        r".*\.comuf\.com",
        r".*\.comze\.com",
        r".*\.comoj\.com",
        r".*\.favcc1\.com",
        r".*\.y55\.eu",
        r".*\.esy\.es",
        r".*\.pixub\.com",
        r".*\.1x\.biz",
        r".*\.altervista\.org",
        r".*\.website\.org",
        r".*\.net84\.net",
        r".*\.besaba\.com",
        r".*\.5gbfree\.com",
        r".*\.site40\.net",
        r".*\.site50\.net",
        r".*\.site88\.net",
        r".*\.comxa\.com",
        r".*\.site11\.com",
        r".*\.host22\.com",
        r".*\.000a\.de",
        r".*\.freeiz\.com",
        r".*\.net23\.net",
        r".*\.net46\.net",
        r".*\.cwsurf\.de",
        r".*\.uni\.me",
        r".*\.look\.in",
        r".*\.comule\.com",
        r".*\.comeze\.com",
        r".*\.x10host\.com",
    ]

    def on_complete(self):
        """Check every resolved domain against the patterns; mark matches.

        Returns True when at least one free-hosting domain was resolved.
        """
        for indicator in self.domains_re:
            match = self.check_domain(pattern=indicator, regex=True)
            if match:
                self.mark_ioc("domain", match)
        return self.has_marks()
| 27.656716 | 68 | 0.464112 |
ace44a707be3f46e3d786676c2ec20acf5d558a2 | 13,946 | py | Python | PythonVirtEnv/Lib/site-packages/plotly/validators/_densitymapbox.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 2 | 2021-07-18T11:39:56.000Z | 2021-11-06T17:13:05.000Z | venv/Lib/site-packages/plotly/validators/_densitymapbox.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/plotly/validators/_densitymapbox.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | null | null | null | import _plotly_utils.basevalidators
class DensitymapboxValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="densitymapbox", parent_name="", **kwargs):
super(DensitymapboxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Densitymapbox"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `colorscale`. In case
`colorscale` is unspecified or `autocolorscale`
is true, the default palette will be chosen
according to whether numbers in the `color`
array are all positive, all negative or mixed.
below
Determines if the densitymapbox trace will be
inserted before the layer with the specified
ID. By default, densitymapbox traces are placed
below the first layer of type symbol If set to
'', the layer will be inserted above every
existing layer.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.densitymapbox.Colo
rBar` instance or dict with compatible
properties
colorscale
Sets the colorscale. The colorscale must be an
array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named
color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required.
For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`zmin` and
`zmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Grey
s,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,
Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth
,Electric,Viridis,Cividis.
customdata
Assigns extra data each datum. This may be
useful when listening to hover, click and
selection events. Note that, "scatter" traces
also appends customdata items in the markers
DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud
for customdata .
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for hoverinfo .
hoverlabel
:class:`plotly.graph_objects.densitymapbox.Hove
rlabel` instance or dict with compatible
properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for
several points, "xother" will be added to those
with different x positions from the first
point. An underscore before or after
"(x|y)other" will add a space on that side,
only when this field is shown. Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price:
%{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-time-
format#locale_format for details on the date
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event
data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for hovertemplate .
hovertext
Sets hover text elements associated with each
(lon,lat) pair If a single string, the same
string appears over all the data points. If an
array of string, the items are mapped in order
to the this trace's (lon,lat) coordinates. To
be seen, trace `hoverinfo` must contain a
"text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for hovertext .
ids
Assigns id labels to each datum. These ids for
object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for ids .
lat
Sets the latitude coordinates (in degrees
North).
latsrc
Sets the source reference on Chart Studio Cloud
for lat .
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.densitymapbox.Lege
ndgrouptitle` instance or dict with compatible
properties
legendrank
Sets the legend rank for this trace. Items and
groups with smaller ranks are presented on
top/left side while with `*reversed*
`legend.traceorder` they are on bottom/right
side. The default legendrank is 1000, so that
you can use ranks less than 1000 to place
certain items before all unranked items, and
ranks greater than 1000 to go after all
unranked items.
lon
Sets the longitude coordinates (in degrees
East).
lonsrc
Sets the source reference on Chart Studio Cloud
for lon .
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for meta .
name
Sets the trace name. The trace name appear as
the legend item and on hover.
opacity
Sets the opacity of the trace.
radius
Sets the radius of influence of one `lon` /
`lat` point in pixels. Increasing the value
makes the densitymapbox trace smoother, but
less detailed.
radiussrc
Sets the source reference on Chart Studio Cloud
for radius .
reversescale
Reverses the color mapping if true. If true,
`zmin` will correspond to the last color in the
array and `zmax` will correspond to the first
color.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
showscale
Determines whether or not a colorbar is
displayed for this trace.
stream
:class:`plotly.graph_objects.densitymapbox.Stre
am` instance or dict with compatible properties
subplot
Sets a reference between this trace's data
coordinates and a mapbox subplot. If "mapbox"
(the default value), the data refer to
`layout.mapbox`. If "mapbox2", the data refer
to `layout.mapbox2`, and so on.
text
Sets text elements associated with each
(lon,lat) pair If a single string, the same
string appears over all the data points. If an
array of string, the items are mapped in order
to the this trace's (lon,lat) coordinates. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be
seen in the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud
for text .
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
z
Sets the points' weight. For example, a value
of 10 would be equivalent to having 10 points
of weight 1 in the same spot
zauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `z`) or the bounds set in `zmin` and `zmax`
Defaults to `false` when `zmin` and `zmax` are
set by the user.
zmax
Sets the upper bound of the color domain. Value
should have the same units as in `z` and if
set, `zmin` must be set as well.
zmid
Sets the mid-point of the color domain by
scaling `zmin` and/or `zmax` to be equidistant
to this point. Value should have the same units
as in `z`. Has no effect when `zauto` is
`false`.
zmin
Sets the lower bound of the color domain. Value
should have the same units as in `z` and if
set, `zmax` must be set as well.
zsrc
Sets the source reference on Chart Studio Cloud
for z .
""",
),
**kwargs
)
| 48.592334 | 78 | 0.54288 |
ace44abb14304632547fe7ca0c115fcfd4270029 | 3,547 | py | Python | tests/test_property.py | KIPAC/cfgmdl | ea2903b51594ca1102f812c73ad77228fe51cc00 | [
"BSD-3-Clause"
] | null | null | null | tests/test_property.py | KIPAC/cfgmdl | ea2903b51594ca1102f812c73ad77228fe51cc00 | [
"BSD-3-Clause"
] | null | null | null | tests/test_property.py | KIPAC/cfgmdl | ea2903b51594ca1102f812c73ad77228fe51cc00 | [
"BSD-3-Clause"
] | 1 | 2022-02-22T20:20:46.000Z | 2022-02-22T20:20:46.000Z | #!/usr/bin/env python
"""
Test the model build
"""
import numpy as np
from cfgmdl import Model, Property
from collections import OrderedDict as odict
from cfgmdl import Unit
Unit.update(dict(mm=1e-3))
def check_property(dtype, default, test_val, bad_val=None, cast_val=None):
    """Exercise the full Property contract for one dtype.

    Builds a throwaway Model subclass with four Property flavors
    (defaulted, bare, required, defaulted-to-test_val) and checks
    construction, get/set/delete, casting (*cast_val*) and type
    rejection (*bad_val*).
    """
    class TestClass(Model):
        v = Property(dtype=dtype, default=default, help="A Property")
        v2 = Property(dtype=dtype, help="A Property")
        v3 = Property(dtype=dtype, required=True, help="A Property")
        v4 = Property(dtype=dtype, default=test_val, help="A Property")

    # Omitting the required v3 must fail at construction time.
    # (The unused `bad = ` binding was removed; only the raise matters.)
    try: TestClass()
    except ValueError: pass
    else: raise ValueError("Failed to catch ValueError for missing required Property")

    test_obj = TestClass(v3=default)
    assert test_obj.v == default
    assert test_obj.v2 is None
    assert test_obj.v4 == test_val

    # Internal bookkeeping of the Property descriptors.
    assert test_obj._properties['v'].default_prefix == ""
    assert test_obj._properties['v'].default_value('dtype') is None

    # Plain assignment, including assigning None.
    test_obj.v = test_val
    assert test_obj.v == test_val
    test_obj.v = None
    assert test_obj.v is None

    test_obj.v2 = test_val
    assert test_obj.v2 == test_val

    # Both delattr() and `del` must restore the declared default.
    delattr(test_obj, 'v')
    assert test_obj.v == default

    test_obj.v = test_val
    assert test_obj.v == test_val
    del test_obj.v
    assert test_obj.v == default

    test_obj.v = test_val
    assert test_obj.v == test_val

    # setattr/getattr must behave like plain attribute access.
    setattr(test_obj, 'v', default)
    assert test_obj.v == default
    assert test_obj.v2 == test_val
    assert getattr(test_obj, 'v') == default

    if cast_val is not None:
        # A castable value is coerced to dtype on assignment.
        test_obj.v = cast_val
        assert test_obj.v == dtype(cast_val)
        test_obj.v = default
        assert test_obj.v == default
        setattr(test_obj, 'v', cast_val)
        assert test_obj.v == dtype(cast_val)

    if bad_val is not None:
        # Ill-typed values must be rejected on every assignment path.
        try: test_obj.v = bad_val
        except TypeError: pass
        else: raise TypeError("Failed to catch TypeError in CheckProperty")

        try: test_obj.v2 = bad_val
        except TypeError: pass
        else: raise TypeError("Failed to catch TypeError in CheckProperty")

        try: test_obj.v3 = bad_val
        except TypeError: pass
        else: raise TypeError("Failed to catch TypeError in CheckProperty")

        try: TestClass(v3=bad_val)
        except TypeError: pass
        else: raise TypeError("Failed to catch TypeError in Model.set_attributes")

        # For dict Properties the dict(value=...) wrapper below would be
        # indistinguishable from a legal value, so stop here.
        if dtype == dict:
            return

        try: TestClass(v3=dict(value=bad_val))
        except TypeError: pass
        else: raise TypeError("Failed to catch TypeError in Model.set_attributes")
def test_property_basics():
    """Unknown keyword arguments to Property() must raise AttributeError."""
    try:
        class BadClass(Model):
            vv = Property(dummy=3)
    except AttributeError:
        pass
    else:
        raise AttributeError("Failed to catch AttributeError")

    class GoodClass(Model):
        vv = Property()

    instance = GoodClass()
    # help() on the hidden descriptor storage must work without error.
    help(instance._vv)
def test_property_none():
    # dtype=None: values of any type pass through unchecked.
    check_property(None, None, None)


def test_property_string():
    check_property(str, "aa", "ab")


def test_property_int():
    # bad_val="aa" cannot be coerced to int -> must raise TypeError.
    check_property(int, 1, 2, "aa")


def test_property_int2():
    # A type object is likewise an invalid value for an int Property.
    check_property(int, 1, 2, float)


def test_property_float():
    # cast_val=1: ints are accepted and cast to float.
    check_property(float, 1., 2., "aa", 1)
def test_property_list():
    check_property(list, [], [3, 4], None)


def test_property_list_cast():
    # Renamed from a duplicate `test_property_list`: the second definition
    # shadowed the first, so the non-cast variant above never ran.
    # cast_val=(3, 4): tuples are accepted and cast to list.
    check_property(list, [], [3, 4], None, (3, 4))
def test_property_dict():
    # No bad_val: check_property skips its dict(value=...) rejection
    # branch for dict dtypes anyway.
    check_property(dict, {}, {3:4})
| 25.702899 | 86 | 0.64759 |
ace44af4ac2127982c84aabafdb4c739f31b79f8 | 167 | py | Python | first-homework.py | ChrisAvalos/astr-119 | 26f8bf43fd97a081bb5afdc383fb97afd71250c3 | [
"MIT"
] | null | null | null | first-homework.py | ChrisAvalos/astr-119 | 26f8bf43fd97a081bb5afdc383fb97afd71250c3 | [
"MIT"
] | 5 | 2020-10-03T00:50:21.000Z | 2020-10-14T21:43:13.000Z | first-homework.py | ChrisAvalos/astr-119 | 26f8bf43fd97a081bb5afdc383fb97afd71250c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#this program will write
#My full name and preferred pronouns
print("Hello, my name is Christopher Avalos and I prefer he/him/they pronouns") | 27.833333 | 79 | 0.766467 |
ace44b1b4b1410ea5c51a164f5c2df4a5ca8eeee | 3,759 | py | Python | pyoutline/tests/modules/shell_test.py | splhack/OpenCue | 068d0e304fd3d3c5b3f1da9d2588da20c6fdf330 | [
"Apache-2.0"
] | null | null | null | pyoutline/tests/modules/shell_test.py | splhack/OpenCue | 068d0e304fd3d3c5b3f1da9d2588da20c6fdf330 | [
"Apache-2.0"
] | 1 | 2020-09-09T20:39:24.000Z | 2020-09-09T20:39:24.000Z | pyoutline/tests/modules/shell_test.py | splhack/OpenCue | 068d0e304fd3d3c5b3f1da9d2588da20c6fdf330 | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
import mock
import tempfile
import unittest
from FileSequence import FrameSet
import outline
from outline.loader import Outline
from outline.modules.shell import Shell
from outline.modules.shell import ShellSequence
from outline.modules.shell import ShellScript
from .. import test_utils
class ShellModuleTest(unittest.TestCase):
    """Shell Module Tests.

    Every test patches outline.layer.Layer.system so no real shell
    command runs; the assertions verify which commands *would* have
    been executed for the requested frame range.
    """

    def setUp(self):
        # Reset the module-level "current outline" singleton so state
        # does not leak between tests.
        outline.Outline.current = None

    @mock.patch('outline.layer.Layer.system')
    def testShell(self, systemMock):
        """Test a simple shell command."""
        command = ['/bin/ls']
        shell = Shell('bah', command=command)
        shell._execute(FrameSet('5-6'))
        # One system() call per frame, same command each time.
        systemMock.assert_has_calls([
            mock.call(command, frame=5),
            mock.call(command, frame=6),
        ])

    @mock.patch('outline.layer.Layer.system')
    def testShellSequence(self, systemMock):
        """Test a simple sequence of shell commands"""
        commandCount = 10
        commands = ['/bin/echo %d' % (frame+1) for frame in range(commandCount)]
        shellSeq = ShellSequence('bah', commands=commands, cores=10, memory='512m')
        shellSeq._execute(FrameSet('5-6'))
        # The layer's frame range is expected to span one frame per command.
        self.assertEqual('1-%d' % commandCount, shellSeq.get_frame_range())
        # Frames 5 and 6 select the 5th and 6th command of the sequence.
        systemMock.assert_has_calls([
            mock.call('/bin/echo 5'),
            mock.call('/bin/echo 6'),
        ])

    @mock.patch('outline.layer.Layer.system')
    def testShellScript(self, systemMock):
        """Test a custom shell script layer"""
        # The script will be copied into the session directory so we have to create a dummy
        # session to use.
        layerName = 'arbitrary-layer'
        with test_utils.TemporarySessionDirectory(), tempfile.NamedTemporaryFile() as scriptFile:
            scriptContents = '# !/bin/sh\necho zoom zoom zoom'
            with open(scriptFile.name, 'w') as fp:
                fp.write(scriptContents)
            outln = Outline()
            outln.setup()
            # Path where the layer is expected to place the session copy
            # of the script (renamed to 'script').
            expectedSessionPath = outln.get_session().put_file(
                scriptFile.name, layer=layerName, rename='script')

            shellScript = ShellScript(layerName, script=scriptFile.name)
            shellScript.set_outline(outln)
            shellScript._setup()
            shellScript._execute(FrameSet('5-6'))

            # The session copy must match the original script contents.
            with open(expectedSessionPath) as fp:
                sessionScriptContents = fp.read()
            self.assertEqual(scriptContents, sessionScriptContents)
            # The layer executes the session copy, not the original path.
            systemMock.assert_has_calls([mock.call(expectedSessionPath, frame=5)])

    @mock.patch('outline.layer.Layer.system')
    def testShellToString(self, systemMock):
        """Test a string shell command."""
        command = '/bin/ls -l ./'
        shell = Shell('bah', command=command)
        shell._execute(FrameSet('5-6'))
        systemMock.assert_has_calls([
            mock.call(command, frame=5),
            mock.call(command, frame=6),
        ])
# Allow running this test module directly (e.g. `python shell_test.py`)
# instead of only through a test runner.
if __name__ == '__main__':
    unittest.main()
| 30.811475 | 97 | 0.659218 |
ace44b1f2d712615965b81bd23e053784526e649 | 305 | py | Python | examples/testPyside2_02.py | hooloong/hooGnoolTools | 8fab1da86994a7731d7dc3c455f3bc79370a2083 | [
"MIT"
] | 1 | 2018-04-15T06:29:21.000Z | 2018-04-15T06:29:21.000Z | examples/testPyside2_02.py | hooloong/hooGnoolTools | 8fab1da86994a7731d7dc3c455f3bc79370a2083 | [
"MIT"
] | null | null | null | examples/testPyside2_02.py | hooloong/hooGnoolTools | 8fab1da86994a7731d7dc3c455f3bc79370a2083 | [
"MIT"
] | null | null | null | # coding:utf-8
import sys
from PySide2.QtWidgets import QApplication, QLabel
if __name__ == "__main__":
app = QApplication([])
label = QLabel("Hello World")
label.setWindowTitle("My First Application")
label.setGeometry(300, 300, 250, 175)
label.show()
sys.exit(app.exec_())
| 17.941176 | 50 | 0.67541 |
ace44bb6b2351aec90cc0ad86c7858d1857291fe | 1,467 | py | Python | MLProject/src/write_utils.py | FlashYoshi/UGentProjects | 5561ce3bb73d5bc5bf31bcda2be7e038514c7072 | [
"MIT"
] | null | null | null | MLProject/src/write_utils.py | FlashYoshi/UGentProjects | 5561ce3bb73d5bc5bf31bcda2be7e038514c7072 | [
"MIT"
] | null | null | null | MLProject/src/write_utils.py | FlashYoshi/UGentProjects | 5561ce3bb73d5bc5bf31bcda2be7e038514c7072 | [
"MIT"
] | 1 | 2019-07-18T11:23:49.000Z | 2019-07-18T11:23:49.000Z | import os
import numpy as np
import pandas as pd
from folder_utils import create_train_dict, list_all_test_data
# Creates the header for the csv
def create_header(keys):
l = []
for key in keys:
l.append(key)
return np.array(l)
def create_csv(body, keys, root='..'):
filename = os.path.join(root, 'result.csv')
header = create_header(keys)
columns = [x + 1 for x in range(len(body))]
df = pd.DataFrame(body, index=columns, columns=header)
df.to_csv(filename, index=True, index_label='Id', header=True, sep=',')
###########################################################################################
# These functions are not being used in our best solution but have been used in the past. #
###########################################################################################
def calculate_prior_probabilities():
d = create_train_dict()
probs = dict()
total = 0
for val in d.values():
for k, v in val.items():
probs[k] = len(v)
total += len(v)
for k in probs.keys():
probs[k] /= total
return probs
def create_probs_matrix():
probs = calculate_prior_probabilities()
line = np.empty(shape=(1, len(probs)))
for i, prob in enumerate(probs.values()):
line[0, i] = prob
arr = np.tile(line, (len(list_all_test_data()), 1))
return arr
def create_prob_csv():
    """Write the tiled prior-probability matrix to result.csv.

    Bug fix: ``create_csv`` requires a ``keys`` argument for the column
    header, but the original call passed only ``body`` and would always
    raise ``TypeError``. The class labels from the priors dict are used
    as the header; they come from the same dict that orders the matrix
    columns, so labels and columns line up (assuming dict iteration order
    is stable between the two calls — true on Python 3.7+).
    """
    body = create_probs_matrix()
    keys = list(calculate_prior_probabilities().keys())
    create_csv(body, keys)
| 23.66129 | 91 | 0.557601 |
ace44c897bd961b3948d6f58a8d9b8727b76244c | 4,444 | py | Python | planet/generator/tests/test_foaf.py | TUM-LIS/librecores-web | c5d9d52ef96df3ee7a4c56ca861beaacc8aab807 | [
"MIT"
] | 119 | 2015-01-20T17:24:53.000Z | 2022-03-16T20:02:59.000Z | planet/generator/tests/test_foaf.py | TUM-LIS/librecores-web | c5d9d52ef96df3ee7a4c56ca861beaacc8aab807 | [
"MIT"
] | 405 | 2016-02-21T14:21:00.000Z | 2022-02-18T01:57:44.000Z | planet/generator/tests/test_foaf.py | TUM-LIS/librecores-web | c5d9d52ef96df3ee7a4c56ca861beaacc8aab807 | [
"MIT"
] | 56 | 2015-01-20T21:07:53.000Z | 2022-03-27T10:34:46.000Z | #!/usr/bin/env python
import unittest, os, shutil
from planet.foaf import foaf2config
from ConfigParser import ConfigParser
from planet import config, logger
workdir = 'tests/work/config/cache'
blogroll = 'http://journal.dajobe.org/journal/2003/07/semblogs/bloggers.rdf'
testfeed = "http://dannyayers.com/feed/rdf"
test_foaf_document = '''
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:rss="http://purl.org/rss/1.0/"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<foaf:Agent rdf:nodeID="id2245354">
<foaf:name>Danny Ayers</foaf:name>
<rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
<foaf:weblog>
<foaf:Document rdf:about="http://dannyayers.com/">
<dc:title>Raw Blog by Danny Ayers</dc:title>
<rdfs:seeAlso>
<rss:channel rdf:about="http://dannyayers.com/feed/rdf">
<foaf:maker rdf:nodeID="id2245354"/>
<foaf:topic rdf:resource="http://www.w3.org/2001/sw/"/>
<foaf:topic rdf:resource="http://www.w3.org/RDF/"/>
</rss:channel>
</rdfs:seeAlso>
</foaf:Document>
</foaf:weblog>
<foaf:interest rdf:resource="http://www.w3.org/2001/sw/"/>
<foaf:interest rdf:resource="http://www.w3.org/RDF/"/>
</foaf:Agent>
</rdf:RDF>
'''.strip()
class FoafTest(unittest.TestCase):
    """Unit tests for the foaf2config reading-list parser."""

    def setUp(self):
        # Fresh parser per test, with the blogroll section pre-registered the
        # way the planet config registers reading lists.
        self.config = ConfigParser()
        self.config.add_section(blogroll)

    def tearDown(self):
        # Drop the cache directory (and its now-empty parent) if a test made it.
        if os.path.exists(workdir):
            shutil.rmtree(workdir)
            os.removedirs(os.path.split(workdir)[0])

    #
    # Tests
    #

    def test_foaf_document(self):
        """The sample document yields the agent's foaf:name."""
        foaf2config(test_foaf_document, self.config)
        self.assertEqual('Danny Ayers', self.config.get(testfeed, 'name'))

    def test_no_foaf_name(self):
        """Without foaf:name, the weblog's dc:title is used instead."""
        document = test_foaf_document.replace('foaf:name', 'foaf:title')
        foaf2config(document, self.config)
        self.assertEqual('Raw Blog by Danny Ayers',
            self.config.get(testfeed, 'name'))

    def test_no_weblog(self):
        """No rdfs:seeAlso link means no feed section is created."""
        document = test_foaf_document.replace('rdfs:seeAlso', 'rdfs:seealso')
        foaf2config(document, self.config)
        self.assertFalse(self.config.has_section(testfeed))

    def test_invalid_xml_before(self):
        """Junk before the XML declaration makes the document unparseable."""
        document = '\n<?xml version="1.0" encoding="UTF-8"?>' + test_foaf_document
        foaf2config(document, self.config)
        self.assertFalse(self.config.has_section(testfeed))

    def test_invalid_xml_after(self):
        """Dropping the final character is tolerated by the parser."""
        document = test_foaf_document.strip()[:-1]
        foaf2config(document, self.config)
        self.assertEqual('Danny Ayers', self.config.get(testfeed, 'name'))

    def test_online_accounts(self):
        config.load('tests/data/config/foaf.ini')
        subscriptions = sorted(config.subscriptions())
        self.assertEqual([
            'http://api.flickr.com/services/feeds/photos_public.gne?id=77366516@N00',
            'http://del.icio.us/rss/eliast',
            'http://torrez.us/feed/rdf',
        ], subscriptions)

    def test_multiple_subscriptions(self):
        config.load('tests/data/config/foaf-multiple.ini')
        self.assertEqual(2, len(config.reading_lists()))
        subscriptions = sorted(config.subscriptions())
        self.assertEqual(5, len(subscriptions))
        self.assertEqual([
            'http://api.flickr.com/services/feeds/photos_public.gne?id=77366516@N00',
            'http://api.flickr.com/services/feeds/photos_public.gne?id=SOMEID',
            'http://del.icio.us/rss/SOMEID',
            'http://del.icio.us/rss/eliast',
            'http://torrez.us/feed/rdf',
        ], subscriptions)

    def test_recursive(self):
        config.load('tests/data/config/foaf-deep.ini')
        subscriptions = sorted(config.subscriptions())
        self.assertEqual([
            'http://api.flickr.com/services/feeds/photos_public.gne?id=77366516@N00',
            'http://del.icio.us/rss/eliast',
            'http://del.icio.us/rss/leef',
            'http://del.icio.us/rss/rubys',
            'http://intertwingly.net/blog/atom.xml',
            'http://thefigtrees.net/lee/life/atom.xml',
            'http://torrez.us/feed/rdf',
        ], subscriptions)
# These tests only make sense if libRDF (Redland RDF) is installed; without
# it, strip the test_* methods so the module still imports and runs cleanly.
try:
    import RDF
except ImportError:
    # Fixes: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only the import failure. Also snapshot the
    # keys with list() so the class dict is not mutated while iterating
    # (a RuntimeError on Python 3, where .keys() is a live view).
    logger.warn("Redland RDF is not available => can't test FOAF reading lists")
    for key in list(FoafTest.__dict__.keys()):
        if key.startswith('test_'):
            delattr(FoafTest, key)
# Allow running this test module directly (e.g. `python test_foaf.py`)
# instead of only through a test runner.
if __name__ == '__main__':
    unittest.main()
| 34.71875 | 99 | 0.65144 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.