Dataset schema (each record is one source file; ⌀ marks nullable columns):

- hexsha: string (length 40)
- size: int64 (4 to 1.02M)
- ext: string (8 classes)
- lang: string (1 class)
- max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: string (length 4 to 209)
- max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: string (length 5 to 121)
- max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, ⌀)
- max_issues_count: int64 (1 to 67k, ⌀)
- max_forks_count: int64 (1 to 105k, ⌀)
- max_stars_repo_stars_event_min_datetime / max_stars_repo_stars_event_max_datetime: string (length 24, ⌀)
- max_issues_repo_issues_event_min_datetime / max_issues_repo_issues_event_max_datetime: string (length 24, ⌀)
- max_forks_repo_forks_event_min_datetime / max_forks_repo_forks_event_max_datetime: string (length 24, ⌀)
- content: string (length 4 to 1.02M)
- avg_line_length: float64 (1.07 to 66.1k)
- max_line_length: int64 (4 to 266k)
- alphanum_fraction: float64 (0.01 to 1)

In every record below, the repo path, repo name, head hexsha, and licenses are identical across the stars/issues/forks column groups, so each record header lists them once.

---
hexsha: 9eca13d83542dee571b86909b5bbdd623047cd4a | size: 1,091 | ext: py | lang: Python
path: tests/test_timed.py | repo: luis-puhl/minas-py | head: 7a665da366ab65bbb05b1713292492cf5ab4a859 | licenses: ["MIT"]
stars: 4 (2019-05-01T01:29:32.000Z to 2019-06-22T08:16:20.000Z) | issues: 3 (2020-03-24T17:04:13.000Z to 2021-06-08T19:50:48.000Z) | forks: null
import unittest
import time
import inspect
import typing
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from minas.timed import Timed
class TimedFunctionsTest(unittest.TestCase):
def setUp(self):
self.tm = Timed()
def tearDown(self):
pass
def test_resume_plot(self):
tm = self.tm
timed = tm.timed
self.assertIsInstance(timed, typing.Callable)
funcs = []
for i in range(10):
mu = np.random.random()
sigma = np.random.random()
# @timed
            def func(mu=mu, sigma=sigma):  # bind the current mu/sigma (avoids the late-binding closure pitfall)
                value = np.random.normal(loc=mu, scale=sigma)
                time.sleep(abs(value))
                return value
func.__name__ = 'func_' + str(i)
funcs.append(timed(func))
#
for i in range(np.random.randint(10, 100)):
funcs[np.random.randint(0, len(funcs))]()
#
print(tm.timedResume)
fig, ax = tm.mkTimedResumePlot()
plt.show()
if __name__ == '__main__':
unittest.main()
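For readers unfamiliar with the decorator being exercised above, here is a minimal sketch of a timing decorator in the same spirit as `Timed.timed`. The `SimpleTimed` class and its attributes are illustrative assumptions, not the actual `minas.timed` implementation.

```python
import functools
import time
from collections import defaultdict


class SimpleTimed:
    """Illustrative stand-in for a Timed-like helper (not the minas implementation)."""

    def __init__(self):
        # function name -> list of elapsed wall-clock times in seconds
        self.calls = defaultdict(list)

    def timed(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            result = func(*args, **kwargs)
            self.calls[func.__name__].append(time.perf_counter() - start)
            return result
        return wrapper
```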
file stats: avg_line_length 23.717391 | max_line_length 61 | alphanum_fraction 0.566453

---
hexsha: dced02996c6cadaec264bd2fc9ed701f1f352e84 | size: 2,155 | ext: py | lang: Python
path: ProjectCode/DataLoader.py | repo: levihedges/email-anomaly-tool | head: 22a409e5784a193a8d9bf175a03b8826a57872b4 | licenses: ["CC0-1.0"]
stars: null | issues: null | forks: null
import elasticsearch
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import pandas as pd
import numpy as np
class DataLoader:
def __init__(self, index, size, node):
self.index = index
self.size = size
self.node = node
def fetch_data(self):
try:
elastic_node = Elasticsearch(self.node)
search_response = elastic_node.search(
index = self.index,
body={},
size = self.size
)
except elasticsearch.exceptions.NotFoundError:
print("The specified index could not be found on the Elasticsearch instance. Please check that the index was entered properly and that the instance is running then re-attempt.")
exit()
except elasticsearch.exceptions.ConnectionError:
print("The specified URI could not be connected to. Please check that the URI was entered properly, that the Elasticsearch instance is running and that you have a suitable network connection then re-attempt.")
exit()
return search_response
def create_dataframe(self, response):
documents = response["hits"]["hits"]
es_fields = {}
        for doc in documents:
            source = doc["_source"]
            for key, val in source.items():
                try:
                    es_fields[key] = np.append(es_fields[key], val)
                except KeyError:
                    es_fields[key] = np.array([val])
es_dataframe = pd.DataFrame(es_fields)
return es_dataframe
    def send_to_elastic(self, flagged_events, date):
        elastic_node = Elasticsearch(self.node)
        helpers.bulk(elastic_node, self.store_events(flagged_events, date))
    def store_events(self, flagged_events, date):
        fe_list = flagged_events.iterrows()
        for index, document in fe_list:
            yield {
                "_index": 'flagged_events_' + date,
                "_type": 'doc',
                "_id": f"{document['id'] + index}",
                "_source": filterKeys(document),  # filterKeys is assumed to be defined elsewhere in this project
            }
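A minimal usage sketch for the class above; the index name, result size, and node URL are placeholders, and a running Elasticsearch instance is assumed.

```python
# Hypothetical usage; "emails" and the node URL are placeholder values.
if __name__ == "__main__":
    loader = DataLoader(index="emails", size=1000, node="http://localhost:9200")
    response = loader.fetch_data()              # query Elasticsearch (exits on connection/index errors)
    frame = loader.create_dataframe(response)   # flatten the hits into a pandas DataFrame
    print(frame.head())
```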
file stats: avg_line_length 35.327869 | max_line_length 221 | alphanum_fraction 0.6

---
hexsha: 266d6d3155340817a0e55651d19bb6281d885ff7 | size: 3,469 | ext: py | lang: Python
path: tbot/web/handlers/api/twitch/connect_discord.py | repo: thomaserlang/tbot | head: 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | licenses: ["MIT"]
stars: null | issues: 10 (2022-02-14T11:40:20.000Z to 2022-03-09T22:44:03.000Z) | forks: 1 (2020-09-19T16:38:24.000Z to 2020-09-19T16:38:24.000Z)
import logging, json, base64
from tornado import web, httpclient, escape
from urllib import parse
from tbot import config, utils
from ..base import Base_handler, Api_handler, Level
class Handler(Api_handler):
@Level(3)
async def get(self, channel_id):
r = await self.db.fetchone(
'SELECT discord_server_id, discord_server_name FROM twitch_channels WHERE channel_id=%s',
(channel_id,)
)
self.write_object({
            'connected': True if r and r['discord_server_id'] else False,
'name': r['discord_server_name'] if r else None,
})
@Level(3)
async def delete(self, channel_id):
r = await self.db.fetchone(
'UPDATE twitch_channels SET discord_server_id=null WHERE channel_id=%s',
(channel_id,)
)
self.set_status(204)
@Level(3)
async def post(self, channel_id):
r = await self.db.fetchone(
'select name from twitch_channels where channel_id=%s',
(channel_id,)
)
if not r:
raise Exception('Unknown channel {}'.format(channel_id))
self.redirect('https://discordapp.com/api/oauth2/authorize?'+parse.urlencode({
'client_id': config['discord']['client_id'],
'permissions': config['discord']['permissions'],
'redirect_uri': parse.urljoin(config['web']['base_url'], 'connect/discord'),
'scope': 'bot',
'response_type': 'code',
'state': base64.b64encode(utils.json_dumps({
'channel_id': channel_id,
'channel_name': r['name'],
}).encode('utf-8')),
}))
class Receive_handler(Base_handler):
async def get(self):
code = self.get_argument('code', None)
state = utils.json_loads(base64.b64decode(self.get_argument('state')))
await self.check_access(state['channel_id'])
http = httpclient.AsyncHTTPClient()
response = await http.fetch('https://discordapp.com/api/oauth2/token', body=parse.urlencode({
'client_id': config['discord']['client_id'],
'client_secret': config['discord']['client_secret'],
'code': code,
'redirect_uri': parse.urljoin(config['web']['base_url'], 'connect/discord'),
'grant_type': 'authorization_code',
}), method='POST', headers={'Content-Type': 'application/x-www-form-urlencoded'}, raise_error=False)
if response.code != 200:
logging.error(escape.native_str(response.body))
self.write('Unable to verify you at Discord, please try again.')
return
data = json.loads(escape.native_str(response.body))
if 'guild' not in data:
e = 'oAuth2 grant is not enabled for the bot. Enable it here: https://discordapp.com/developers/applications/{}/bots'.format(\
config['discord']['client_id']
)
logging.error(e)
self.write(e)
return
await self.db.execute('UPDATE twitch_channels SET discord_server_id=%s, discord_server_name=%s WHERE channel_id=%s',
(data['guild']['id'], data['guild']['name'], state['channel_id'])
)
if state['channel_name']:
self.redirect('/twitch/{}/discord'.format(state['channel_name']))
else:
self.redirect('/twitch/dashboard')
@Level(3)
async def check_access(self, channel_id):
pass
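A standalone sketch of the `state` round-trip used above: the channel info is JSON-encoded and base64-encoded before the OAuth redirect, then decoded again in `Receive_handler.get`. The values below are placeholders, and the standard library `json` module stands in for the project's `utils.json_dumps`/`json_loads` helpers.

```python
import base64
import json

# Encode (as done before redirecting to Discord).
state = base64.b64encode(json.dumps({"channel_id": "123", "channel_name": "example"}).encode("utf-8"))

# Decode (as done when Discord redirects back with ?state=...).
decoded = json.loads(base64.b64decode(state))
assert decoded["channel_id"] == "123"
```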
file stats: avg_line_length 40.811765 | max_line_length 138 | alphanum_fraction 0.600749

---
hexsha: 694400dac07b3bcf5518425ba5e076d1603d1ddc | size: 1,584 | ext: py | lang: Python
path: setup.py | repo: nemesgyadam/easyimages | head: 79d27288be3c72d4d987312c108a1b2fb952b3ae | licenses: ["MIT"]
stars: 12 (2018-08-28T18:52:46.000Z to 2021-12-20T02:30:18.000Z) | issues: 1 (2020-08-06T16:05:37.000Z to 2020-08-18T12:52:42.000Z) | forks: 1 (2021-05-10T12:32:02.000Z to 2021-05-10T12:32:02.000Z)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip < 10
from pip.req import parse_requirements
import os
__version__ = '1.91'
with open('README.MD') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
dir_path = os.path.dirname(os.path.realpath(__file__))
req_path = os.path.join(dir_path, 'requirements.txt')
install_reqs = list(parse_requirements(req_path, session='hack'))
try:
    reqs = [str(ir.req) for ir in install_reqs]
except AttributeError:  # newer pip versions expose .requirement instead of .req
    reqs = [str(ir.requirement) for ir in install_reqs]
setup(
author="Jakub Cieslik",
author_email='kubacieslik@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Images made easy",
install_requires=reqs,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='easyimages',
name='easyimages',
packages=find_packages(include=['easyimages']),
setup_requires=reqs,
test_suite='tests',
tests_require=reqs + ['torch', 'torchvision'],
url='https://github.com/i008/easyimages',
version=__version__,
zip_safe=False,
)
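A hedged alternative to the pip-internal `parse_requirements` call above: reading `requirements.txt` directly avoids depending on pip's private API, which changes between versions. This is a sketch, not part of the package's actual setup.py.

```python
def read_requirements(path="requirements.txt"):
    """Return requirement strings, skipping blank lines and comments."""
    with open(path) as fh:
        return [line.strip() for line in fh if line.strip() and not line.startswith("#")]

# reqs = read_requirements(req_path)
```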
file stats: avg_line_length 27.789474 | max_line_length 59 | alphanum_fraction 0.672348

---
hexsha: b60c96a04618483025dc998f4f7d0c0db552a971 | size: 1,992 | ext: py | lang: Python
path: tests/emmet-core/molecules/test_orbitals.py | repo: acrutt/emmet | head: e98100c9932f145a3ad3087ddb7aa9b779d9a191 | licenses: ["BSD-3-Clause-LBNL"]
stars: null | issues: null | forks: null
import json
import datetime
import copy
import pytest
from monty.io import zopen
from monty.serialization import loadfn
from emmet.core.qchem.task import TaskDocument
from emmet.core.molecules.orbitals import OrbitalDoc
@pytest.fixture(scope="session")
def closed_shell(test_dir):
task = TaskDocument(**loadfn((test_dir / "closed_shell_nbo_task.json.gz")))
return task
@pytest.fixture(scope="session")
def open_shell(test_dir):
task = TaskDocument(**loadfn((test_dir / "open_shell_nbo_task.json.gz")))
return task
def test_orbital(closed_shell, open_shell):
# Test closed-shell NBO parsing
doc = OrbitalDoc.from_task(closed_shell, "test-123456", deprecated=False)
assert doc.property_name == "natural bonding orbitals"
    assert doc.open_shell is False
assert len(doc.nbo_population) == len(closed_shell.output.initial_molecule)
assert doc.nbo_population[0].valence_electrons == pytest.approx(2.75426)
assert len(doc.nbo_lone_pairs) == 3
assert doc.nbo_lone_pairs[0].s_character == pytest.approx(0.02)
assert doc.nbo_lone_pairs[0].atom_index == 0
assert len(doc.nbo_bonds) == 10
assert doc.nbo_bonds[0].atom1_s_character == pytest.approx(29.93)
assert doc.nbo_bonds[0].atom1_index == 0
assert doc.nbo_bonds[0].atom2_index == 3
assert len(doc.nbo_interactions) == 95
assert doc.nbo_interactions[0].donor_index == 8
assert doc.nbo_interactions[0].acceptor_index == 19
assert doc.nbo_interactions[0].energy_difference == pytest.approx(0.95)
assert doc.alpha_population is None
assert doc.beta_population is None
# Test open-shell NBO parsing
doc = OrbitalDoc.from_task(open_shell, "test-123456", deprecated=False)
    assert doc.open_shell is True
assert len(doc.nbo_population) == len(open_shell.output.initial_molecule)
assert doc.alpha_population is not None
assert doc.beta_population is not None
assert doc.nbo_lone_pairs is None
assert doc.nbo_bonds is None
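The `test_dir` fixture used by both session fixtures above is assumed to be provided elsewhere, typically in a `conftest.py`. A hypothetical sketch of such a fixture (the real emmet test suite defines its own):

```python
from pathlib import Path

import pytest


@pytest.fixture(scope="session")
def test_dir():
    # Hypothetical location of the *.json.gz task files loaded by the fixtures above.
    return Path(__file__).resolve().parent / "test_files"
```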
file stats: avg_line_length 33.762712 | max_line_length 79 | alphanum_fraction 0.746486

---
hexsha: 55e0c2067304fcc6233350d95f98468c43de65ae | size: 7,661 | ext: py | lang: Python
path: python_modules/dagster/dagster/core/storage/local_compute_log_manager.py | repo: basilvetas/dagster | head: b08f5534a0b0277dab38cb7b6a46d324e94b8940 | licenses: ["Apache-2.0"]
stars: 2 (2021-06-21T17:50:26.000Z to 2021-06-21T19:14:23.000Z) | issues: null | forks: 1 (2021-08-18T17:21:57.000Z to 2021-08-18T17:21:57.000Z)
import hashlib
import os
import sys
from collections import defaultdict
from contextlib import contextmanager
from dagster import StringSource, check
from dagster.core.execution.compute_logs import mirror_stream_to_file
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.serdes import ConfigurableClass, ConfigurableClassData
from dagster.utils import ensure_dir, touch_file
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers.polling import PollingObserver
from .compute_log_manager import (
MAX_BYTES_FILE_READ,
ComputeIOType,
ComputeLogFileData,
ComputeLogManager,
ComputeLogSubscription,
)
WATCHDOG_POLLING_TIMEOUT = 2.5
IO_TYPE_EXTENSION = {ComputeIOType.STDOUT: "out", ComputeIOType.STDERR: "err"}
MAX_FILENAME_LENGTH = 255
class LocalComputeLogManager(ComputeLogManager, ConfigurableClass):
"""Stores copies of stdout & stderr for each compute step locally on disk.
"""
def __init__(self, base_dir, inst_data=None):
self._base_dir = base_dir
self._subscription_manager = LocalComputeLogSubscriptionManager(self)
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
@contextmanager
def _watch_logs(self, pipeline_run, step_key=None):
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
check.opt_str_param(step_key, "step_key")
key = self.get_key(pipeline_run, step_key)
outpath = self.get_local_path(pipeline_run.run_id, key, ComputeIOType.STDOUT)
errpath = self.get_local_path(pipeline_run.run_id, key, ComputeIOType.STDERR)
with mirror_stream_to_file(sys.stdout, outpath):
with mirror_stream_to_file(sys.stderr, errpath):
yield
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {"base_dir": StringSource}
@staticmethod
def from_config_value(inst_data, config_value):
return LocalComputeLogManager(inst_data=inst_data, **config_value)
def _run_directory(self, run_id):
return os.path.join(self._base_dir, run_id, "compute_logs")
def get_local_path(self, run_id, key, io_type):
check.inst_param(io_type, "io_type", ComputeIOType)
return self._get_local_path(run_id, key, IO_TYPE_EXTENSION[io_type])
def complete_artifact_path(self, run_id, key):
return self._get_local_path(run_id, key, "complete")
def _get_local_path(self, run_id, key, extension):
filename = "{}.{}".format(key, extension)
if len(filename) > MAX_FILENAME_LENGTH:
filename = "{}.{}".format(hashlib.md5(key.encode("utf-8")).hexdigest(), extension)
return os.path.join(self._run_directory(run_id), filename)
def read_logs_file(self, run_id, key, io_type, cursor=0, max_bytes=MAX_BYTES_FILE_READ):
path = self.get_local_path(run_id, key, io_type)
if not os.path.exists(path) or not os.path.isfile(path):
return ComputeLogFileData(path=path, data=None, cursor=0, size=0, download_url=None)
# See: https://docs.python.org/2/library/stdtypes.html#file.tell for Windows behavior
with open(path, "rb") as f:
f.seek(cursor, os.SEEK_SET)
data = f.read(max_bytes)
cursor = f.tell()
stats = os.fstat(f.fileno())
# local download path
download_url = self.download_url(run_id, key, io_type)
return ComputeLogFileData(
path=path,
data=data.decode("utf-8"),
cursor=cursor,
size=stats.st_size,
download_url=download_url,
)
def is_watch_completed(self, run_id, key):
return os.path.exists(self.complete_artifact_path(run_id, key))
def on_watch_start(self, pipeline_run, step_key):
pass
def get_key(self, pipeline_run, step_key):
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
check.opt_str_param(step_key, "step_key")
return step_key or pipeline_run.pipeline_name
def on_watch_finish(self, pipeline_run, step_key=None):
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
check.opt_str_param(step_key, "step_key")
key = self.get_key(pipeline_run, step_key)
touchpath = self.complete_artifact_path(pipeline_run.run_id, key)
touch_file(touchpath)
def download_url(self, run_id, key, io_type):
check.inst_param(io_type, "io_type", ComputeIOType)
return "/download/{}/{}/{}".format(run_id, key, io_type.value)
def on_subscribe(self, subscription):
self._subscription_manager.add_subscription(subscription)
def dispose(self):
self._subscription_manager.dispose()
class LocalComputeLogSubscriptionManager(object):
def __init__(self, manager):
self._manager = manager
self._subscriptions = defaultdict(list)
self._watchers = {}
self._observer = PollingObserver(WATCHDOG_POLLING_TIMEOUT)
self._observer.start()
def _key(self, run_id, key):
return "{}:{}".format(run_id, key)
def add_subscription(self, subscription):
check.inst_param(subscription, "subscription", ComputeLogSubscription)
key = self._key(subscription.run_id, subscription.key)
self._subscriptions[key].append(subscription)
self.watch(subscription.run_id, subscription.key)
def remove_all_subscriptions(self, run_id, key):
key = self._key(run_id, key)
for subscription in self._subscriptions.pop(key, []):
subscription.complete()
def watch(self, run_id, key):
key = self._key(run_id, key)
if key in self._watchers:
return
update_paths = [
self._manager.get_local_path(run_id, key, ComputeIOType.STDOUT),
self._manager.get_local_path(run_id, key, ComputeIOType.STDERR),
]
complete_paths = [self._manager.complete_artifact_path(run_id, key)]
directory = os.path.dirname(self._manager.get_local_path(run_id, key, ComputeIOType.STDERR))
ensure_dir(directory)
self._watchers[key] = self._observer.schedule(
LocalComputeLogFilesystemEventHandler(self, run_id, key, update_paths, complete_paths),
str(directory),
)
def notify_subscriptions(self, run_id, key):
key = self._key(run_id, key)
for subscription in self._subscriptions[key]:
subscription.fetch()
def unwatch(self, run_id, key, handler):
key = self._key(run_id, key)
if key in self._watchers:
self._observer.remove_handler_for_watch(handler, self._watchers[key])
del self._watchers[key]
def dispose(self):
self._observer.stop()
class LocalComputeLogFilesystemEventHandler(PatternMatchingEventHandler):
def __init__(self, manager, run_id, key, update_paths, complete_paths):
self.manager = manager
self.run_id = run_id
self.key = key
self.update_paths = update_paths
self.complete_paths = complete_paths
patterns = update_paths + complete_paths
super(LocalComputeLogFilesystemEventHandler, self).__init__(patterns=patterns)
def on_created(self, event):
if event.src_path in self.complete_paths:
self.manager.remove_all_subscriptions(self.run_id, self.key)
self.manager.unwatch(self.run_id, self.key, self)
def on_modified(self, event):
if event.src_path in self.update_paths:
self.manager.notify_subscriptions(self.run_id, self.key)
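A standalone sketch of the cursor-based incremental read that `read_logs_file` performs above: seek to the previous cursor, read up to `max_bytes`, and return the new cursor so the next call resumes where this one stopped. The function name and default values are illustrative, not part of the Dagster API.

```python
import os


def read_chunk(path, cursor=0, max_bytes=1024 * 1024):
    """Read up to max_bytes from path starting at cursor; return (data, new_cursor)."""
    with open(path, "rb") as f:
        f.seek(cursor, os.SEEK_SET)  # resume where the previous call stopped
        data = f.read(max_bytes)
        return data, f.tell()
```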
file stats: avg_line_length 37.553922 | max_line_length 100 | alphanum_fraction 0.693904

---
hexsha: a8dfbdcdcd58c724e2f83ef95c568513ae644765 | size: 14,829 | ext: py | lang: Python
path: warn_transformer/integrate.py | repo: biglocalnews/warn-transformer | head: daefc8835ed369e19838410d4c94950c43800d1e | licenses: ["Apache-2.0"]
stars: 3 (2022-02-18T22:21:00.000Z to 2022-03-24T21:24:29.000Z) | issues: 20 (2022-02-20T01:00:07.000Z to 2022-03-25T18:18:56.000Z) | forks: 2 (2022-02-25T02:50:00.000Z to 2022-03-14T16:32:22.000Z)
import csv
import json
import logging
import typing
from collections import defaultdict
from datetime import datetime, timezone
from itertools import chain
from operator import itemgetter
from pathlib import Path
import jellyfish
import requests
from . import utils
logger = logging.getLogger(__name__)
def run(
new_path: Path = utils.WARN_TRANSFORMER_OUTPUT_DIR
/ "processed"
/ "consolidated.csv",
init_current_data: bool = False,
) -> Path:
"""Integrate new consolidated data with the current database.
Args:
new_path (Path): The path to the latest consolidated file on the local file system
init_current_data (bool): Set to True when you want to create a new integrated dataset from scratch. Default False.
Returns a Path to the newly integrated file.
"""
# Get the most recently published integrated dataset
current_data_list = get_current_data(init_current_data)
# Read in new consolidated.csv file
with open(new_path) as fh:
new_data_reader = csv.DictReader(fh)
new_data_list = list(new_data_reader)
logger.debug(f"{len(new_data_list)} records in new file at {new_path}")
# Regroup each list by state
current_data_by_source = regroup_by_source(current_data_list)
new_data_by_source = regroup_by_source(new_data_list)
# Winnow down the new data to records that have changed
changed_data_by_source = get_changed_data(
new_data_by_source, current_data_by_source
)
    # Loop through the changed data to determine which are new and which are amendments
amend_by_source = {}
insert_by_source = {}
for postal_code, change_list in changed_data_by_source.items():
logger.debug(
f"Inspecting {len(change_list)} changed records from {postal_code}"
)
current_row_list = current_data_by_source[postal_code]
amend_list = []
insert_list = []
for new_row in change_list:
# See if we can find a likely parent that was amended
likely_ancestor = get_likely_ancestor(new_row, current_row_list)
# If there is one, we assume this is an amendment
if likely_ancestor:
amend_list.append({"new": new_row, "current": likely_ancestor})
# Otherwise we estimate it's a new record
else:
insert_list.append(new_row)
# Log the result
logger.debug(f"{len(insert_list)} new records")
logger.debug(f"{len(amend_list)} amended records")
# Add to master list for integration
insert_by_source[postal_code] = insert_list
amend_by_source[postal_code] = amend_list
# Final report on what we'll do
full_amend_list = flatten_grouped_data(amend_by_source)
full_insert_list = flatten_grouped_data(insert_by_source)
logger.debug(f"{len(full_insert_list)} total new records")
logger.debug(f"{len(full_amend_list)} total amended records")
# Write out both lists
insert_path = utils.WARN_TRANSFORMER_OUTPUT_DIR / "processed" / "additions.csv"
with open(insert_path, "w") as fh:
if full_insert_list:
logger.debug(f"Writing {len(full_insert_list)} records to {insert_path}")
writer = csv.DictWriter(fh, full_insert_list[0].keys())
writer.writeheader()
writer.writerows(full_insert_list)
amend_path = utils.WARN_TRANSFORMER_OUTPUT_DIR / "processed" / "amendments.csv"
with open(amend_path, "w") as fh:
if full_amend_list:
logger.debug(f"Writing {len(full_amend_list)} records to {amend_path}")
writer = csv.DictWriter(fh, full_amend_list[0].keys())
writer.writeheader()
writer.writerows(full_amend_list)
# Create a new list to store everything
integrated_list: typing.List[typing.Dict[str, typing.Any]] = []
    # Create a lookup of the current amended records that links them to their likely replacements
amend_lookup = {d["current"]["hash_id"]: d["new"] for d in full_amend_list}
# Get the current timestamp to mark the updates we make in this run
now = datetime.now(timezone.utc)
# Loop through everything in the current database
for current_row in current_data_list:
# If this is an amended row ...
if current_row["hash_id"] in amend_lookup:
# Pull out the new record from the our lookup
amended_row = amend_lookup[current_row["hash_id"]]
# Link it to its likely ancestor
amended_row["is_amendment"] = True
amended_row["is_superseded"] = False
amended_row["likely_ancestor"] = current_row["hash_id"]
            # Update its metadata
amended_row["first_inserted_date"] = current_row["first_inserted_date"]
amended_row["last_updated_date"] = now
amended_row["estimated_amendments"] = (
current_row["estimated_amendments"] + 1
)
# Add it to the new integrated database
integrated_list.append(amended_row)
# Mark the current record as superseded
# This allows it to be excluded circumstances where we only want unique records
# But without deleting it entirely
current_row["is_superseded"] = True
# Add it to the integrated database
integrated_list.append(current_row)
# If the current row is not amended ...
else:
            # If these fields haven't already been filled in, nope 'em
if "is_superseded" not in current_row.keys():
current_row["is_superseded"] = False
if "is_amendment" not in current_row.keys():
current_row["is_amendment"] = False
# Then we just keep what we got
integrated_list.append(current_row)
# Now insert the new records with today's timestamp
for new_row in full_insert_list:
new_row["first_inserted_date"] = now
new_row["last_updated_date"] = now
new_row["estimated_amendments"] = 0
new_row["is_superseded"] = False
new_row["is_amendment"] = False
integrated_list.append(new_row)
# And sort everything in reverse chronological order
sorted_list = sorted(
integrated_list,
key=itemgetter("last_updated_date", "first_inserted_date", "notice_date"),
reverse=True,
)
# Finally, write out what we got
integrated_path = utils.WARN_TRANSFORMER_OUTPUT_DIR / "processed" / "integrated.csv"
logger.debug(f"Writing {len(integrated_list)} records to {integrated_path}")
with open(integrated_path, "w") as fh:
fieldnames = [
"hash_id",
"first_inserted_date",
"notice_date",
"effective_date",
"postal_code",
"company",
"location",
"jobs",
"is_closure",
"is_temporary",
"is_superseded",
"is_amendment",
"likely_ancestor",
"estimated_amendments",
"last_updated_date",
]
writer = csv.DictWriter(fh, fieldnames, extrasaction="ignore")
writer.writeheader()
writer.writerows(sorted_list)
# And return the path
return integrated_path
def is_similar_string(s1, s2):
"""Evaluate whether we consider the two strings close enough to be likely variations.
Args:
s1 (str): The first string.
s2 (str): The second string.
Returns True or False.
"""
return jellyfish.jaro_winkler_similarity(s1, s2) > 0.95
def is_similar_date(d1, d2):
"""Evaluate whether we consider the two date strings close enough to be likely variations.
Args:
d1 (str): The first string.
d2 (str): The second string.
Returns True or False.
"""
return jellyfish.levenshtein_distance(d1, d2) <= 3
def get_likely_ancestor(
new_row: typing.Dict[str, typing.Any], current_data: typing.List
) -> typing.Optional[typing.Dict[str, typing.Any]]:
"""Determine if the provided new row has a likely parent in the current dataset.
Args:
new_row (dict): A record from the new dataset believed to contain a change to the current dataset.
current_data (list): All of the records in the current dataset for comparison
Returns:
The record in the current data judged most likely to be the ancestor of the new record.
Returns None if the record is estimated to be new.
"""
# Check our key fields against everything in the dataset
likely_match_list = []
for current_row in current_data:
# Check the company names
if not is_similar_string(new_row["company"], current_row["company"]):
continue
# Check the notice date
if not is_similar_date(new_row["notice_date"], current_row["notice_date"]):
continue
# Check the effective date, if it exists
if new_row["effective_date"] and current_row["effective_date"]:
if not is_similar_date(
new_row["effective_date"], current_row["effective_date"]
):
continue
# Check the location, if it exists
if new_row["location"] and current_row["location"]:
if not is_similar_string(new_row["location"], current_row["location"]):
continue
# Whatever is left we keep
likely_match_list.append(current_row)
# If there's nothing, return None
if not likely_match_list:
return None
    # If there is more than one likely match, we are going to log it
if len(likely_match_list) > 1:
# Log here. Might do more later.
logger.debug("New row has more than one likely match")
logger.debug(f"New row: {json.dumps(new_row, indent=2, default=str)}")
logger.debug(
f"Likely matches: {json.dumps(likely_match_list, indent=2, default=str)}"
)
# For now we just return the first one
return likely_match_list[0]
def get_current_data(init: bool = False) -> typing.List[typing.Dict[str, typing.Any]]:
"""Fetch the most recent published version of our integrated dataset.
Args:
init (bool): Set to True when you want to create a new integrated dataset from scratch. Default False.
Returns a list of dictionaries ready for comparison against the new consolidated data file.
"""
# Set which file to pull
base_url = "https://raw.githubusercontent.com/biglocalnews/warn-github-flow/transformer/data/warn-transformer/processed/"
if init:
current_url = f"{base_url}consolidated.csv"
logger.debug(f"Initializing new current file from {current_url}")
else:
current_url = f"{base_url}integrated.csv"
logger.debug(f"Downloading most recent current file from {current_url}")
# Download the current database
current_r = requests.get(current_url)
current_data_str = current_r.content.decode("utf-8")
# Read in the current database
current_data_reader = csv.DictReader(current_data_str.splitlines(), delimiter=",")
current_data_list: typing.List[typing.Dict[str, typing.Any]] = list(
current_data_reader
)
# Get the current timestamp to mark the updates we make in this run
now = datetime.now(timezone.utc)
# If we're initializing a new dataset, we'll need to fill in the extra
# fields custom to the integrated set.
if init:
for row in current_data_list:
row["first_inserted_date"] = now
row["last_updated_date"] = now
row["estimated_amendments"] = 0
else:
for row in current_data_list:
# Otherwise we'll want to parse a few data types for later use
row["last_updated_date"] = datetime.fromisoformat(row["last_updated_date"])
row["first_inserted_date"] = datetime.fromisoformat(
row["first_inserted_date"]
)
row["estimated_amendments"] = int(row["estimated_amendments"])
# Return the list
logger.debug(f"{len(current_data_list)} records downloaded from current database")
return current_data_list
def get_changed_data(
new_data: typing.DefaultDict[str, typing.List],
current_data: typing.DefaultDict[str, typing.List],
) -> typing.DefaultDict[str, typing.List]:
"""Determine which rows in a new data file are different from the current dataset.
Args:
new_data (dict): A dictionary keyed by postal code. Each value is a list of all records from that source.
current_data (dict): A dictionary keyed by postal code. Each value is a list of all records from that source.
Returns a dictionary keyed by postal code. Each value is a list of all records with that value deemed to have changed.
"""
changed_dict = defaultdict(list)
for postal_code, new_row_list in new_data.items():
logger.debug(f"Inspecting {len(new_row_list)} new records from {postal_code}")
# Pull the current rows from the source
current_row_list = current_data[postal_code]
logger.debug(
f"Comparing against {len(current_row_list)} records from the current database"
)
# Loop through the rows in this source
for new_row in new_row_list:
# Identify new rows that are identical to a record in the current database
if not any(
r for r in current_row_list if r["hash_id"] == new_row["hash_id"]
):
# If not, it's either a new record or an amendment.
# So it goes in our change list
changed_dict[postal_code].append(new_row)
# Log where we stand
change_list = changed_dict[postal_code]
logger.debug(
f"{len(change_list)} changed rows ({round((len(change_list)/len(new_row_list))*100, 2)}%)"
)
# Pass it out
return changed_dict
def regroup_by_source(data_list: typing.List) -> typing.DefaultDict[str, typing.List]:
"""Regroup the provided list by its source field.
Args:
data_list: A list of dictionaries presumed to have a "postal_code" field.
Returns: A dictionary keyed by postal code. Each value is a list of all records with that value.
"""
regrouped_dict = defaultdict(list)
for row in data_list:
regrouped_dict[row["postal_code"]].append(row)
return regrouped_dict
def flatten_grouped_data(grouped_data: typing.Dict[str, typing.List]):
"""Flatten a dictionary of data grouped by source down to a single list.
Args:
grouped_data (dict): The grouped data
Returns a list of the data for all sources
"""
return list(chain(*grouped_data.values()))
if __name__ == "__main__":
run()
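A quick illustration of the two similarity tests defined above (the example strings are made up): a Jaro-Winkler score above 0.95 treats near-identical company names as the same employer, and a Levenshtein distance of three or less treats lightly garbled dates as the same date.

```python
import jellyfish

# Roughly 0.98, so these would be judged "similar" company names (> 0.95).
print(jellyfish.jaro_winkler_similarity("Acme Corp", "Acme Corp."))

# Distance 2, so these would be judged "similar" date strings (<= 3).
print(jellyfish.levenshtein_distance("2022-01-01", "2022-01-10"))
```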
file stats: avg_line_length 37.44697 | max_line_length 125 | alphanum_fraction 0.661609

---
hexsha: 1ccec519a3c18a0c6360a09ce15dbedf892eea16 | size: 316 | ext: py | lang: Python
path: src/sima/post/amplitudetype.py | repo: SINTEF/simapy | head: 650b8c2f15503dad98e2bfc0d0788509593822c7 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Generated with AmplitudeType
#
from enum import Enum
from enum import auto
class AmplitudeType(Enum):
""""""
SINGLE = auto()
DOUBLE = auto()
def label(self):
if self == AmplitudeType.SINGLE:
return "Single"
if self == AmplitudeType.DOUBLE:
return "Double"
file stats: avg_line_length 21.066667 | max_line_length 40 | alphanum_fraction 0.601266

---
hexsha: 5056b3043cbaae1cff67c740cb1f9c3392c1c1f9 | size: 364 | ext: py | lang: Python
path: rustplus/api/structures/__init__.py | repo: nesttle/rustplus | head: 0149a7126672abe6917b21c6209bdefbe7fcc346 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from .rust_time import RustTime
from .rust_info import RustInfo
from .rust_map import RustMap
from .rust_marker import RustMarker
from .rust_chat_message import RustChatMessage
from .rust_team_info import RustTeamInfo, RustTeamMember, RustTeamNote
from .rust_entity_info import RustEntityInfo
from .rust_contents import RustContents
from .rust_item import RustItem
file stats: avg_line_length 40.444444 | max_line_length 70 | alphanum_fraction 0.868132

---
hexsha: a73eeb122a5e04dc2ddcc760e2c77a6b81be6f10 | size: 4,889 | ext: py | lang: Python
path: docs/experiments/object_reach_stable_baselines.py | repo: irivers29/robo-gym | head: d4672e80f3d0ba1c6fc8c4624dd8461ad4551d1b | licenses: ["MIT"]
stars: null | issues: null | forks: null
from asyncio.constants import LOG_THRESHOLD_FOR_CONNLOST_WRITES
import time
import argparse
import os
import datetime
import gym
from matplotlib.pyplot import plot
import robo_gym
from robo_gym.wrappers.exception_handling import ExceptionHandling
import numpy as np
import torch
from stable_baselines3 import TD3
from stable_baselines3.td3 import MlpPolicy
from stable_baselines3.common import results_plotter
from stable_baselines3.common.results_plotter import load_results, ts2xy, plot_results
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.callbacks import BaseCallback
import matplotlib.pyplot as plt
target_machine_ip = '127.0.0.1' # or other machine 'xxx.xxx.xxx.xxx'
best_mean_reward, n_steps = -np.inf, 0
num_best_model = 0
time_steps = 4000000
def main(args):
env = gym.make('ShelfEnvironmentPositioningURSim-v0', ur_model='ur10e', ip=target_machine_ip, gui=False)
env = ExceptionHandling(env)
log_dir='mon/'
os.makedirs(log_dir, exist_ok=True)
env = Monitor(env, log_dir)
obs = env.reset()
observation_dim = len(obs)
#achieved_goal_dim = len(info['ee_coord'])
#desired_goal_dim = len(info['target_coord'])
action_dim = env.action_space.shape[0]
state_dim = observation_dim
#assert achieved_goal_dim == desired_goal_dim
print("observation", obs)
#print("ee_coord", info["ee_coord"])
#print("target coordinates", info["target_coord"])
use_cuda = torch.cuda.is_available()
print(use_cuda)
print("-----------------------")
print('Parameters:')
print("Observation Size:", observation_dim)
#print("Goal Size:", achieved_goal_dim)
print("State Size:", observation_dim)
print("Action Size:", action_dim)
print("State Size:", state_dim)
print("-----------------------")
callback = SaveOnBestTrainingRewardCallback(check_freq=2000, log_dir=log_dir)
#model = TD3(MlpPolicy, env, verbose=1, device='cuda', tensorboard_log="./TD3_positive_reward_tensorboard/")
model = TD3.load("mon/21_02_pos_results/best_model")
model.set_env(env)
model.learn(total_timesteps=args['num_episodes'], callback=callback)
model.save('TD3')
plot_results([log_dir], args["num_episodes"], results_plotter.X_TIMESTEPS, "TD3")
class SaveOnBestTrainingRewardCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
:param check_freq: (int)
:param log_dir: (str) Path to the folder where the model will be saved.
    It must contain the file created by the ``Monitor`` wrapper.
:param verbose: (int)
"""
def __init__(self, check_freq: int, log_dir: str, verbose=1):
super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
self.check_freq = check_freq
self.log_dir = log_dir
self.save_path = os.path.join(log_dir, 'best_model')
self.best_mean_reward = -np.inf
def _init_callback(self) -> None:
# Create folder if needed
if self.save_path is not None:
os.makedirs(self.save_path, exist_ok=True)
def _on_step(self) -> bool:
if self.n_calls % self.check_freq == 0:
# Retrieve training reward
x, y = ts2xy(load_results(self.log_dir), 'timesteps')
if len(x) > 0:
# Mean training reward over the last 100 episodes
mean_reward = np.mean(y[-100:])
if self.verbose > 0:
print("Num timesteps: {}".format(self.num_timesteps))
print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(self.best_mean_reward, mean_reward))
# New best model, you could save the agent here
if mean_reward > self.best_mean_reward:
self.best_mean_reward = mean_reward
# Example for saving best model
if self.verbose > 0:
print("Saving new best model to {}".format(self.save_path))
self.model.save(self.save_path)
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lr_actor', help='actor learning rate', default=0.0001)
parser.add_argument('--lr_critic', help='critic learning rate', default=0.001)
parser.add_argument('--batch_size', help='batch size', default=64)
parser.add_argument('--num_episodes', help='episodes to train', default=4000000)
parser.add_argument('--episodes-length', help='max length for one episode', default=1000)
    parser.add_argument('--HER', help='Hindsight Experience Replay', default=False)
args = vars(parser.parse_args())
start_time = datetime.datetime.now()
main(args)
print("---%s seconds---"%(datetime.datetime.now() - start_time))
file stats: avg_line_length 37.320611 | max_line_length 131 | alphanum_fraction 0.682962

---
hexsha: 724bfac8bcc446adf31c991e350e4318023227bc | size: 754 | ext: py | lang: Python
path: vmapp/domains/admin.py | repo: codeasap-pl/vmapp | head: dd845dcb6b49f06ac635d019139453137e20b548 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from django.contrib import admin
from django.db.models import Q, Count
from .models import Domain
@admin.register(Domain)
class DomainAdmin(admin.ModelAdmin):
list_display = (
"domain", "is_enabled", "total_users", "total_active_users",
"ctime", "mtime"
)
def get_queryset(self, request):
qs = super().get_queryset(request)
qs = qs.annotate(
total_users=Count(
"user",
),
total_active_users=Count(
"user",
filter=Q(user__is_enabled=True)
)
)
return qs
def total_users(self, obj):
return obj.total_users
def total_active_users(self, obj):
return obj.total_active_users
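The same annotation can be sketched outside the admin; each `Domain` row gains `total_users` and `total_active_users` computed in a single query, assuming the `user` reverse relation used above. This is an illustrative standalone query, not part of the admin module.

```python
from django.db.models import Count, Q

from .models import Domain

# Hypothetical standalone query mirroring DomainAdmin.get_queryset.
annotated = Domain.objects.annotate(
    total_users=Count("user"),
    total_active_users=Count("user", filter=Q(user__is_enabled=True)),
)
for domain in annotated:
    print(domain.domain, domain.total_users, domain.total_active_users)
```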
file stats: avg_line_length 23.5625 | max_line_length 68 | alphanum_fraction 0.582228

---
hexsha: b662ce7645f5992160cc11571529ef29e5a3d6fd | size: 8,593 | ext: py | lang: Python
path: train_qanet.py | repo: zichuan-scott-xu/cs224n_squad | head: 29f48c256a2eddd940010d2a408307fee80de52d | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""Train a model on SQuAD.
Author:
Chris Chute (chute@stanford.edu)
"""
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as sched
import torch.utils.data as data
import util
import math
from args import get_train_args
from collections import OrderedDict
from json import dumps
from qanet2 import QANet
from tensorboardX import SummaryWriter
from tqdm import tqdm
from ujson import load as json_load
from util import collate_fn, SQuAD
def main(args):
# Set up logging and devices
args.save_dir = util.get_save_dir(args.save_dir, args.name, training=True)
log = util.get_logger(args.save_dir, args.name)
tbx = SummaryWriter(args.save_dir)
device, args.gpu_ids = util.get_available_devices()
log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}')
args.batch_size *= max(1, len(args.gpu_ids))
# Set random seed
log.info(f'Using random seed {args.seed}...')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Get embeddings
log.info('Loading embeddings...')
word_vectors = util.torch_from_json(args.word_emb_file)
char_vectors = util.torch_from_json(args.char_emb_file)
# Get model
log.info('Building model...')
model = QANet(word_vectors=word_vectors,
char_vectors=char_vectors,
model_dim=args.qanet_hidden,
num_model_enc_block=args.num_enc_blocks,
num_heads=args.num_heads)
model = nn.DataParallel(model, args.gpu_ids)
if args.load_path:
log.info(f'Loading checkpoint from {args.load_path}...')
model, step = util.load_model(model, args.load_path, args.gpu_ids)
else:
step = 0
model = model.to(device)
model.train()
ema = util.EMA(model, args.ema_decay)
# Get saver
saver = util.CheckpointSaver(args.save_dir,
max_checkpoints=args.max_checkpoints,
metric_name=args.metric_name,
maximize_metric=args.maximize_metric,
log=log)
# Get optimizer and scheduler
# optimizer = optim.Adadelta(model.parameters(), args.lr,
# weight_decay=args.l2_wd)
# scheduler = sched.LambdaLR(optimizer, lambda s: 1.) # Constant LR
model_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = optim.Adam(model_params, lr=args.start_lr, eps=1e-7, weight_decay=3e-7, betas=(0.8,0.999))
cr = 1.0 / math.log(args.lr_warmup)
scheduler = optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda t: cr * math.log(t + 1) if t < args.lr_warmup else 1)
# Get data loader
log.info('Building dataset...')
train_dataset = SQuAD(args.train_record_file, args.use_squad_v2)
train_loader = data.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
collate_fn=collate_fn)
dev_dataset = SQuAD(args.dev_record_file, args.use_squad_v2)
dev_loader = data.DataLoader(dev_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_fn)
# Train
log.info('Training...')
steps_till_eval = args.eval_steps
epoch = step // len(train_dataset)
while epoch != args.num_epochs:
epoch += 1
log.info(f'Starting epoch {epoch}...')
with torch.enable_grad(), \
tqdm(total=len(train_loader.dataset)) as progress_bar:
for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:
# Setup for forward
cw_idxs = cw_idxs.to(device)
cc_idxs = cc_idxs.to(device)
qw_idxs = qw_idxs.to(device)
qc_idxs = qc_idxs.to(device)
batch_size = cw_idxs.size(0)
optimizer.zero_grad()
# Forward
log_p1, log_p2 = model(cw_idxs, cc_idxs, qw_idxs, qc_idxs)
y1, y2 = y1.to(device), y2.to(device)
loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
loss_val = loss.item()
# Backward
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step(step // batch_size)
ema(model, step // batch_size)
# Log info
step += batch_size
progress_bar.update(batch_size)
progress_bar.set_postfix(epoch=epoch,
NLL=loss_val)
tbx.add_scalar('train/NLL', loss_val, step)
tbx.add_scalar('train/LR',
optimizer.param_groups[0]['lr'],
step)
steps_till_eval -= batch_size
if steps_till_eval <= 0:
steps_till_eval = args.eval_steps
# Evaluate and save checkpoint
log.info(f'Evaluating at step {step}...')
ema.assign(model)
results, pred_dict = evaluate(model, dev_loader, device,
args.dev_eval_file,
args.max_ans_len,
args.use_squad_v2)
saver.save(step, model, results[args.metric_name], device)
ema.resume(model)
# Log to console
results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items())
log.info(f'Dev {results_str}')
# Log to TensorBoard
log.info('Visualizing in TensorBoard...')
for k, v in results.items():
tbx.add_scalar(f'dev/{k}', v, step)
util.visualize(tbx,
pred_dict=pred_dict,
eval_path=args.dev_eval_file,
step=step,
split='dev',
num_visuals=args.num_visuals)
def evaluate(model, data_loader, device, eval_file, max_len, use_squad_v2):
nll_meter = util.AverageMeter()
model.eval()
pred_dict = {}
with open(eval_file, 'r') as fh:
gold_dict = json_load(fh)
with torch.no_grad(), \
tqdm(total=len(data_loader.dataset)) as progress_bar:
for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in data_loader:
# Setup for forward
cw_idxs = cw_idxs.to(device)
cc_idxs = cc_idxs.to(device)
qw_idxs = qw_idxs.to(device)
qc_idxs = qc_idxs.to(device)
batch_size = cw_idxs.size(0)
# Forward
log_p1, log_p2 = model(cw_idxs, cc_idxs, qw_idxs, qc_idxs)
y1, y2 = y1.to(device), y2.to(device)
loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)
nll_meter.update(loss.item(), batch_size)
# Get F1 and EM scores
p1, p2 = log_p1.exp(), log_p2.exp()
starts, ends = util.discretize(p1, p2, max_len, use_squad_v2)
# Log info
progress_bar.update(batch_size)
progress_bar.set_postfix(NLL=nll_meter.avg)
preds, _ = util.convert_tokens(gold_dict,
ids.tolist(),
starts.tolist(),
ends.tolist(),
use_squad_v2)
pred_dict.update(preds)
model.train()
results = util.eval_dicts(gold_dict, pred_dict, use_squad_v2)
results_list = [('NLL', nll_meter.avg),
('F1', results['F1']),
('EM', results['EM'])]
if use_squad_v2:
results_list.append(('AvNA', results['AvNA']))
results = OrderedDict(results_list)
return results, pred_dict
if __name__ == '__main__':
main(get_train_args())
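A small sketch of the warm-up schedule built above: the learning-rate multiplier grows logarithmically from 0 toward 1 over the first `lr_warmup` steps, then stays at 1. The default value below is a placeholder, not the project's actual setting.

```python
import math


def warmup_multiplier(t, lr_warmup=1000):  # lr_warmup is a placeholder default
    cr = 1.0 / math.log(lr_warmup)
    return cr * math.log(t + 1) if t < lr_warmup else 1.0

# warmup_multiplier(0) == 0.0; it reaches 1.0 at t == lr_warmup - 1 and stays there.
```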
file stats: avg_line_length 38.533632 | max_line_length 106 | alphanum_fraction 0.546259

---
hexsha: 8f4d5430a8bf4c6f245db4ffe45907bb026f0de6 | size: 1,474 | ext: py | lang: Python
path: libs/glew/glew.py | repo: gitdevmod/craft-blueprints-kde | head: 81a866d2d606dabd57347fbac7cdab42979332dd | licenses: ["BSD-2-Clause"]
stars: 14 (2017-09-04T09:01:03.000Z to 2022-01-04T20:09:00.000Z) | issues: 14 (2017-12-15T08:11:22.000Z to 2020-12-29T19:11:13.000Z) | forks: 19 (2017-09-05T19:16:21.000Z to 2020-10-18T12:46:06.000Z)
# -*- coding: utf-8 -*-
import info
from Package.CMakePackageBase import *
class subinfo(info.infoclass):
def setTargets(self):
for ver in ['1.5.4', '1.7.0', '1.9.0', '1.10.0', '2.1.0', '2.2.0']:
self.targets[ver] = 'http://downloads.sourceforge.net/project/glew/glew/' + ver + '/glew-' + ver + '.zip'
self.targetInstSrc[ver] = 'glew-' + ver
self.patchToApply['1.5.4'] = [('glew-1.5.4-20100708.diff', 1)]
self.patchToApply['1.7.0'] = [('glew-1.7.0-20120320.diff', 1)]
self.patchToApply['1.9.0'] = [('glew-1.9.0-20130124.diff', 1)]
self.patchToApply['1.10.0'] = [('glew-1.9.0-20130124.diff', 1), ('split-string-literals-in-rc-files.diff', 1)]
self.targetDigests['1.7.0'] = '107c155ff5b69d97b9c530b40e4e8da571aaf729'
self.targetDigests['1.9.0'] = '6c0dd8d6af14db93868bb5482b55e784b2dc1127'
self.targetDigests['1.10.0'] = 'da45a883ca9b2a8e8fc1a642bd043f251ad69151'
self.targetDigests['2.1.0'] = '85ea9f4d1279b107019e48b9174c34e86c634830'
self.targetDigests['2.2.0'] = 'f1d3f046e44a4cb62d09547cf8f053d5b16b516f'
self.targetConfigurePath['2.2.0'] = 'build/cmake'
self.description = "OpenGL Extension Wrangler Library"
self.defaultTarget = '2.2.0'
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
class Package(CMakePackageBase):
def __init__(self, **args):
CMakePackageBase.__init__(self)
file stats: avg_line_length 43.352941 | max_line_length 118 | alphanum_fraction 0.641113

---
hexsha: 24c0a87b14d812e75ccaf8cfc5cde2bf8a616ead | size: 39,067 | ext: py | lang: Python
path: release/scripts/addons_contrib/space_view3d_align_tools.py | repo: BlazesRus/Bforartists | head: 126bdd9e47cc984fd97ba5299bfb92ec5278e754 | licenses: ["Naumen", "Condor-1.1", "MS-PL"]
stars: 1 (2019-07-08T15:51:14.000Z to 2019-07-08T15:51:14.000Z) | issues: null | forks: null
# -*- coding: utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Contributed to by gabhead, Lell, Anfeo, meta-androcto
bl_info = {
"name": "Align Tools",
"author": "gabhead, Lell, Anfeo",
"version": (0, 3, 3),
"blender": (2, 80, 0),
"location": "View3D > Tool Shelf > Tools",
"description": "Align Selected Objects to Active Object",
"warning": "",
"wiki_url": "https://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/3D interaction/Align_Tools",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "3D View",
}
import bpy
from bpy.types import (
Operator,
Panel,
AddonPreferences,
)
from bpy.props import (
EnumProperty,
BoolProperty,
FloatVectorProperty,
StringProperty,
)
from mathutils import (
Vector,
Matrix,
)
# Simple Align Defs #
# Align all
def main(context):
for i in bpy.context.selected_objects:
i.location = bpy.context.active_object.location
i.rotation_euler = bpy.context.active_object.rotation_euler
# Align Location
def LocAll(context):
for i in bpy.context.selected_objects:
i.location = bpy.context.active_object.location
def LocX(context):
for i in bpy.context.selected_objects:
i.location.x = bpy.context.active_object.location.x
def LocY(context):
for i in bpy.context.selected_objects:
i.location.y = bpy.context.active_object.location.y
def LocZ(context):
for i in bpy.context.selected_objects:
i.location.z = bpy.context.active_object.location.z
# Align Rotation
def RotAll(context):
for i in bpy.context.selected_objects:
i.rotation_euler = bpy.context.active_object.rotation_euler
def RotX(context):
for i in bpy.context.selected_objects:
i.rotation_euler.x = bpy.context.active_object.rotation_euler.x
def RotY(context):
for i in bpy.context.selected_objects:
i.rotation_euler.y = bpy.context.active_object.rotation_euler.y
def RotZ(context):
for i in bpy.context.selected_objects:
i.rotation_euler.z = bpy.context.active_object.rotation_euler.z
# Align Scale
def ScaleAll(context):
for i in bpy.context.selected_objects:
i.scale = bpy.context.active_object.scale
def ScaleX(context):
for i in bpy.context.selected_objects:
i.scale.x = bpy.context.active_object.scale.x
def ScaleY(context):
for i in bpy.context.selected_objects:
i.scale.y = bpy.context.active_object.scale.y
def ScaleZ(context):
for i in bpy.context.selected_objects:
i.scale.z = bpy.context.active_object.scale.z
# Advanced Align Defs #
# subject to object 0, 1 and 2 to pivot for cursor
def align_function(subject, active_too, consistent, self_or_active, loc_x, loc_y, loc_z, ref1, ref2, loc_offset,
rot_x, rot_y, rot_z, rot_offset, scale_x, scale_y, scale_z, scale_offset,
fit_x, fit_y, fit_z):
sel_obj = bpy.context.selected_objects
act_obj = bpy.context.active_object
global sel_max
global sel_min
global sel_center
global ref2_co
def get_reference_points(obj, space):
me = obj.data
co_list = []
        # let's get all the points coordinates
if space == "global":
ok = False
obj_mtx = obj.matrix_world
if obj.type == 'MESH' and len(me.vertices) > 0:
ok = True
for p in me.vertices:
co_list.append((obj_mtx @ p.co))
elif obj.type == 'SURFACE' and len(me.splines) > 0:
ok = True
for s in me.splines:
for p in s.points:
co_list.append((obj_mtx @ p.co))
elif obj.type == 'FONT' and len(me.splines) > 0:
ok = True
for s in me.splines:
for p in s.bezier_points:
co_list.append((obj_mtx @ p.co))
elif space == "local":
ok = False
if obj.type == 'MESH' and len(me.vertices) > 0:
ok = True
for p in me.vertices:
co_list.append(p.co)
elif obj.type == 'SURFACE' and len(me.splines) > 0:
ok = True
for s in me.splines:
for p in s.points:
co_list.append(p.co)
elif obj.type == 'FONT' and len(obj.data.splines) > 0:
ok = True
for s in me.splines:
for p in s.bezier_points:
co_list.append(p.co)
# if a valid point found
# proceed to calculate the extremes
if ok:
max_x = co_list[0][0]
min_x = co_list[0][0]
max_y = co_list[0][1]
min_y = co_list[0][1]
max_z = co_list[0][2]
min_z = co_list[0][2]
for v in co_list:
                # compare each coordinate with the smallest and largest values found so far
                # in order to find the minimum and maximum for each axis
act_x = v[0]
if act_x > max_x:
max_x = act_x
if act_x < min_x:
min_x = act_x
act_y = v[1]
if act_y > max_y:
max_y = act_y
if act_y < min_y:
min_y = act_y
act_z = v[2]
if act_z > max_z:
max_z = act_z
if act_z < min_z:
min_z = act_z
else:
# otherwise use the pivot object
a = obj.location
min_x = a[0]
max_x = a[0]
min_y = a[1]
max_y = a[1]
min_z = a[2]
max_z = a[2]
center_x = min_x + ((max_x - min_x) / 2)
center_y = min_y + ((max_y - min_y) / 2)
center_z = min_z + ((max_z - min_z) / 2)
reference_points = [min_x, center_x, max_x, min_y, center_y, max_y, min_z, center_z, max_z]
return reference_points
def get_sel_ref(ref_co, sel_obj): # I look for the selection end points
sel_min = ref_co.copy()
sel_max = ref_co.copy()
for obj in sel_obj:
if obj != act_obj or (active_too and obj == act_obj):
ref_points = get_reference_points(obj, "global")
ref_min = Vector([ref_points[0], ref_points[3], ref_points[6]])
ref_max = Vector([ref_points[2], ref_points[5], ref_points[8]])
if ref_min[0] < sel_min[0]:
sel_min[0] = ref_min[0]
if ref_max[0] > sel_max[0]:
sel_max[0] = ref_max[0]
if ref_min[1] < sel_min[1]:
sel_min[1] = ref_min[1]
if ref_max[1] > sel_max[1]:
sel_max[1] = ref_max[1]
if ref_min[2] < sel_min[2]:
sel_min[2] = ref_min[2]
if ref_max[2] > sel_max[2]:
sel_max[2] = ref_max[2]
return sel_min, sel_max
def find_ref2_co(act_obj):
# It contains the coordinates of the reference point for the positioning
if ref2 == "0":
ref_points = get_reference_points(act_obj, "global")
ref2_co = [ref_points[0], ref_points[3], ref_points[6]]
ref2_co = Vector(ref2_co)
elif ref2 == "1":
ref_points = get_reference_points(act_obj, "global")
ref2_co = [ref_points[1], ref_points[4], ref_points[7]]
ref2_co = Vector(ref2_co)
elif ref2 == "2":
ref2_co = act_obj.location
ref2_co = Vector(ref2_co)
elif ref2 == "3":
ref_points = get_reference_points(act_obj, "global")
ref2_co = [ref_points[2], ref_points[5], ref_points[8]]
ref2_co = Vector(ref2_co)
elif ref2 == "4":
ref2_co = bpy.context.scene.cursor.location
return ref2_co
def find_new_coord(obj):
ref_points = get_reference_points(obj, "global")
if loc_x is True:
if ref1 == "0":
min_x = ref_points[0]
new_x = ref2_co[0] + (obj.location[0] - min_x) + loc_offset[0]
elif ref1 == "1":
center_x = ref_points[1]
new_x = ref2_co[0] + (obj.location[0] - center_x) + loc_offset[0]
elif ref1 == "2":
new_x = ref2_co[0] + loc_offset[0]
elif ref1 == "3":
max_x = ref_points[2]
new_x = ref2_co[0] - (max_x - obj.location[0]) + loc_offset[0]
obj.location[0] = new_x
if loc_y is True:
if ref1 == "0":
min_y = ref_points[3]
new_y = ref2_co[1] + (obj.location[1] - min_y) + loc_offset[1]
elif ref1 == "1":
center_y = ref_points[4]
new_y = ref2_co[1] + (obj.location[1] - center_y) + loc_offset[1]
elif ref1 == "2":
new_y = ref2_co[1] + loc_offset[1]
elif ref1 == "3":
max_y = ref_points[5]
new_y = ref2_co[1] - (max_y - obj.location[1]) + loc_offset[1]
obj.location[1] = new_y
if loc_z is True:
if ref1 == "0":
min_z = ref_points[6]
new_z = ref2_co[2] + (obj.location[2] - min_z) + loc_offset[2]
elif ref1 == "1":
center_z = ref_points[7]
new_z = ref2_co[2] + (obj.location[2] - center_z) + loc_offset[2]
elif ref1 == "2":
new_z = ref2_co[2] + loc_offset[2]
elif ref1 == "3":
max_z = ref_points[8]
new_z = ref2_co[2] - (max_z - obj.location[2]) + loc_offset[2]
obj.location[2] = new_z
def find_new_rotation(obj):
if rot_x is True:
obj.rotation_euler[0] = act_obj.rotation_euler[0] + rot_offset[0]
if rot_y is True:
obj.rotation_euler[1] = act_obj.rotation_euler[1] + rot_offset[1]
if rot_z is True:
obj.rotation_euler[2] = act_obj.rotation_euler[2] + rot_offset[2]
def find_new_scale(obj):
if scale_x is True:
obj.scale[0] = act_obj.scale[0] + scale_offset[0]
if scale_y is True:
obj.scale[1] = act_obj.scale[1] + scale_offset[1]
if scale_z is True:
obj.scale[2] = act_obj.scale[2] + scale_offset[2]
def find_new_dimensions(obj, ref_dim):
ref_points = get_reference_points(obj, "local")
if fit_x:
dim = ref_points[2] - ref_points[0]
obj.scale[0] = (ref_dim[0] / dim) * act_obj.scale[0]
if fit_y:
dim = ref_points[5] - ref_points[3]
obj.scale[1] = (ref_dim[1] / dim) * act_obj.scale[1]
if fit_z:
dim = ref_points[8] - ref_points[6]
obj.scale[2] = (ref_dim[2] / dim) * act_obj.scale[2]
def move_pivot(obj):
me = obj.data
vec_ref2_co = Vector(ref2_co)
offset = vec_ref2_co - obj.location
offset_x = [offset[0] + loc_offset[0], 0, 0]
offset_y = [0, offset[1] + loc_offset[1], 0]
offset_z = [0, 0, offset[2] + loc_offset[2]]
def movement(vec):
obj_mtx = obj.matrix_world.copy()
# What's the displacement vector for the pivot?
move_pivot = Vector(vec)
# Move the pivot point (which is the object's location)
pivot = obj.location
pivot += move_pivot
nm = obj_mtx.inverted() @ Matrix.Translation(-move_pivot) @ obj_mtx
# Transform the mesh now
me.transform(nm)
if loc_x:
movement(offset_x)
if loc_y:
movement(offset_y)
if loc_z:
movement(offset_z)
def point_in_selection(act_obj, sel_obj):
ok = False
for o in sel_obj:
if o != act_obj:
ref_ob = o
obj_mtx = o.matrix_world
if o.type == 'MESH' and len(o.data.vertices) > 0:
ref_co = o.data.vertices[0].co.copy()
ref_co = obj_mtx @ ref_co
ok = True
break
elif o.type == 'CURVE' and len(o.data.splines) > 0:
ref_co = o.data.splines[0].bezier_points[0].co.copy()
ref_co = obj_mtx @ ref_co
ok = True
break
elif o.type == 'SURFACE' and len(o.data.splines) > 0:
ref_co = o.data.splines[0].points[0].co.copy()
ref_co = obj_mtx @ ref_co
ok = True
break
elif o.type == 'FONT' and len(o.data.splines) > 0:
ref_co = o.data.splines[0].bezier_points[0].co.copy()
ref_co = obj_mtx @ ref_co
ok = True
break
# if no object had data, use the position of an object that was not active as an internal
# point of selection
if ok is False:
ref_co = ref_ob.location
return ref_co
if subject == "0":
# if act_obj.type == ('MESH' or 'FONT' or 'CURVE' or 'SURFACE'):
if act_obj.type in {'MESH', 'FONT', 'SURFACE'}:
ref2_co = find_ref2_co(act_obj)
else:
if ref2 == "4":
ref2_co = bpy.context.scene.cursor.location
else:
ref2_co = act_obj.location
# when treating the whole selection as one consistent block
if consistent:
# find a point that lies inside the selection
ref_co = point_in_selection(act_obj, sel_obj)
sel_min, sel_max = get_sel_ref(ref_co, sel_obj)
sel_center = sel_min + ((sel_max - sel_min) / 2)
translate = [0, 0, 0]
# calculating how much to move the selection
if ref1 == "0":
translate = ref2_co - sel_min + loc_offset
elif ref1 == "1":
translate = ref2_co - sel_center + loc_offset
elif ref1 == "3":
translate = ref2_co - sel_max + loc_offset
# Move the various objects
for obj in sel_obj:
if obj != act_obj or (active_too and obj == act_obj):
if loc_x:
obj.location[0] += translate[0]
if loc_y:
obj.location[1] += translate[1]
if loc_z:
obj.location[2] += translate[2]
else: # not consistent
for obj in sel_obj:
if obj != act_obj:
if rot_x or rot_y or rot_z:
find_new_rotation(obj)
if fit_x or fit_y or fit_z:
dim = [0, 0, 0]
ref_points = get_reference_points(act_obj, "local")
dim[0] = ref_points[2] - ref_points[0]
dim[1] = ref_points[5] - ref_points[3]
dim[2] = ref_points[8] - ref_points[6]
find_new_dimensions(obj, dim)
if scale_x or scale_y or scale_z:
find_new_scale(obj)
if loc_x or loc_y or loc_z:
# print("ehy", ref2_co)
find_new_coord(obj)
if active_too is True:
if loc_x or loc_y or loc_z:
find_new_coord(act_obj)
if rot_x or rot_y or rot_z:
find_new_rotation(act_obj)
if scale_x or scale_y or scale_z:
find_new_scale(act_obj)
# add dimensions if dim offset will be added
elif subject == "1":
if self_or_active == "1":
if act_obj.type == 'MESH':
ref2_co = find_ref2_co(act_obj)
for obj in sel_obj:
if self_or_active == "0":
ref2_co = find_ref2_co(obj)
if loc_x or loc_y or loc_z:
if obj != act_obj and obj.type == 'MESH':
move_pivot(obj)
if active_too is True:
if act_obj.type == 'MESH':
if loc_x or loc_y or loc_z:
if self_or_active == "0":
ref2_co = find_ref2_co(act_obj)
move_pivot(act_obj)
elif subject == "2":
if self_or_active == "1":
if act_obj.type in {'MESH', 'FONT', 'SURFACE'}:
ref2_co = find_ref2_co(act_obj)
ref_points = get_reference_points(act_obj, "global")
else:
ref2_co = act_obj.location
ref_points = [act_obj.location[0], act_obj.location[0], act_obj.location[0],
act_obj.location[1], act_obj.location[1], act_obj.location[1],
act_obj.location[2], act_obj.location[2], act_obj.location[2]]
if ref2 == "0":
if loc_x is True:
bpy.context.scene.cursor.location[0] = ref_points[0] + loc_offset[0]
if loc_y is True:
bpy.context.scene.cursor.location[1] = ref_points[3] + loc_offset[1]
if loc_z is True:
bpy.context.scene.cursor.location[2] = ref_points[6] + loc_offset[2]
elif ref2 == "1":
if loc_x is True:
bpy.context.scene.cursor.location[0] = ref_points[1] + loc_offset[0]
if loc_y is True:
bpy.context.scene.cursor.location[1] = ref_points[4] + loc_offset[1]
if loc_z is True:
bpy.context.scene.cursor.location[2] = ref_points[7] + loc_offset[2]
elif ref2 == "2":
if loc_x is True:
bpy.context.scene.cursor.location[0] = act_obj.location[0] + loc_offset[0]
if loc_y is True:
bpy.context.scene.cursor.location[1] = act_obj.location[1] + loc_offset[1]
if loc_z is True:
bpy.context.scene.cursor.location[2] = act_obj.location[2] + loc_offset[2]
elif ref2 == "3":
if loc_x is True:
bpy.context.scene.cursor.location[0] = ref_points[2] + loc_offset[0]
if loc_y is True:
bpy.context.scene.cursor.location[1] = ref_points[5] + loc_offset[1]
if loc_z is True:
bpy.context.scene.cursor.location[2] = ref_points[8] + loc_offset[2]
elif self_or_active == "2":
ref_co = point_in_selection(act_obj, sel_obj)
sel_min, sel_max = get_sel_ref(ref_co, sel_obj)
sel_center = sel_min + ((sel_max - sel_min) / 2)
if ref2 == "0":
if loc_x is True:
bpy.context.scene.cursor.location[0] = sel_min[0] + loc_offset[0]
if loc_y is True:
bpy.context.scene.cursor.location[1] = sel_min[1] + loc_offset[1]
if loc_z is True:
bpy.context.scene.cursor.location[2] = sel_min[2] + loc_offset[2]
elif ref2 == "1":
if loc_x is True:
bpy.context.scene.cursor.location[0] = sel_center[0] + loc_offset[0]
if loc_y is True:
bpy.context.scene.cursor.location[1] = sel_center[1] + loc_offset[1]
if loc_z is True:
bpy.context.scene.cursor.location[2] = sel_center[2] + loc_offset[2]
elif ref2 == "3":
if loc_x is True:
bpy.context.scene.cursor.location[0] = sel_max[0] + loc_offset[0]
if loc_y is True:
bpy.context.scene.cursor.location[1] = sel_max[1] + loc_offset[1]
if loc_z is True:
bpy.context.scene.cursor.location[2] = sel_max[2] + loc_offset[2]
# Classes #
# Advanced Align
class OBJECT_OT_align_tools(Operator):
bl_idname = "object.align_tools"
bl_label = "Align Operator"
bl_description = "Align Object Tools"
bl_options = {'REGISTER', 'UNDO'}
# property definitions
# Object-Pivot-Cursor:
subject: EnumProperty(
items=(("0", "Object", "Align Objects"),
("1", "Pivot", "Align Objects Pivot"),
("2", "Cursor", "Align Cursor To Active")),
name="Align To",
description="What will be moved"
)
# Move active Too:
active_too: BoolProperty(
name="Active too",
default=False,
description="Move the active object too"
)
# advanced options
advanced: BoolProperty(
name="Advanced Options",
default=False,
description="Show advanced options"
)
consistent: BoolProperty(
name="Consistent Selection",
default=False,
description="Use consistent selection"
)
# Align Location:
loc_x: BoolProperty(
name="Align to X axis",
default=False,
description="Enable X axis alignment"
)
loc_y: BoolProperty(
name="Align to Y axis",
default=False,
description="Enable Y axis alignment"
)
loc_z: BoolProperty(
name="Align to Z axis",
default=False,
description="Enable Z axis alignment"
)
# Selection Option:
ref1: EnumProperty(
items=(("3", "Max", "Align the maximum point"),
("1", "Center", "Align the center point"),
("2", "Pivot", "Align the pivot"),
("0", "Min", "Align the minimum point")),
name="Selection reference",
description="Moved objects reference point"
)
# Active Oject Option:
ref2: EnumProperty(
items=(("3", "Max", "Align to the maximum point"),
("1", "Center", "Align to the center point"),
("2", "Pivot", "Align to the pivot"),
("0", "Min", "Align to the minimum point"),
("4", "Cursor", "Description")),
name="Active reference",
description="Destination point"
)
self_or_active: EnumProperty(
items=(("0", "Self", "In relation of itself"),
("1", "Active", "In relation of the active object"),
("2", "Selection", "In relation of the entire selection")),
name="Relation",
default="1",
description="To what the pivot will be aligned"
)
# Location Offset
loc_offset: FloatVectorProperty(
name="Location Offset",
description="Offset for location align position",
default=(0.0, 0.0, 0.0),
subtype='XYZ', size=3
)
# Rotation Offset
rot_offset: FloatVectorProperty(
name="Rotation Offset",
description="Offset for rotation alignment",
default=(0.0, 0.0, 0.0),
subtype='EULER', size=3
)
# Scale Offset
scale_offset: FloatVectorProperty(
name="Scale Offset",
description="Offset for scale match",
default=(0.0, 0.0, 0.0),
subtype='XYZ', size=3
)
# Fit Dimension Prop:
fit_x: BoolProperty(
name="Fit Dimension to X axis",
default=False,
description=""
)
fit_y: BoolProperty(
name="Fit Dimension to Y axis",
default=False,
description=""
)
fit_z: BoolProperty(
name="Fit Dimension to Z axis",
default=False,
description=""
)
# Apply Fit Dimension:
apply_dim: BoolProperty(
name="Apply Dimension",
default=False,
description=""
)
# Align Rot Prop:
rot_x: BoolProperty(
name="Align Rotation to X axis",
default=False,
description=""
)
rot_y: BoolProperty(
name="Align Rotation to Y axis",
default=False,
description=""
)
rot_z: BoolProperty(
name="Align Rotation to Z axis",
default=False,
description=""
)
# Apply Rot:
apply_rot: BoolProperty(
name="Apply Rotation",
default=False,
description=""
)
# Align Scale:
scale_x: BoolProperty(
name="Match Scale to X axis",
default=False,
description=""
)
scale_y: BoolProperty(
name="Match Scale to Y axis",
default=False,
description=""
)
scale_z: BoolProperty(
name="match Scale to Z axis",
default=False,
description=""
)
# Apply Scale:
apply_scale: BoolProperty(
name="Apply Scale",
default=False,
description=""
)
def draw(self, context):
layout = self.layout
# Object-Pivot-Cursor:
row0 = layout.row()
row0.prop(self, 'subject', expand=True)
# Move active Too:
row1 = layout.row()
row1.prop(self, 'active_too')
row1.prop(self, 'advanced')
if self.advanced:
row1b = layout.row()
row1b.prop(self, 'consistent')
row2 = layout.row()
row2.label(text="Align Location:")
# Align Location:
row3 = layout.row()
row3.prop(self, "loc_x", text="X", toggle=True)
row3.prop(self, "loc_y", text="Y", toggle=True)
row3.prop(self, "loc_z", text="Z", toggle=True)
# Offset:
if self.advanced is True:
# row8 = col.row()
# row8.label(text='Location Offset')
row9 = layout.row()
row9.prop(self, 'loc_offset', text='')
# Selection Options
if self.advanced is True:
sel = bpy.context.selected_objects
sel_obs = len(sel)
if sel_obs != 0:
row4 = layout.row()
row4.label(text="Selected: " + str(sel_obs) + " Objects", icon='OBJECT_DATA')
if self.subject == "1" or self.subject == "2":
row5b = layout.row()
row5b.prop(self, 'self_or_active', expand=True)
else:
row5 = layout.row()
row5.prop(self, 'ref1', expand=True)
# Active Object Options: name of the active object and its reference point
act = bpy.context.active_object
if self.advanced is True:
if act:
row6 = layout.row()
row6.label(text="Active: " + act.name, icon='OBJECT_DATA')
row7 = layout.row()
row7.prop(self, 'ref2', expand=True)
if self.subject == "0":
row12 = layout.row()
row12.label(text='Align Rotation:')
row13 = layout.row(align=True)
row13.prop(self, 'rot_x', text='X', toggle=True)
row13.prop(self, 'rot_y', text='Y', toggle=True)
row13.prop(self, 'rot_z', text='Z', toggle=True)
row13.prop(self, 'apply_rot', text='Apply', toggle=True)
if self.advanced is True:
row13b = layout.row()
row13b.prop(self, 'rot_offset', text='')
row14 = layout.row()
row14.label(text='Match Scale:')
row15 = layout.row(align=True)
row15.prop(self, 'scale_x', text='X', toggle=True)
row15.prop(self, 'scale_y', text='Y', toggle=True)
row15.prop(self, 'scale_z', text='Z', toggle=True)
row15.prop(self, 'apply_scale', text='Apply', toggle=True)
if self.advanced is True:
row15b = layout.row()
row15b.prop(self, 'scale_offset', text='')
row10 = layout.row()
row10.label(text='Fit Dimensions:')
row11 = layout.row(align=True)
row11.prop(self, 'fit_x', text='X', toggle=True)
row11.prop(self, 'fit_y', text='Y', toggle=True)
row11.prop(self, 'fit_z', text='Z', toggle=True)
row11.prop(self, 'apply_dim', text='Apply', toggle=True)
def execute(self, context):
align_function(
self.subject, self.active_too, self.consistent,
self.self_or_active, self.loc_x, self.loc_y, self.loc_z,
self.ref1, self.ref2, self.loc_offset,
self.rot_x, self.rot_y, self.rot_z, self.rot_offset,
self.scale_x, self.scale_y, self.scale_z, self.scale_offset,
self.fit_x, self.fit_y, self.fit_z
)
return {'FINISHED'}
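# --- Example (not part of the original add-on; object names are hypothetical) ---
# A minimal sketch of driving the Advanced Align operator from a script after the
# add-on has been registered: select the objects to move, make the target active,
# then call the operator with the desired axes and reference points.
def _example_invoke_align_tools():
    for ob in bpy.data.objects:
        ob.select_set(ob.name in {"Cube", "Sphere"})               # hypothetical names
    bpy.context.view_layer.objects.active = bpy.data.objects["Cube"]
    # Align the selection's centers to the active object's center on X and Y only
    bpy.ops.object.align_tools(subject='0', loc_x=True, loc_y=True,
                               ref1='1', ref2='1')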
# Simple Align Classes #
# Align All Rotation And Location
class AlignOperator(Operator):
bl_idname = "object.align"
bl_label = "Align Selected To Active"
bl_description = "Align Selected To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
main(context)
return {'FINISHED'}
# Align Location All
class AlignLocationOperator(Operator):
bl_idname = "object.align_location_all"
bl_label = "Align Selected Location To Active"
bl_description = "Align Selected Location To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
LocAll(context)
return {'FINISHED'}
# Align Location X
class AlignLocationXOperator(Operator):
bl_idname = "object.align_location_x"
bl_label = "Align Selected Location X To Active"
bl_description = "Align Selected Location X To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
LocX(context)
return {'FINISHED'}
# Align Location Y
class AlignLocationYOperator(Operator):
bl_idname = "object.align_location_y"
bl_label = "Align Selected Location Y To Active"
bl_description = "Align Selected Location Y To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
LocY(context)
return {'FINISHED'}
# Align LocationZ
class AlignLocationZOperator(Operator):
bl_idname = "object.align_location_z"
bl_label = "Align Selected Location Z To Active"
bl_description = "Align Selected Location Z To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
LocZ(context)
return {'FINISHED'}
# Align Rotation All
class AlignRotationOperator(Operator):
bl_idname = "object.align_rotation_all"
bl_label = "Align Selected Rotation To Active"
bl_description = "Align Selected Rotation To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
RotAll(context)
return {'FINISHED'}
# Align Rotation X
class AlignRotationXOperator(Operator):
bl_idname = "object.align_rotation_x"
bl_label = "Align Selected Rotation X To Active"
bl_description = "Align Selected Rotation X To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
RotX(context)
return {'FINISHED'}
# Align Rotation Y
class AlignRotationYOperator(Operator):
bl_idname = "object.align_rotation_y"
bl_label = "Align Selected Rotation Y To Active"
bl_description = "Align Selected Rotation Y To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
RotY(context)
return {'FINISHED'}
# Align Rotation Z
class AlignRotationZOperator(Operator):
bl_idname = "object.align_rotation_z"
bl_label = "Align Selected Rotation Z To Active"
bl_description = "Align Selected Rotation Z To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
RotZ(context)
return {'FINISHED'}
# Scale All
class AlignScaleOperator(Operator):
bl_idname = "object.align_objects_scale_all"
bl_label = "Align Selected Scale To Active"
bl_description = "Align Selected Scale To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
ScaleAll(context)
return {'FINISHED'}
# Align Scale X
class AlignScaleXOperator(Operator):
bl_idname = "object.align_objects_scale_x"
bl_label = "Align Selected Scale X To Active"
bl_description = "Align Selected Scale X To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
ScaleX(context)
return {'FINISHED'}
# Align Scale Y
class AlignScaleYOperator(Operator):
bl_idname = "object.align_objects_scale_y"
bl_label = "Align Selected Scale Y To Active"
bl_description = "Align Selected Scale Y To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
ScaleY(context)
return {'FINISHED'}
# Align Scale Z
class AlignScaleZOperator(Operator):
bl_idname = "object.align_objects_scale_z"
bl_label = "Align Selected Scale Z To Active"
bl_description = "Align Selected Scale Z To Active"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
ScaleZ(context)
return {'FINISHED'}
# Interface Panel
class VIEW3D_PT_AlignUi(Panel):
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_label = "Align Tools"
bl_context = "objectmode"
bl_category = 'Tools'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
obj = context.object
if obj is not None:
row = layout.row()
row.label(text="Active object is: ", icon='OBJECT_DATA')
box = layout.box()
box.label(text=obj.name, icon='EDITMODE_HLT')
col = layout.column()
col.label(text="Align Loc + Rot:")
col = layout.column(align=False)
col.operator("object.align", text="XYZ")
col = layout.column()
col.label(text="Align Location:")
col = layout.column_flow(columns=4, align=True)
col.operator("object.align_location_x", text="X")
col.operator("object.align_location_y", text="Y")
col.operator("object.align_location_z", text="Z")
col.operator("object.align_location_all", text="All")
col = layout.column()
col.label(text="Align Rotation:")
col = layout.column_flow(columns=4, align=True)
col.operator("object.align_rotation_x", text="X")
col.operator("object.align_rotation_y", text="Y")
col.operator("object.align_rotation_z", text="Z")
col.operator("object.align_rotation_all", text="All")
col = layout.column()
col.label(text="Align Scale:")
col = layout.column_flow(columns=4, align=True)
col.operator("object.align_objects_scale_x", text="X")
col.operator("object.align_objects_scale_y", text="Y")
col.operator("object.align_objects_scale_z", text="Z")
col.operator("object.align_objects_scale_all", text="All")
if obj is not None:
col = layout.column()
col.label(text="Advanced Align")
layout.operator("object.align_tools", text="Advanced")
# Add-ons Preferences Update Panel
# Define Panel classes for updating
panels = (
VIEW3D_PT_AlignUi,
)
def update_panel(self, context):
message = "Align Tools: Updating Panel locations has failed"
try:
for panel in panels:
if "bl_rna" in panel.__dict__:
bpy.utils.unregister_class(panel)
for panel in panels:
panel.bl_category = context.preferences.addons[__name__].preferences.category
bpy.utils.register_class(panel)
except Exception as e:
print("\n[{}]\n{}\n\nError:\n{}".format(__name__, message, e))
pass
class AlignAddonPreferences(AddonPreferences):
# this must match the addon name, use '__package__'
# when defining this in a submodule of a python package.
bl_idname = __name__
category: StringProperty(
name="Tab Category",
description="Choose a name for the category of the panel",
default="Tools",
update=update_panel
)
def draw(self, context):
layout = self.layout
row = layout.row()
col = row.column()
col.label(text="Tab Category:")
col.prop(self, "category", text="")
# Class List
classes = (
VIEW3D_PT_AlignUi,
AlignOperator,
AlignLocationOperator,
AlignLocationXOperator,
AlignLocationYOperator,
AlignLocationZOperator,
AlignRotationOperator,
AlignRotationXOperator,
AlignRotationYOperator,
AlignRotationZOperator,
AlignScaleOperator,
AlignScaleXOperator,
AlignScaleYOperator,
AlignScaleZOperator,
OBJECT_OT_align_tools,
AlignAddonPreferences,
)
# Register all operators and panels
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
| 33.390598
| 112
| 0.554151
|
d07feb2dcc06e10e3e2434b0b9d0348caa2d5e25
| 7,074
|
py
|
Python
|
etrm/raster.py
|
dgketchum/etrm
|
f74f5771fbc6ba5750a790e384eac422b598325a
|
[
"Apache-2.0"
] | 1
|
2021-08-11T20:10:52.000Z
|
2021-08-11T20:10:52.000Z
|
etrm/raster.py
|
dgketchum/etrm
|
f74f5771fbc6ba5750a790e384eac422b598325a
|
[
"Apache-2.0"
] | null | null | null |
etrm/raster.py
|
dgketchum/etrm
|
f74f5771fbc6ba5750a790e384eac422b598325a
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2017 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
import os
from numpy import array, asarray, where, zeros_like, nonzero
from numpy.ma import masked_where, nomask
from app.paths import paths

# gdal is used by Raster.open() and Raster._save() but was not imported in this file
try:
    from osgeo import gdal
except ImportError:  # older installs expose a top-level gdal module
    import gdal
gmask = None
gmask_path = None
class Raster(object):
_band = 1
_path = None
_arr = None
_geo = None
_masked_arr = None
def __init__(self, path=None, root=None, band=1):
if path is not None:
if root is not None:
path = os.path.join(root, path)
self.open(path, band)
@classmethod
def fromarray(cls, arr, geo=None):
r = cls()
r._arr = arr
r._geo = geo
return r
@property
def geo(self):
return self._geo
@property
def as_bool_array(self):
return asarray(self._arr, dtype=bool)
@property
def array(self):
"""
:return:
"""
return self._masked_arr if self._masked_arr is not None else self._arr
def set_array(self, arr):
self._arr = arr
def apply_mask(self):
"""
apply mask to ourself
:return:
"""
self._masked_arr = self.masked()
def filter_greater(self, fvalue, value):
"""
where arr is greater than fvalue set arr to value
:param fvalue: float
:param value: float
:return:
"""
if self._masked_arr is not None:
arr = self._masked_arr
else:
arr = self._arr
arr[arr > fvalue] = value
def filter_less(self, fvalue, value):
"""
where arr is less than fvalue set arr to value
:param fvalue: float
:param value: float
:return:
"""
if self._masked_arr is not None:
arr = self._masked_arr
else:
arr = self._arr
arr[arr < fvalue] = value
def remove_negatives(self):
self.filter_less(0, 0)
def unmasked(self, tiff_shape):
narr = self._arr
if paths.mask and os.path.isfile(paths.mask):
idxs = self._get_masked_indices()
idxs = asarray(idxs, int)
masked_arr = masked_where(idxs == 0, idxs)
z = zeros_like(idxs, dtype=float)
z[~masked_arr.mask] = narr.ravel()
narr = z
else:
print('tiff shape', tiff_shape)
if tiff_shape is None:
print('You need to define tiff shape (cols,rows) if not using a mask')
import sys
sys.exit()
print("tiff shape", tiff_shape)
tiff = tiff_shape.split(',')
# narr = narr.reshape(int(tiff_shape[0]), int(tiff_shape[1]))
rows = int(tiff[0])
cols = int(tiff[1])
narr = narr.reshape(rows, cols)
return narr
def unmasked2(self): #TODO check gabe merge
idxs = self._get_masked_indices()
if idxs is not None:
idxs = asarray(idxs, int)
masked_arr = masked_where(idxs == 0, idxs)
z_float = zeros_like(idxs, dtype=float)
# masked_arr = self._arr.reshape(len(masked_arr), len(masked_arr[0]))
z_float[~masked_arr.mask] = self._arr.ravel()
masked_arr = z_float
# masked_arr.mask = nomask
else:
masked_arr = self._arr#.ravel()
return masked_arr
def masked(self):
"""
returns valid points as 1-d array
:return:
"""
a = self._arr
if paths.mask and os.path.isfile(paths.mask):
idxs = self._get_masked_indices()
a = a[idxs]
# print self._arr
return a.flatten()
def open(self, path, band=1):
"""
:param path: path to GeoTiff
:param band:
:return:
"""
if not os.path.isfile(path):
print('Not a valid file: {}'.format(path))
return
self._path = path
self._band = band
raster = gdal.Open(self._path)
rband = raster.GetRasterBand(band)
self._arr = array(rband.ReadAsArray(), dtype=float)
self._geo = {'cols': raster.RasterXSize, 'rows': raster.RasterYSize, 'bands': raster.RasterCount,
'data_type': rband.DataType, 'projection': raster.GetProjection(),
'geotransform': raster.GetGeoTransform(), 'resolution': raster.GetGeoTransform()[1]}
del raster
def save(self, output, arr=None, geo=None, band=None):
"""
save an array as a GeoTiff
:param output: path of the output GeoTiff
:param arr:
:param geo:
:param band:
:return:
"""
if arr is None:
arr = self._arr
if geo is None:
geo = self._geo
if band is None:
band = self._band
self._save(output, arr, geo, band)
# private
def _get_masked_indices(self):
global gmask_path, gmask
if gmask is None or gmask_path != paths.mask:
print('caching mask: {}'.format(paths.mask))
mask = Raster(paths.mask)
gmask = mask.as_bool_array
gmask_path = paths.mask
return gmask
def _get_masked_indices2(self): #TODO check gabe merge
global gmask_path, gmask
if paths.mask:
if gmask is None or gmask_path != paths.mask:
if os.path.isfile(paths.mask):
print('caching mask: {}'.format(paths.mask))
mask = Raster(paths.mask)
gmask = mask.as_bool_array
gmask_path = paths.mask
return gmask
def _save(self, path, arr, geo, band):
driver = gdal.GetDriverByName('GTiff')
out_data_set = driver.Create(path, geo['cols'], geo['rows'],
geo['bands'], geo['data_type'])
out_data_set.SetGeoTransform(geo['geotransform'])
out_data_set.SetProjection(geo['projection'])
output_band = out_data_set.GetRasterBand(band)
output_band.WriteArray(arr, 0, 0)
del out_data_set, output_band
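# --- Example usage (not part of the original module; file paths are hypothetical) ---
# A minimal sketch of a read/modify/write round trip with the Raster class.
def _example_raster_roundtrip(in_path='input.tif', out_path='output.tif'):
    r = Raster(in_path)           # reads band 1 and caches the geotransform/projection
    r.remove_negatives()          # clamp negative cells to 0
    r.save(out_path)              # writes using the source raster's geo metadata
    return r.array.shape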
# ============= EOF =============================================
| 29.722689
| 105
| 0.538027
|
aa21e2ed356e6e29df86bed8377d326b65f59281
| 5,238
|
py
|
Python
|
checkmate/tf2/wrapper.py
|
hy00nc/checkmate
|
f3452a30dbf8d00c5ce9607712e335f39d2f6c5b
|
[
"Apache-2.0"
] | null | null | null |
checkmate/tf2/wrapper.py
|
hy00nc/checkmate
|
f3452a30dbf8d00c5ce9607712e335f39d2f6c5b
|
[
"Apache-2.0"
] | null | null | null |
checkmate/tf2/wrapper.py
|
hy00nc/checkmate
|
f3452a30dbf8d00c5ce9607712e335f39d2f6c5b
|
[
"Apache-2.0"
] | null | null | null |
import logging
import subprocess
import psutil
import tensorflow as tf
from checkmate.core.solvers.strategy_chen import solve_chen_sqrtn
try:
    from checkmate.core.solvers.gurobi_solver import solve_ilp_gurobi as solver
except ImportError:
    try:
        from checkmate.core.solvers.cvxpy_solver import solve_checkmate_cvxpy as solver
    except ImportError:
        solver = solve_chen_sqrtn
from checkmate.tf2.execution import edit_graph
from checkmate.tf2.extraction import dfgraph_from_tf_function
def set_opts():
opts = {}
# tf.config.optimizer.set_jit(False)
# opts["dependency"] = False
opts["remapper"] = False
tf.config.optimizer.set_experimental_options(opts)
def _using_gpu_check():
return tf.test.is_gpu_available() and tf.test.is_built_with_cuda()
def nvidiasmi_query(query="memory.total"):
# from https://discuss.pytorch.org/t/access-gpu-memory-usage-in-pytorch/3192/4
mem = subprocess.check_output(
["nvidia-smi", "--query-gpu={}".format(query), "--format=csv,nounits,noheader"], encoding="utf-8"
)
query_result_list = [int(x) for x in mem.strip().split("\n")]
return dict(zip(range(len(query_result_list)), query_result_list))
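# For example, on a machine with two 11441 MiB cards (illustrative values):
#   nvidiasmi_query("memory.total") -> {0: 11441, 1: 11441}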
def _get_gpu_memory():
if _using_gpu_check(): # choose based on available GPU RAM
gpu_ram = nvidiasmi_query("memory.total")
budget = min(gpu_ram.values()) * 0.9
logging.info(
"[checkmate] No budget specified; defaulting to the minimum amount of total GPU RAM on any single "
"GPU, {0:.2f}MB".format(budget)
)
else: # choose based available system memory
budget = psutil.virtual_memory().available * 0.8 / 1000000
logging.debug("[checkmate] No GPU detected, using system DRAM on CPU")
logging.info("[checkmate] No budget specified; defaulting to {0:.2f}MB".format(budget))
return budget
def get_function(model, input_shape, label_shape, optimizer, loss):
@tf.function
def grads_check(data, label):
with tf.GradientTape() as check_tape:
predictions = model(data)
loss_val = loss(label, predictions)
gradients = check_tape.gradient(loss_val, model.trainable_variables)
return predictions, loss_val, gradients
return grads_check
def compile_tf2(
model: tf.keras.Model,
loss: tf.losses.Loss,
optimizer: tf.optimizers.Optimizer,
input_spec=None,
label_spec=None,
scheduler=solver,
budget="auto",
**kwargs
):
"""
Checkmate optimizes your DNN graph to consume less GPU memory by computing a recomputation schedule;
it returns a tf.function train step that applies the optimized schedule.
:param model: a keras Model to optimize
:param loss: loss function to use when training
:param optimizer: optimizer whose apply_gradients is called in the returned train step
:param input_spec: tf.TensorSpec list that corresponds to model inputs
:param label_spec: tf.TensorSpec that corresponds to the label shape
:param scheduler: solver used to compute the recomputation schedule
:param budget: memory budget in MB, or "auto" to infer it from available GPU/system memory
"""
set_opts()
# set input, output shapes
if model.input_spec is None and input_spec is None:
raise ValueError(
"Keras model has not been compiled yet! If model.input_spec is not defined, then input_spec "
"parameter must be set in the call to checkmate.tensorflow2.compile."
)
if label_spec is None:
raise ValueError(
"Checkmate needs the shape of the label in order to calculate the size of all operations. Pass in"
"an example input or tf.TensorSpec object representing the shape of the label."
)
input_spec = model.input_spec if input_spec is None else input_spec
# query budget if not specified
if budget == "auto":
budget = _get_gpu_memory()
# build gradient function for model
@tf.function
def grads_check(data, label):
with tf.GradientTape() as check_tape:
predictions = model(data)
loss_val = loss(label, predictions)
gradients = check_tape.gradient(loss_val, model.trainable_variables)
return predictions, loss_val, gradients
fn = grads_check.get_concrete_function(input_spec, label_spec)
g = dfgraph_from_tf_function(fn)
# choose solver and calculate solver
logging.error(
"[checkmate] At the moment, Checkmate does not guarentee scheduling under the specified budget. "
"This feature will appear soon."
)
logging.debug("[checkmate] Solving for recomputation schedule, may take a while")
logging.debug("[checkmate] Using Chen et al. (2016) sqrt(n) algorithm")
if solver != solve_chen_sqrtn:
sched_result = scheduler(g, budget, **kwargs)
else:
sched_result = solver(g, **kwargs)
logging.debug("[checkmate] Schedule solved")
# create recomputed gradient function
def clean_bs(tensorspec):
newshape = list(tensorspec.shape)
newshape[0] = None
return tf.TensorSpec(shape=newshape, dtype=tensorspec.dtype)
fn_nobatchsize = grads_check.get_concrete_function(clean_bs(input_spec), clean_bs(label_spec))
grad_fn_check = edit_graph(fn_nobatchsize, g.op_dict, sched_result.schedule)
@tf.function
def train_step_check(data, labels):
predictions, loss_val, gradients = grad_fn_check(data, labels)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return predictions, loss_val
return train_step_check
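# --- Example usage (not part of the original module; model, shapes and data are illustrative) ---
# A minimal sketch of wrapping a small Keras classifier with compile_tf2 and running one step.
def _example_compile_tf2():
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(64, activation="relu", input_shape=(32,)),
        tf.keras.layers.Dense(10),
    ])
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    optimizer = tf.keras.optimizers.SGD(0.01)
    train_step = compile_tf2(
        model, loss, optimizer,
        input_spec=tf.TensorSpec((8, 32), tf.float32),   # batch of 8 feature vectors
        label_spec=tf.TensorSpec((8,), tf.int64),        # integer class labels
        budget="auto",
    )
    data = tf.random.normal((8, 32))
    labels = tf.random.uniform((8,), maxval=10, dtype=tf.int64)
    predictions, loss_val = train_step(data, labels)
    return float(loss_val)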
| 36.375
| 111
| 0.695494
|
70ffa549a57dba66d9a52f4fb61aeb45383fbb50
| 45,534
|
py
|
Python
|
src/pipeline_pre.py
|
scaomath/BCAI_kaggle_CHAMPS
|
4a42e18b5b88043fb40ec15289216a1d88789698
|
[
"MIT"
] | 108
|
2019-09-13T15:06:08.000Z
|
2022-03-10T08:27:54.000Z
|
src/pipeline_pre.py
|
scaomath/BCAI_kaggle_CHAMPS
|
4a42e18b5b88043fb40ec15289216a1d88789698
|
[
"MIT"
] | 1
|
2020-11-19T09:29:14.000Z
|
2020-11-19T09:29:14.000Z
|
src/pipeline_pre.py
|
scaomath/BCAI_kaggle_CHAMPS
|
4a42e18b5b88043fb40ec15289216a1d88789698
|
[
"MIT"
] | 31
|
2019-09-13T15:06:23.000Z
|
2022-02-10T14:51:43.000Z
|
#!/usr/bin/env python
## Copyright (c) 2017 Robert Bosch GmbH
## All rights reserved.
##
## This source code is licensed under the MIT license found in the
## LICENSE file in the root directory of this source tree.
import collections
import gzip
import itertools
import json
import os
import pickle
import sys
import numpy as np
import pandas as pd
import rdkit
import xyz2mol as x2m
# Due to some compatibility issues between rdkit/pybel and torch, we have to load them as needed.
# Rules are meant to be broken, including best-programming practices :)
bond_order_dict = { rdkit.Chem.rdchem.BondType.SINGLE: 1,
rdkit.Chem.rdchem.BondType.AROMATIC: 1.5,
rdkit.Chem.rdchem.BondType.DOUBLE: 2,
rdkit.Chem.rdchem.BondType.TRIPLE: 3}
root = '../' # This should be the root of the archive
with open(os.path.join(root,'SETTINGS.json')) as f:
settings = json.load(f)
with open(os.path.join(root,settings['CONFIG_DIR'],'manual_bond_order_fix.json')) as f:
manual_bond_order_dict = json.load(f)
atomic_num_dict = { 'H':1, 'C':6, 'N':7, 'O':8, 'F':9 }
# These were mistaken or too small datasets, so we are relabeling them.
classification_corrections = {
'1JHN_2_2_1_1':'1JHN_3_2_2_1',
'3JHN_4.5_3_1.5_1.5':'3JHN_4_3_1.5_1.5',
'2JHC_3_3_1_1':'2JHC_4_3_2_1',
'3JHC_3_3_1_1':'3JHC_4_3_2_1',
'3JHC_4_2_2_2':'3JHC_4_2_3_1'}
# These have less than 1000 between train and test, so we will drop the subtypes
small_longtypes = {'2JHN_4.5_2_3_1.5', '3JHN_4_2_3_1', '2JHN_4_2_3_1',
'2JHN_4.5_3_1.5_1.5', '2JHN_4_3_2_1', '3JHN_4_4_1_1',
'3JHN_4_3_2_1', '2JHN_4_4_1_1', '3JHN_4.5_2_3_1.5',
'2JHN_4_2_2_2', '3JHN_4_2_2_2', '1JHN_4_3_2_1',
'1JHN_4_4_1_1', '2JHN_3_1_3_0'}
(MAX_ATOM_COUNT,MAX_BOND_COUNT,MAX_TRIPLET_COUNT,MAX_QUAD_COUNT) = (29, 406, 54, 117)
def make_structure_dict(atoms_dataframe):
"""Convert from structures.csv output to a dictionary data storage.
Args:
atoms_dataframe: The dataframe corresponding to structures.csv
Returns:
dict: Mapping of molecule name to molecule properties.
"""
atoms = atoms_dataframe.sort_values(["molecule_name", "atom_index"]) # ensure ordering is consistent
# Make a molecule-based dictionary of the information
structure_dict = collections.defaultdict(lambda: {"symbols":[],"positions":[]})
for index,row in atoms.iterrows():
structure_dict[row["molecule_name"]]["symbols"].append(row["atom"])
structure_dict[row["molecule_name"]]["positions"].append([row["x"],row["y"],row["z"]])
return structure_dict
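# Example of the resulting layout (illustrative; 'dsgdb9nsd_000001' is methane in the CHAMPS data):
#   structure_dict['dsgdb9nsd_000001'] == {
#       'symbols':   ['C', 'H', 'H', 'H', 'H'],
#       'positions': [[x0, y0, z0], [x1, y1, z1], ...],   # one [x, y, z] list per atom
#   }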
def enhance_structure_dict(structure_dict):
"""Add derived information to the structure dictionary.
Args:
structure_dict: Output of :func:`make_structure_dict`.
Returns:
dict: The same, modified in-place, with derived information (e.g. atom distances).
Caution: If torch is imported at the same time as this is run, you may get a segmentation fault. Complain to pybel or rdkit, I suppose.
"""
import pybel
for molecule_name in structure_dict:
# positions - array (N,3) of Cartesian positions
molecule = structure_dict[molecule_name]
positions = np.array(molecule['positions'])
n_atom = positions.shape[0]
molecule['positions'] = positions
# distances - array (N,N) of distances between atoms
pos1 = np.tile(positions, (n_atom,1,1) )
pos2 = np.transpose(pos1, (1,0,2) )
dist = np.linalg.norm(pos1 - pos2, axis=-1)
molecule['distances'] = dist
# angle - array (N,) of angles to the 2 closest atoms
sorted_j = np.argsort(dist, axis=-1)
relpos1 = positions[sorted_j[:,1],:] - positions[sorted_j[:,0],:]
relpos2 = positions[sorted_j[:,2],:] - positions[sorted_j[:,0],:]
cos = np.sum(relpos1*relpos2,axis=1) / (np.linalg.norm(relpos1,axis=1) * np.linalg.norm(relpos2,axis=1))
angle = np.arccos( np.clip(cos,-1.0,1.0) ).reshape((n_atom,1)) / np.pi
molecule['angle'] = angle[:,0]
# bond orders - array (N,N) of the bond order (0 for no chemical bond)
# Note this relies on a few manual corrections
molecule['bond_orders'] = np.zeros((n_atom,n_atom))
atomicNumList = [atomic_num_dict[symbol] for symbol in molecule['symbols']]
if molecule_name in manual_bond_order_dict:
molecule['bond_orders'] = np.array(manual_bond_order_dict[molecule_name],dtype=float)
else:
mol = x2m.xyz2mol(atomicNumList,0,positions,True,True)
for bond in mol.GetBonds():
atom0, atom1 = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
bond_order = bond.GetBondType()
molecule['bond_orders'][atom0,atom1] = bond_order_dict[bond_order]
molecule['bond_orders'][atom1,atom0] = bond_order_dict[bond_order]
# Supplementary information for tagging:
# top_bonds: (N,4 or less) bond orders of the top 4 bonds, for each atom
# bond_ids: (N,4): Label the atom with the following 4 linear transforms of top_bonds:
# * total num bonds (valence), counting double as 2
# * total num bonded neighbors, counting double as 1
# * largest order
# * second largest order.
molecule['top_bonds'] = np.sort(molecule['bond_orders'],axis=-1)[:,-1:-5:-1]
molecule['bond_ids'] = np.hstack((molecule['top_bonds'].sum(axis=-1)[:,np.newaxis],
np.sum(molecule['top_bonds']>1e-3,axis=-1)[:,np.newaxis],
molecule['top_bonds'][:,:2]))
# long_symbols (N,) string relabel of the symbol straight from bond_ids
molecule['long_symbols'] = ['_'.join([
molecule['symbols'][i]]+[str(x) for x in molecule['bond_ids'][i]])
for i in range(n_atom)]
chem_bond_atoms = [sorted([molecule['symbols'][i] for i in molecule['bond_orders'][atom_index].nonzero()[0]])
for atom_index in range(n_atom)]
molecule['sublabel_atom'] = ['-'.join([molecule['long_symbols'][atom_index]]+chem_bond_atoms[atom_index])
for atom_index in range(n_atom)]
# pybel information. I think we only end up using Gastiger charges.
# Each of these is (N,) arrays
# Convert to xyz string for pybel's I/O
xyz = str(n_atom)+'\n\n' + '\n'.join([ ' '.join( [
str(molecule['symbols'][i]),
str(molecule['positions'][i,0]),
str(molecule['positions'][i,1]),
str(molecule['positions'][i,2])] )
for i in range(n_atom)])
mol = pybel.readstring('xyz',xyz)
molecule['charges'] = [mol.atoms[i].partialcharge for i in range(n_atom)]
molecule['spins'] = [mol.atoms[i].spin for i in range(n_atom)]
molecule['heavyvalences'] = [mol.atoms[i].heavyvalence for i in range(n_atom)]
molecule['heterovalences'] = [mol.atoms[i].heterovalence for i in range(n_atom)]
molecule['valences'] = [mol.atoms[i].valence for i in range(n_atom)]
molecule['hyb_types'] = [mol.atoms[i].type for i in range(n_atom)]
return structure_dict
def enhance_atoms(atoms_dataframe,structure_dict):
"""Enhance the atoms dataframe by including derived information.
Args:
atoms_dataframe: Pandas dataframe read from structures.csv.
structure_dict: Output of :func:`make_structure_dict`, after running :func:`enhance_structure_dict`.
Returns:
pandas.DataFrame: Same dataframe, modified in-place, with derived information added.
"""
assert int(atoms_dataframe.groupby("molecule_name").count().max()[0]) <= MAX_ATOM_COUNT
for key in ['distances','angle', 'bond_orders', 'top_bonds', 'bond_ids', 'long_symbols','sublabel_atom',
'charges', 'spins', 'heavyvalences', 'heterovalences', 'valences', 'hyb_types']:
newkey = key if key[-1]!='s' else key[:-1]
atoms_dataframe[newkey] = atoms_dataframe.apply(lambda x:
structure_dict[x['molecule_name']][key][x['atom_index']],
axis=1)
atoms_dataframe.rename(columns={'long_symbol':'labeled_atom'},inplace=True)
return atoms_dataframe
def enhance_bonds(bond_dataframe,structure_dict):
"""Enhance the bonds dataframe by including derived information.
Args:
bond_dataframe: Pandas dataframe read from train.csv or test.csv.
structure_dict: Output of :func:`make_structure_dict`, after running :func:`enhance_structure_dict`.
Returns:
pandas.DataFrame: Same dataframe, modified in-place, with derived information added.
"""
bond_dataframe.sort_values(['molecule_name','atom_index_0','atom_index_1'],inplace=True)
assert int(bond_dataframe.groupby("molecule_name").count().max()[0]) <= MAX_BOND_COUNT
new_columns = collections.defaultdict(list)
for index,row in bond_dataframe.iterrows():
molecule_name, iatom0, iatom1 = row['molecule_name'],row['atom_index_0'],row['atom_index_1']
if 'predict' not in structure_dict[molecule_name]:
structure_dict[molecule_name]['predict'] = structure_dict[molecule_name]['bond_orders'] * 0
structure_dict[molecule_name]['predict'][iatom0,iatom1] = 1
structure_dict[molecule_name]['predict'][iatom1,iatom0] = 1
long_symbols = [structure_dict[molecule_name]['long_symbols'][x] for x in [iatom0,iatom1]]
# labeled_type
if all([x[0]=='H' for x in long_symbols]):
lt = row['type']
elif not any([x[0]=='H' for x in long_symbols]):
raise ValueError("No hydrogen found in {}".format(row))
else:
ls = [x for x in long_symbols if x[0]!='H'][0]
lt = row["type"] + ls[1:].replace('.0','')
if lt in classification_corrections:
lt = classification_corrections[lt]
if lt in small_longtypes:
lt = lt.split('_')[0]
new_columns["labeled_type"].append(lt)
# sublabeled type
new_columns["sublabel_type"].append(row['type'] + '-'+ '-'.join(sorted(long_symbols)))
# bond order
new_columns["bond_order"].append(structure_dict[molecule_name]['bond_orders'][iatom0,iatom1])
new_columns["predict"].append(1)
for key in new_columns:
bond_dataframe[key] = new_columns[key]
return bond_dataframe
def add_all_pairs(bond_dataframe,structure_dict):
"""Add all pairs of atoms, including those without coupling and without chemical bonds.
Args:
bond_dataframe: Pandas dataframe read from train.csv or test.csv, after running :func:`enhance_bonds`.
structure_dict: Output of :func:`make_structure_dict`, after running :func:`enhance_structure_dict`.
Returns:
pandas.DataFrame: New dataframe, with new bonds added.
"""
# NOTE: The convention for id used to be very large numbers for new bonds; now it is negative.
iadd = -1
new_data = collections.defaultdict(list)
for molecule_name in bond_dataframe["molecule_name"].unique():
n_atom = len(structure_dict[molecule_name]["symbols"])
# for backwards compatibility, this is iatom1,iatom0. See make_new_csv.py, write_pairs.
for iatom1,iatom0 in itertools.combinations(range(n_atom),r=2):
if 'predict' not in structure_dict[molecule_name]:
raise KeyError('{} has no "predict" value'.format(molecule_name))
if structure_dict[molecule_name]['predict'][iatom0,iatom1]:
continue # already got it
symbols = [structure_dict[molecule_name]['symbols'][i] for i in [iatom0,iatom1]]
bond_order = structure_dict[molecule_name]['bond_orders'][iatom0,iatom1]
nottype = '-'.join(sorted(symbols)) + '_' + str(bond_order)
row = {'id':iadd,'molecule_name':molecule_name,'atom_index_0':iatom0,'atom_index_1':iatom1,
'type':nottype,'labeled_type':nottype,'sublabel_type':nottype,
'bond_order': bond_order,
'predict':0}
if 'scalar_coupling_constant' in bond_dataframe:
row['scalar_coupling_constant'] = 0.
for k,v in row.items():
new_data[k].append(v)
iadd -= 1
new_data = pd.DataFrame(new_data)
if bond_dataframe.index.name!='id':
bond_dataframe = bond_dataframe.set_index('id')
new_data.set_index('id',inplace=True)
all_data = bond_dataframe.append(new_data,verify_integrity=True,sort=False)
return all_data
def make_triplets(molecule_list,structure_dict):
"""Make the triplet dataframe.
Args:
molecule_list: List of molecules to generate.
structure_dict: Output of :func:`make_structure_dict`, after running :func:`enhance_structure_dict`.
Returns:
pandas.DataFrame: New dataframe, with triplets and related information. The convention is the bond looks like 1-0-2, where 0 is the central atom.
"""
new_data = collections.defaultdict(list)
for molecule_name in molecule_list:
molecule = structure_dict[molecule_name]
bond_orders = molecule['bond_orders']
short = molecule['symbols']
long = molecule['long_symbols']
for i, atom_bond_order in enumerate(bond_orders):
connection_indices = atom_bond_order.nonzero()[0]
pairs = itertools.combinations(connection_indices,2)
for pair in pairs:
j, k = pair[0], pair[1]
atom0_short = short[i] + long[i].split('_')[2]
atom1_short = short[j] + long[j].split('_')[2]
atom2_short = short[k] + long[k].split('_')[2]
atom0_long = long[i]
atom1_long = long[j]
atom2_long = long[k]
#labels = ['-'.join([atom1_short,str(atom_bond_order[j])]),
# '-'.join([atom2_short,str(atom_bond_order[k])])]
labels = [atom1_short,atom2_short]
labels.sort()
label = '-'.join([atom0_short]+labels)
#sublabels = ['-'.join([atom1_long,str(atom_bond_order[j])]),
# '-'.join([atom2_long,str(atom_bond_order[k])])]
sublabels = [atom1_long,atom2_long]
sublabels.sort()
sublabel = '-'.join([atom0_long]+sublabels)
r10 = molecule['positions'][j] - molecule['positions'][i]
r20 = molecule['positions'][k] - molecule['positions'][i]
angle = np.sum(r10*r20) / (np.linalg.norm(r10)*np.linalg.norm(r20))
angle = np.arccos( np.clip(angle,-1.0,1.0) )
row = {'molecule_name':molecule_name,'atom_index_0':i,'atom_index_1':j,'atom_index_2':k,
'label':label,'sublabel':sublabel,'angle':angle}
for k,v in row.items():
new_data[k].append(v)
ans = pd.DataFrame(new_data)
ans.sort_values(['molecule_name','atom_index_0','atom_index_1','atom_index_2'])
assert int(ans.groupby("molecule_name").count().max()[0]) <= MAX_TRIPLET_COUNT
return ans
def make_quadruplets(molecule_list,structure_dict):
"""Make the quadruplet dataframe.
Args:
molecule_list: List of molecules to generate.
structure_dict: Output of :func:`make_structure_dict`, after running :func:`enhance_structure_dict`.
Returns:
pandas.DataFrame: New dataframe, with quadruplets and related information. The convention is that they are connected 2-0-1-3, where 0,1 are the central atoms and 0-2 is a bond.
"""
new_data = collections.defaultdict(list)
icount = 0 # for debugging
for molecule_name in molecule_list:
molecule = structure_dict[molecule_name]
bond_orders = molecule['bond_orders']
short = molecule['symbols']
long = molecule['long_symbols']
pos = molecule['positions']
for i,j in zip(*bond_orders.nonzero()):
if i > j:
continue # we will get it the other way
for i_nei,j_nei in itertools.product(
bond_orders[i].nonzero()[0],bond_orders[j].nonzero()[0]):
if j_nei==i or i_nei==j:
continue # no self
# But we could have i_nei==j_nei, which is a triangle
# Atomic structure looks like i_nei-i-j-j_nei
# There's an easy way and a quick way.
mode = 'fast'
assert ['test','fast','slow'].count(mode),'Mode must be one of: test, fast, slow'
if ['test','slow'].count(mode):
plane_1 = np.cross( pos[i_nei]-pos[i], pos[j]-pos[i])
plane_2 = np.cross( pos[i]-pos[j],pos[j_nei]-pos[j])
if np.allclose(plane_1,0.) or np.allclose(plane_2,0.):
# Planar; not really a dihedral
continue
# Compute the dihedral in radians
costheta = np.dot(plane_1,plane_2) / (
np.linalg.norm(plane_1)*np.linalg.norm(plane_2))
costheta1 = costheta
if ['test','fast'].count(mode): # this way is much faster
# Uses some clever algebra
ijpos = np.array([
pos[i_nei] - pos[i],
pos[j] - pos[i],
pos[j_nei] - pos[j],
])
# For simplicity, call these a,b,c
dots = np.dot(ijpos,ijpos.T)
# numerator = (a x b).(-b x c)
# denominator = |a x b| |b x c|
# So:
# -(axb).(bxc) = (b.b)(a.c) - (a.b)(b.c)
numerator = dots[1,1]*dots[0,2] - dots[0,1]*dots[1,2]
# |axb|^2=|a|^2|b|^2-(a.b)^2
denominator = np.sqrt( (
dots[0,0]*dots[1,1]-dots[0,1]**2) * (
dots[2,2]*dots[1,1]-dots[2,1]**2 ))
if abs(denominator) < 1e-7:
# Planar, not really a dihedral
continue
costheta = numerator / denominator
if mode=='test':
assert abs(costheta-costheta1)<1e-4,"Fancy algebra failed"
icount += 1
if icount > 50000:
raise Exception("50K counts confirmed.")
assert abs(costheta)<1.0001,'Cos theta too large'
dihedral = np.arccos( np.clip(costheta,-1.0,1.0) )
# Start labeling
label = '_'.join(sorted([
'_'.join([short[i],short[i_nei]]),
'_'.join([short[j],short[j_nei]]),
]))
# This definition finds several unique labels in the test set, e.g. 'C3_C4_C4_N4'
#sublabel = '_'.join(sorted([
# '_'.join([short[i]+long[i].split('_')[1],short[i_nei]+long[i_nei].split('_')[1]]),
# '_'.join([short[j]+long[j].split('_')[1],short[j_nei]+long[j_nei].split('_')[1]]),
# ])).replace('.0','')
# This definition finds several unique labels in the test set, e.g. C_3_3_1_1_C_C_4_4_1_1_N
#sublabel2 = '_'.join(sorted([
# '_'.join([long[i],short[i_nei]]),
# '_'.join([long[j],short[j_nei]]),
# ])).replace('.0','')
# This definition finds several unique labels in the test set, {'C_O_1_N_C_2_2',
# 'N_C_1_N_O_1_2', 'N_N_2_O_C_1_1'}
sublabel4 = '_'.join(sorted([
'_'.join([short[i],short[i_nei],str(bond_orders[i,i_nei].round(1))]),
'_'.join([short[j],short[j_nei],str(bond_orders[j,j_nei].round(1))]),
]) + [str(bond_orders[i,j].round(1))]
).replace('.0','')
# This definition finds several unique labels in the test set, e.g. C3_C4_1_C4_N4_1_1'
#sublabel4 = '_'.join(sorted([
# '_'.join([short[i]+long[i].split('_')[1],short[i_nei]+long[i_nei].split('_')[1],
# str(bond_orders[i,i_nei].round(1))]),
# '_'.join([short[j]+long[j].split('_')[1],short[j_nei]+long[j_nei].split('_')[1],
# str(bond_orders[j,j_nei].round(1))]),
# ]) + [str(bond_orders[i,j].round(1))]
# ).replace('.0','')
sublabel = '_'.join(sorted([
'_'.join([short[i],short[i_nei]]),
'_'.join([short[j],short[j_nei]]),
]) + [str(bond_orders[i,j].round(1))]
).replace('.0','')
sublabel2 = '_'.join(sorted([
'_'.join([short[i]+long[i].split('_')[1],short[i_nei]]),
'_'.join([short[j]+long[j].split('_')[1],short[j_nei]]),
]) + [str(bond_orders[i,j].round(1))]
).replace('.0','')
sublabel3 = '_'.join(sorted([
'_'.join([short[i]+long[i].split('_')[1],short[i_nei]]),
'_'.join([short[j]+long[j].split('_')[1],short[j_nei]]),
])).replace('.0','')
row = {'molecule_name':molecule_name,
'atom_index_0':i,'atom_index_1':j,'atom_index_2':i_nei,'atom_index_3':j_nei,
'label':label,'sublabel':sublabel,'sublabel2':sublabel2,'sublabel3':sublabel3,
'sublabel4':sublabel4,'angle':dihedral}
for k,v in row.items():
new_data[k].append(v)
ans = pd.DataFrame(new_data)
ans.sort_values(['molecule_name','atom_index_0','atom_index_1','atom_index_2','atom_index_3'])
assert int(ans.groupby("molecule_name").count().max()[0]) <= MAX_QUAD_COUNT
return ans
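# --- Sanity check (not part of the original pipeline) ---
# Numerically verifies the triple-product identity used for the fast dihedral above:
#   -(a x b) . (b x c) = (b.b)(a.c) - (a.b)(b.c)
def _check_dihedral_identity(seed=0):
    rng = np.random.RandomState(seed)
    a, b, c = rng.randn(3), rng.randn(3), rng.randn(3)
    lhs = -np.dot(np.cross(a, b), np.cross(b, c))
    rhs = np.dot(b, b) * np.dot(a, c) - np.dot(a, b) * np.dot(b, c)
    assert abs(lhs - rhs) < 1e-10, (lhs, rhs)
    return lhs, rhs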
def write_csv(directory,label,atoms,bonds,triplets,quadruplets):
"""Write the relevant dataframes to a CSV file.
Args:
directory: Directory to write to.
label (str): How to label the files, e.g. test or train.
atoms: Pandas dataframe read from structures.csv, after running :func:`enhance_atoms`.
bonds: Pandas dataframe read from train.csv or test.csv, after running :func:`enhance_bonds`.
triplets: Pandas dataframe created by :func:`make_triplets`.
quadruplets: Pandas dataframe created by :func:`make_quadruplets`.
Returns:
None
"""
filename = os.path.join(directory,'new_big_{}.csv.bz2')
if atoms is not None and len(atoms):
atoms = atoms.sort_values(["molecule_name",'atom_index'])
for i in range(4):
atoms["top_bond_{}".format(i)] = [x[i] if len(x)>i else 0.0 for x in atoms["top_bond"].values]
for i in ["x","y","z"]:
atoms[i] = atoms[i].values.round(10)
renames = {k:k[:-1] for k in atoms.columns if k[-1]=='s'}
renames.update({'long_symbols':'labeled_atom'})
atoms = atoms.rename(columns=renames)
atoms.to_csv(filename.format('structures'),index=False,columns=
'molecule_name,atom_index,atom,x,y,z,labeled_atom,angle,top_bond_0,top_bond_1,top_bond_2,top_bond_3,sublabel_atom,charge,spin,heavyvalence,heterovalence,valence,hyb_type'.split(','))
if bonds is not None and len(bonds):
bonds = bonds.reset_index()
bond_columns = 'id,molecule_name,atom_index_0,atom_index_1,type,scalar_coupling_constant,labeled_type,sublabel_type,bond_order,predict'.split(',')
if 'scalar_coupling_constant' not in bonds.columns:
bond_columns = [x for x in bond_columns if x!='scalar_coupling_constant']
bonds = bonds.sort_values(["predict","molecule_name",'atom_index_0','atom_index_1'],
ascending=[False,True,True,True])
bonds.to_csv(filename.format(label),index=False,columns=bond_columns)
if triplets is not None and len(triplets):
triplets = triplets.sort_values(["molecule_name",'atom_index_0','atom_index_1','atom_index_2'])
triplets.to_csv(filename.format(label+'_triplets'),index=False,columns=
'molecule_name,atom_index_0,atom_index_1,atom_index_2,label,sublabel,angle'.split(','))
if quadruplets is not None and len(quadruplets):
quadruplets = quadruplets.sort_values(["molecule_name",'atom_index_0','atom_index_1',
'atom_index_2','atom_index_3'])
quadruplets.to_csv(filename.format(label+'_quadruplets'),index=False,columns=
'molecule_name,atom_index_0,atom_index_1,atom_index_2,atom_index_3,label,sublabel,sublabel2,sublabel3,sublabel4,angle'.split(','))
def _create_embedding(series):
"""Create a one-hot encoding embedding.
Args:
series: A DataFrame series (column).
Returns:
dict: Mapping of the entries (or "<None>") to the index number.
"""
types = sorted(series.unique().tolist())
assert "<None>" not in types
emb_index = dict(zip(["<None>"] + types , range(len(types)+1)))
return emb_index
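# For example (illustrative): _create_embedding(pd.Series(['C', 'H', 'H']))
# returns {'<None>': 0, 'C': 1, 'H': 2}; index 0 is presumably reserved for padding.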
def add_embedding(atoms,bonds,triplets,quadruplets,embeddings=None):
"""Add embedding indices to the dataframes.
Args:
atoms: Pandas dataframe read from structures.csv, after running :func:`enhance_atoms`.
bonds: Pandas dataframe read from train.csv or test.csv, after running :func:`enhance_bonds`.
triplets: Pandas dataframe created by :func:`make_triplets`.
quadruplets: Pandas dataframe created by :func:`make_quadruplets`.
embeddings (dict or None): If None, we create a new embedding (e.g. train data), otherwise we use the given embeddings that are output by :func:`add_embedding` (e.g. test data).
Returns:
dict: The embedding dictionary that can be passed to this function for using the same embedding on a new dataset.
"""
# Add the embedding info to the dataframes.
atoms["type_0"] = atoms["atom"]
atoms["type_1"] = atoms["labeled_atom"].apply(lambda x : x[:5])
atoms["type_2"] = atoms["labeled_atom"]
bonds["type_0"] = bonds["type"]
bonds["type_1"] = bonds["labeled_type"]
bonds["type_2"] = bonds["sublabel_type"]
triplets["type_0"] = triplets["label"].apply(lambda x : x[0] + x[5] + x[10])
triplets["type_1"] = triplets["label"]
quadruplets["type_0"] = quadruplets["label"]
if embeddings is None:
embeddings = {}
embeddings.update({('atom',t):_create_embedding(atoms["type_" + str(t)]) for t in range(3)})
embeddings.update({('bond',t):_create_embedding(bonds["type_" + str(t)]) for t in range(3)})
embeddings.update({('triplet',t):_create_embedding(triplets["type_" + str(t)]) for t in range(2)})
embeddings.update({('quadruplet',t):_create_embedding(quadruplets["type_" + str(t)]) for t in range(1)})
for t in range(3):
atoms["type_index_" + str(t)] = atoms["type_" + str(t)].apply(lambda x : embeddings[('atom',t)][x])
for t in range(3):
bonds["type_index_" + str(t)] = bonds["type_" + str(t)].apply(lambda x : embeddings[('bond',t)][x])
for t in range(2):
triplets["type_index_" + str(t)] = triplets["type_" + str(t)].apply(lambda x : embeddings[('triplet',t)][x])
for t in range(1):
quadruplets["type_index_" + str(t)] = quadruplets["type_" + str(t)].apply(lambda x : embeddings[('quadruplet',t)][x])
return embeddings
def get_scaling(bonds_train):
"""Get the mean/std scaling factors for each ``labeled_type``.
Args:
bonds_train: The training data that we can use to set the values.
Returns:
tuple: Mean and std dicts, mapping labeled_type to scalar_coupling_constant mean/std.
"""
# Get the mean/std scaling factors
means = bonds_train.groupby("labeled_type").mean()["scalar_coupling_constant"].to_dict()
stds = bonds_train.groupby("labeled_type").std()["scalar_coupling_constant"].to_dict()
return means,stds
def add_scaling(bonds,means,stds):
"""Add the scaling information to the bonds dataframe.
Args:
bonds (pd.DataFrame): The dataframe of the bonds, after :func:`enhance_bonds`.
means (dict): Output of :func:`get_scaling`.
stds (dict): Output of :func:`get_scaling`.
Returns:
pd.DataFrame: Same dataframe, with added columns.
"""
# Add mean/std scaling factors to bonds dataframe
bonds["sc_mean"] = bonds["labeled_type"].apply(lambda x : means[x])
bonds["sc_std"] = bonds["labeled_type"].apply(lambda x : stds[x])
if "scalar_coupling_constant" in bonds.columns:
bonds["sc_scaled"] = (bonds["scalar_coupling_constant"] - bonds["sc_mean"]) / bonds["sc_std"]
return bonds
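# Sketch of the intended round trip (an assumption based on the columns added
# here): a model trained on the normalized target recovers the raw coupling
# constant per bond from the stored per-type statistics.
# >>> means, stds = get_scaling(bonds_train)
# >>> bonds = add_scaling(bonds, means, stds)
# >>> # raw prediction = predicted_sc_scaled * bonds["sc_std"] + bonds["sc_mean"]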
def create_dataset(atoms, bonds, triplets, quads, labeled = True, max_count = 10**10):
"""Create the python loaders, which we can pkl to a file for batching.
Args:
atoms: Pandas dataframe read from structures.csv, after running :func:`enhance_atoms`.
bonds: Pandas dataframe read from train.csv or test.csv, after running :func:`enhance_bonds`.
triplets: Pandas dataframe created by :func:`make_triplets`.
quads: Pandas dataframe created by :func:`make_quadruplets`.
labeled (bool): Whether this is train data, labeled with the y value.
max_count (int): Maximum number of entries; useful for testing.
Returns:
tuple: With the following entries
* x_index: (M,) Index of the molecule.
* x_atom: (M,N,3) Atom type index.
* x_atom_pos: (M,N,5) Atom position (3), closest-atom angle (1), and partial charge (1).
* x_bond: (M,B,5) Bond type index (3), Atom index (2) corresponding to the bond.
* x_bond_dist: (M,B) Distance of the bond.
* x_triplet: (M,P,7) Triplet type (2), Atom index (3), Bond index (2) corresponding to the triplet.
* x_triplet_angle: (M,P) Triplet angle.
* x_quad: (M,Q,10) Quadruplet type (1), Atom index (4), Bond index (3), and triplet index (2) corresponding to the quadruplet.
* x_quad_angle: (M,Q) Quadruplet dihedral angle.
* y_bond_scalar_coupling: (M,B,4) Scalar coupling constant, type mean, type std, and whether the bond should be predicted.
"""
import torch
from tqdm import tqdm
# create mapping from molecule names to indices
mol_unique = sorted(bonds["molecule_name"].unique().tolist())
index = dict(zip(mol_unique, range(len(mol_unique))))
atoms = atoms.set_index("molecule_name")
bonds = bonds.set_index("molecule_name")
triplets = triplets.set_index("molecule_name")
quads = quads.set_index("molecule_name")
quad_mols = set(quads.index)
max_count = M = min(max_count, len(index))
x_index = torch.arange(M, dtype=torch.long)
x_atom = torch.zeros(M, MAX_ATOM_COUNT, 3, dtype=torch.long)
x_atom_pos = torch.zeros(M, MAX_ATOM_COUNT, 5)
x_bond = torch.zeros(M, MAX_BOND_COUNT, 5, dtype=torch.long)
x_bond_dist = torch.zeros(M, MAX_BOND_COUNT)
x_triplet = torch.zeros(M, MAX_TRIPLET_COUNT, 7, dtype=torch.long)
x_triplet_angle = torch.zeros(M, MAX_TRIPLET_COUNT)
x_quad = torch.zeros(M, MAX_QUAD_COUNT, 10, dtype=torch.long)
x_quad_angle = torch.zeros(M, MAX_QUAD_COUNT)
y_bond_scalar_coupling = torch.zeros(M, MAX_BOND_COUNT, 4)
for k,i in tqdm(index.items()):
if i >= M:
break
mol_atoms = atoms.loc[[k]]
mol_bonds = bonds.loc[[k]]
mol_real_bonds = mol_bonds[(mol_bonds["predict"]==1) | (mol_bonds["bond_order"]>0)]
mol_fake_bonds = mol_bonds[(mol_bonds["predict"]==0) & (mol_bonds["bond_order"]==0)]
mol_triplets = triplets.loc[[k]]
n = mol_atoms.shape[0]
m = mol_bonds.shape[0]
mr = mol_real_bonds.shape[0]
mf = mol_fake_bonds.shape[0]
p = mol_triplets.shape[0]
assert mr + mf == m, "Real + fake bonds != number of bonds?"
assert mr < MAX_BOND_COUNT, "The number of real bonds must be smaller than MAX_BOND_COUNT"
# STEP 1: Atoms
for t in range(3):
x_atom[i,:n,t] = torch.tensor(mol_atoms["type_index_" + str(t)].values)
x_atom_pos[i,:n,:3] = torch.tensor(mol_atoms[["x", "y", "z"]].values)
x_atom_pos[i,:n,3] = torch.tensor(mol_atoms["angle"].values)
x_atom_pos[i,:n,4] = torch.tensor(mol_atoms["charge"].values)
# STEP 2: Real bonds
for t in range(3):
x_bond[i,:mr,t] = torch.tensor(mol_real_bonds["type_index_" + str(t)].values)
x_bond[i,:mr,3] = torch.tensor(mol_real_bonds["atom_index_0"].values)
x_bond[i,:mr,4] = torch.tensor(mol_real_bonds["atom_index_1"].values)
idx1 = torch.tensor(mol_real_bonds["atom_index_0"].values)
idx2 = torch.tensor(mol_real_bonds["atom_index_1"].values)
x_bond_dist[i,:mr] = ((x_atom_pos[i,idx1,:3] - x_atom_pos[i,idx2,:3])**2).sum(1)
if mf > 0:
# STEP 3: Fake bonds
fidx1 = torch.tensor(mol_fake_bonds["atom_index_0"].values)
fidx2 = torch.tensor(mol_fake_bonds["atom_index_1"].values)
fdists = ((x_atom_pos[i,fidx1,:3] - x_atom_pos[i,fidx2,:3])**2).sum(1) # Length mf
argsort_fdists = torch.argsort(fdists)
top_count = min(MAX_BOND_COUNT - mr, mf)
for t in range(3):
x_bond[i,mr:mr+top_count,t] = torch.tensor(mol_fake_bonds["type_index_" + str(t)].values)[argsort_fdists][:top_count]
x_bond[i,mr:mr+top_count,3] = torch.tensor(mol_fake_bonds["atom_index_0"].values)[argsort_fdists][:top_count]
x_bond[i,mr:mr+top_count,4] = torch.tensor(mol_fake_bonds["atom_index_1"].values)[argsort_fdists][:top_count]
x_bond_dist[i,mr:mr+top_count] = fdists[argsort_fdists][:top_count]
# STEP 4: Triplets
for t in range(2):
x_triplet[i,:p,t] = torch.tensor(mol_triplets["type_index_" + str(t)].values)
x_triplet[i,:p,2] = torch.tensor(mol_triplets["atom_index_0"].values)
x_triplet[i,:p,3] = torch.tensor(mol_triplets["atom_index_1"].values)
x_triplet[i,:p,4] = torch.tensor(mol_triplets["atom_index_2"].values)
x_triplet_angle[i,:p] = torch.tensor(mol_triplets["angle"].values)
lookup = dict(zip(mol_real_bonds["atom_index_0"].apply(str) + "_" + mol_real_bonds["atom_index_1"].apply(str),
range(mol_real_bonds.shape[0])))
lookup.update(dict(zip(mol_real_bonds["atom_index_1"].apply(str) + "_" + mol_real_bonds["atom_index_0"].apply(str),
range(mol_real_bonds.shape[0]))))
b_idx1 = (mol_triplets["atom_index_0"].apply(str) + "_" +
mol_triplets["atom_index_1"].apply(str)).apply(lambda x : lookup[x])
b_idx2 = (mol_triplets["atom_index_0"].apply(str) + "_" +
mol_triplets["atom_index_2"].apply(str)).apply(lambda x : lookup[x])
x_triplet[i,:p,5] = torch.tensor(b_idx1.values)
x_triplet[i,:p,6] = torch.tensor(b_idx2.values)
# STEP 5: Quadruplets
if k in quad_mols:
mol_quads = quads.loc[[k]]
q = mol_quads.shape[0]
x_quad[i,:q,0] = torch.tensor(mol_quads["type_index_0"].values)
x_quad[i,:q,1] = torch.tensor(mol_quads["atom_index_0"].values)
x_quad[i,:q,2] = torch.tensor(mol_quads["atom_index_1"].values)
x_quad[i,:q,3] = torch.tensor(mol_quads["atom_index_2"].values)
x_quad[i,:q,4] = torch.tensor(mol_quads["atom_index_3"].values)
x_quad_angle[i,:q] = torch.tensor(mol_quads["angle"].values)
# Triplet convention is 1-0-2, so only 1/2 are exchangeable
# Quadruplet convention is 2-0-1-3
lookup3 = dict(zip(mol_triplets["atom_index_0"].apply(str) + "_" +
mol_triplets["atom_index_1"].apply(str) + "_" +
mol_triplets["atom_index_2"].apply(str),
range(mol_triplets.shape[0])))
lookup3.update(dict(zip(mol_triplets["atom_index_0"].apply(str) + "_" +
mol_triplets["atom_index_2"].apply(str) + "_" +
mol_triplets["atom_index_1"].apply(str),
range(mol_triplets.shape[0]))))
b_idx1 = (mol_quads["atom_index_0"].apply(str) + "_" +
mol_quads["atom_index_1"].apply(str)).apply(lambda x : lookup[x])
b_idx2 = (mol_quads["atom_index_0"].apply(str) + "_" +
mol_quads["atom_index_2"].apply(str)).apply(lambda x : lookup[x])
b_idx3 = (mol_quads["atom_index_1"].apply(str) + "_" +
mol_quads["atom_index_3"].apply(str)).apply(lambda x : lookup[x])
t_idx1 = (mol_quads["atom_index_0"].apply(str) + "_" +
mol_quads["atom_index_1"].apply(str) + "_" +
mol_quads["atom_index_2"].apply(str)).apply(lambda x : lookup3[x])
t_idx2 = (mol_quads["atom_index_1"].apply(str) + "_" +
mol_quads["atom_index_0"].apply(str) + "_" +
mol_quads["atom_index_3"].apply(str)).apply(lambda x : lookup3[x])
x_quad[i,:q,5] = torch.tensor(b_idx1.values)
x_quad[i,:q,6] = torch.tensor(b_idx2.values)
x_quad[i,:q,7] = torch.tensor(b_idx3.values)
x_quad[i,:q,8] = torch.tensor(t_idx1.values)
x_quad[i,:q,9] = torch.tensor(t_idx2.values)
x_quad_angle[i,:q] = torch.tensor(mol_quads["angle"].values)
if labeled:
y_bond_scalar_coupling[i,:mr, 0] = torch.tensor(mol_real_bonds["scalar_coupling_constant"].values)
else:
y_bond_scalar_coupling[i,:mr, 0] = torch.tensor(mol_real_bonds["id"].values)
y_bond_scalar_coupling[i,:mr, 1] = torch.tensor(mol_real_bonds["sc_mean"].values)
y_bond_scalar_coupling[i,:mr, 2] = torch.tensor(mol_real_bonds["sc_std"].values)
y_bond_scalar_coupling[i,:mr, 3] = torch.tensor(mol_real_bonds["predict"].values).float() # binary tensor (1s to be predicted)
return x_index, x_atom, x_atom_pos, x_bond, x_bond_dist, x_triplet, x_triplet_angle, x_quad, x_quad_angle, y_bond_scalar_coupling
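# Minimal usage sketch (assuming the dataframes were prepared by the
# enhance_*/make_*/add_embedding/add_scaling steps above): every returned
# tensor shares the leading molecule dimension, so the tuple can be sliced per
# molecule or wrapped directly in a torch TensorDataset for batching.
# >>> D = create_dataset(atoms, bonds, triplets, quads, labeled=True, max_count=100)
# >>> D[1].shape                                  # x_atom: (100, MAX_ATOM_COUNT, 3)
# >>> torch.utils.data.TensorDataset(*D)[0]       # tensors for the first molecule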
def auto_preproc_stage1():
"""Stage 1: Read and process csv files to new csv files."""
print('Reading structures...')
atoms = pd.read_csv(os.path.join(root,settings['RAW_DATA_DIR'],'structures.csv'))
print('Parsing structures...')
structure_dict = make_structure_dict(atoms)
print('Adding structure features...')
enhance_structure_dict(structure_dict)
print('Updating atoms dataframe...')
enhance_atoms(atoms,structure_dict)
print('Writing structures...')
write_csv(os.path.join(root,settings['PROCESSED_DATA_DIR']),'',atoms,None,None,None)
print('Reading bonds for train...')
bonds = pd.read_csv(os.path.join(root,settings['RAW_DATA_DIR'],'train.csv'))
print('Parsing bonds...')
enhance_bonds(bonds,structure_dict)
bonds = add_all_pairs(bonds,structure_dict)
triplets = make_triplets(bonds["molecule_name"].unique(),structure_dict)
quadruplets = make_quadruplets(bonds["molecule_name"].unique(),structure_dict)
print('Writing bonds...')
write_csv(os.path.join(root,settings['PROCESSED_DATA_DIR']),'train',None,bonds,triplets,quadruplets)
print('Reading bonds for test...')
bonds = pd.read_csv(os.path.join(root,settings['RAW_DATA_DIR'],'test.csv'))
print('Parsing bonds...')
enhance_bonds(bonds,structure_dict)
bonds = add_all_pairs(bonds,structure_dict)
triplets = make_triplets(bonds["molecule_name"].unique(),structure_dict)
quadruplets = make_quadruplets(bonds["molecule_name"].unique(),structure_dict)
print('Writing bonds...')
write_csv(os.path.join(root,settings['PROCESSED_DATA_DIR']),'test',None,bonds,triplets,quadruplets)
def auto_preproc_stage2():
import torch
print("Loading data...")
os.chdir(os.path.join(root,settings['PROCESSED_DATA_DIR']))
atoms = pd.read_csv('new_big_structures.csv.bz2')
bonds = pd.read_csv('new_big_train.csv.bz2')
triplets = pd.read_csv('new_big_train_triplets.csv.bz2')
quadruplets = pd.read_csv('new_big_train_quadruplets.csv.bz2')
print('Sorting...')
atoms.sort_values(['molecule_name','atom_index'],inplace=True)
bonds.sort_values(['molecule_name','atom_index_0','atom_index_1'],inplace=True)
triplets.sort_values(['molecule_name','atom_index_0','atom_index_1','atom_index_2'],inplace=True)
quadruplets.sort_values(['molecule_name','atom_index_0','atom_index_1','atom_index_2','atom_index_3'],inplace=True)
assert int(atoms.groupby("molecule_name").count().max()[0]) <= MAX_ATOM_COUNT
assert int(bonds.groupby("molecule_name").count().max()[0]) <= MAX_BOND_COUNT
assert int(triplets.groupby("molecule_name").count().max()[0]) <= MAX_TRIPLET_COUNT
assert int(quadruplets.groupby("molecule_name").count().max()[0]) <= MAX_QUAD_COUNT
print("Adding embeddings and scaling...")
embeddings = add_embedding(atoms,bonds,triplets,quadruplets)
means,stds = get_scaling(bonds)
bonds = add_scaling(bonds,means,stds)
print("Creating train dataset...")
D = create_dataset(atoms, bonds, triplets, quadruplets, labeled=True)
print('Splitting train dataset...')
#Split the training data into train (80%) and validation (20%) for model selection.
np.random.seed(0)
p = np.random.permutation(D[0].shape[0])
idx_train = torch.cat([torch.tensor(p[:int(0.6*len(p))]), torch.tensor(p[int(0.8*len(p)):])])
idx_val = torch.tensor(p[int(0.6*len(p)):int(0.8*len(p))])
D_train = tuple([d[idx_train] for d in D])
D_val = tuple([d[idx_val] for d in D])
print('Saving train (80%)/validation (20%) datasets...')
# If too large, save the two parts (just so that we can push to github)
if sum([d.nelement() for d in D_train]) > 3e8:
# Split D_train into 2 parts
print("Splitting the 80% training data into part 1 and 2...")
total_len = D_train[0].size(0)
D_train_part1 = tuple([d[:total_len//2].clone().detach() for d in D_train])
D_train_part2 = tuple([d[total_len//2:].clone().detach() for d in D_train])
with gzip.open("torch_proc_train_p1.pkl.gz", "wb") as f:
pickle.dump(D_train_part1, f, protocol=4)
with gzip.open("torch_proc_train_p2.pkl.gz", "wb") as f:
pickle.dump(D_train_part2, f, protocol=4)
else:
with gzip.open("torch_proc_train.pkl.gz", "wb") as f:
pickle.dump(D_train, f, protocol=4)
with gzip.open("torch_proc_val.pkl.gz", "wb") as f:
pickle.dump(D_val, f, protocol=4)
print("Saving the full train dataset. Splitting into part 1 and 2...")
total_len = D[0].size(0)
D_part1 = tuple([d[:total_len//2].clone().detach() for d in D])
D_part2 = tuple([d[total_len//2:].clone().detach() for d in D])
with gzip.open("torch_proc_train_full_p1.pkl.gz", "wb") as f:
pickle.dump(D_part1, f, protocol=4)
with gzip.open("torch_proc_train_full_p2.pkl.gz", "wb") as f:
pickle.dump(D_part2, f, protocol=4)
# ## Test
print('Loading test data...')
bonds = pd.read_csv('new_big_test.csv.bz2')
triplets = pd.read_csv('new_big_test_triplets.csv.bz2')
quadruplets = pd.read_csv('new_big_test_quadruplets.csv.bz2')
print('Sorting...')
bonds.sort_values(['molecule_name','atom_index_0','atom_index_1'],inplace=True)
triplets.sort_values(['molecule_name','atom_index_0','atom_index_1','atom_index_2'],inplace=True)
quadruplets.sort_values(['molecule_name','atom_index_0','atom_index_1','atom_index_2','atom_index_3'],inplace=True)
assert int(atoms.groupby("molecule_name").count().max()[0]) <= MAX_ATOM_COUNT
assert int(bonds.groupby("molecule_name").count().max()[0]) <= MAX_BOND_COUNT
assert int(triplets.groupby("molecule_name").count().max()[0]) <= MAX_TRIPLET_COUNT
assert int(quadruplets.groupby("molecule_name").count().max()[0]) <= MAX_QUAD_COUNT
print('Adding embedding and scaling...')
add_embedding(atoms,bonds,triplets,quadruplets, embeddings=embeddings)
bonds = add_scaling(bonds,means,stds)
print('Creating test dataset...')
D_sub = create_dataset(atoms, bonds, triplets, quadruplets, labeled=False)
print('Saving file...')
with gzip.open("torch_proc_submission.pkl.gz", "wb") as f:
pickle.dump(D_sub, f, protocol=4)
return
if __name__=='__main__':
# There is a segmentation fault if stage1 is run while torch is loaded. So we have to run them separately.
if '1' in sys.argv:
auto_preproc_stage1()
elif '2' in sys.argv:
auto_preproc_stage2()
else:
print('Please specify either stage 1 or stage 2.')
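# Typical invocation (the file name is illustrative; the two stages must run in
# separate processes because of the torch/segfault note above):
# $ python preprocess.py 1
# $ python preprocess.py 2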
| 50.258278
| 198
| 0.616243
|
99babed8f7673d415f49f279dbcc93e6a09553e5
| 27,053
|
py
|
Python
|
tests/test_blob_encryption.py
|
rajrohith/blobstore
|
92c3a172895fb2ad46f7a34281976fe33f72e47e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_blob_encryption.py
|
rajrohith/blobstore
|
92c3a172895fb2ad46f7a34281976fe33f72e47e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_blob_encryption.py
|
rajrohith/blobstore
|
92c3a172895fb2ad46f7a34281976fe33f72e47e
|
[
"Apache-2.0"
] | null | null | null |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import unittest
from io import(
StringIO,
BytesIO,
)
from os import(
urandom,
path,
remove,
)
from json import loads
from azure.storage.blob import (
Blob,
BlockBlobService,
AppendBlobService,
PageBlobService,
)
from tests.testcase import (
StorageTestCase,
TestMode,
record,
)
from tests.test_encryption_helper import (
KeyWrapper,
KeyResolver,
RSAKeyWrapper,
)
from azure.storage._error import(
_ERROR_OBJECT_INVALID,
_ERROR_DECRYPTION_FAILURE,
_ERROR_VALUE_SHOULD_BE_BYTES,
_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION,
)
from azure.common import AzureException
from azure.storage._encryption import(
_dict_to_encryption_data,
_validate_and_unwrap_cek,
_generate_AES_CBC_cipher,
)
from cryptography.hazmat.primitives.padding import PKCS7
from azure.storage._common_conversion import _decode_base64_to_bytes
#------------------------------------------------------------------------------
TEST_CONTAINER_PREFIX = 'encryption_container'
TEST_BLOB_PREFIXES = {'block_blob':'encryption_block_blob',
'page_blob':'encryption_page_blob'}
FILE_PATH = 'blob_input.temp.dat'
#------------------------------------------------------------------------------
class StorageBlobEncryptionTest(StorageTestCase):
def setUp(self):
super(StorageBlobEncryptionTest, self).setUp()
self.bbs = self._create_storage_service(BlockBlobService, self.settings)
self.pbs = self._create_storage_service(PageBlobService, self.settings)
self.service_dict = {'block_blob':self.bbs,
'page_blob':self.pbs}
self.container_name = self.get_resource_name('utcontainer')
self.bytes = b'Foo'
if not self.is_playback():
self.bbs.create_container(self.container_name)
self.bbs.MAX_BLOCK_SIZE = 4 * 1024
self.bbs.MAX_SINGLE_PUT_SIZE = 32 * 1024
self.pbs.MAX_PAGE_SIZE = 4 * 1024
def tearDown(self):
if not self.is_playback():
try:
self.bbs.delete_container(self.container_name)
except:
pass
if path.isfile(FILE_PATH):
try:
remove(FILE_PATH)
except:
pass
return super(StorageBlobEncryptionTest, self).tearDown()
#--Helpers-----------------------------------------------------------------
def _get_container_reference(self):
return self.get_resource_name(TEST_CONTAINER_PREFIX)
def _get_blob_reference(self, type):
return self.get_resource_name(TEST_BLOB_PREFIXES[type])
def _create_small_blob(self, type):
blob_name = self._get_blob_reference(type)
self.service_dict[type].create_blob_from_bytes(self.container_name, blob_name, self.bytes)
return blob_name
#--Test cases for blob encryption ----------------------------------------
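# Recurring pattern in the tests below (summary added for readability, not
# taken from Azure documentation): client-side encryption is enabled per
# service object by attaching a key-encryption key and, optionally, requiring it.
# >>> self.bbs.key_encryption_key = KeyWrapper('key1')
# >>> self.bbs.require_encryption = True
# >>> self.bbs.create_blob_from_bytes(self.container_name, blob_name, b'data')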
@record
def test_missing_attribute_kek_wrap(self):
# In the shared method _generate_blob_encryption_key
# Arrange
self.bbs.require_encryption = True
valid_key = KeyWrapper('key1')
# Act
invalid_key_1 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_1.get_key_wrap_algorithm = valid_key.get_key_wrap_algorithm
invalid_key_1.get_kid = valid_key.get_kid
# No attribute wrap_key
self.bbs.key_encryption_key = invalid_key_1
with self.assertRaises(AttributeError):
self._create_small_blob('block_blob')
invalid_key_2 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_2.wrap_key = valid_key.wrap_key
invalid_key_2.get_kid = valid_key.get_kid
# No attribute get_key_wrap_algorithm
self.bbs.key_encryption_key = invalid_key_2
with self.assertRaises(AttributeError):
self._create_small_blob('block_blob')
invalid_key_3 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_3.get_key_wrap_algorithm = valid_key.get_key_wrap_algorithm
invalid_key_3.wrap_key = valid_key.wrap_key
# No attribute get_kid
self.bbs.key_encryption_key = invalid_key_3
with self.assertRaises(AttributeError):
self._create_small_blob('block_blob')
@record
def test_invalid_value_kek_wrap(self):
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.key_encryption_key.get_key_wrap_algorithm = None
try:
self._create_small_blob('block_blob')
self.fail()
except AttributeError as e:
self.assertEqual(str(e), _ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.key_encryption_key.get_kid = None
with self.assertRaises(AttributeError):
self._create_small_blob('block_blob')
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.key_encryption_key.wrap_key = None
with self.assertRaises(AttributeError):
self._create_small_blob('block_blob')
@record
def test_missing_attribute_kek_unwrap(self):
# Shared between all services in _decrypt_blob
# Arrange
self.bbs.require_encryption = True
valid_key = KeyWrapper('key1')
self.bbs.key_encryption_key = valid_key
blob_name = self._create_small_blob('block_blob')
# Act
# Note that KeyWrapper has a default value for key_id, so these Exceptions
# are not due to non-matching kids.
invalid_key_1 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_1.get_kid = valid_key.get_kid
#No attribute unwrap_key
self.bbs.key_encryption_key = invalid_key_1
with self.assertRaises(AzureException):
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
invalid_key_2 = lambda: None #functions are objects, so this effectively creates an empty object
invalid_key_2.unwrap_key = valid_key.unwrap_key
#No attribute get_kid
with self.assertRaises(AzureException):
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
@record
def test_invalid_value_kek_unwrap(self):
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
blob_name = self._create_small_blob('block_blob')
# Act
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.key_encryption_key.unwrap_key = None
try:
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
self.fail()
except AzureException as e:
self.assertEqual(str(e), _ERROR_DECRYPTION_FAILURE)
@record
def test_get_blob_kek(self):
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
blob_name = self._create_small_blob('block_blob')
# Act
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(blob.content, self.bytes)
@record
def test_get_blob_resolver(self):
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
key_resolver = KeyResolver()
key_resolver.put_key(self.bbs.key_encryption_key)
self.bbs.key_resolver_function = key_resolver.resolve_key
blob_name = self._create_small_blob('block_blob')
# Act
self.bbs.key_encryption_key = None
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(blob.content, self.bytes)
def test_get_blob_kek_RSA(self):
# We can only generate random RSA keys, so this must be run live or
# the playback test will fail due to a change in kek values.
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = RSAKeyWrapper('key2')
blob_name = self._create_small_blob('block_blob')
# Act
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(blob.content, self.bytes)
@record
def test_get_blob_nonmatching_kid(self):
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
blob_name = self._create_small_blob('block_blob')
# Act
self.bbs.key_encryption_key.kid = 'Invalid'
# Assert
try:
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
self.fail()
except AzureException as e:
self.assertEqual(str(e), _ERROR_DECRYPTION_FAILURE)
@record
def test_put_blob_invalid_stream_type(self):
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
small_stream = StringIO(u'small')
large_stream = StringIO(u'large' * self.bbs.MAX_SINGLE_PUT_SIZE)
blob_name = self._get_blob_reference('block_blob')
# Assert
# Block blob specific single shot
try:
self.bbs.create_blob_from_stream(self.container_name, blob_name, small_stream, count=5)
self.fail()
except TypeError as e:
self.assertEqual(str(e), _ERROR_VALUE_SHOULD_BE_BYTES.format('blob'))
# Generic blob chunked
with self.assertRaises(TypeError):
self.bbs.create_blob_from_stream(self.container_name, blob_name, large_stream)
def test_put_blob_chunking_required_mult_of_block_size(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = self.get_random_bytes(self.bbs.MAX_SINGLE_PUT_SIZE + self.bbs.MAX_BLOCK_SIZE)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content, max_connections=3)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(content, blob.content)
def test_put_blob_chunking_required_non_mult_of_block_size(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = urandom(self.bbs.MAX_SINGLE_PUT_SIZE + 1)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content, max_connections=3)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(content, blob.content)
def test_put_blob_chunking_required_range_specified(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = self.get_random_bytes(self.bbs.MAX_SINGLE_PUT_SIZE * 2)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content, max_connections=3,
count=self.bbs.MAX_SINGLE_PUT_SIZE+53)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(content[:self.bbs.MAX_SINGLE_PUT_SIZE+53], blob.content)
@record
def test_put_block_blob_single_shot(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = b'small'
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(content, blob.content)
@record
def test_put_blob_range(self):
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
content = b'Random repeats' * self.bbs.MAX_SINGLE_PUT_SIZE * 5
# Uploads of this size go through _upload_chunks, so this tests the ability
# of that function to handle index/count ranges
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content, index=2,
count=self.bbs.MAX_SINGLE_PUT_SIZE + 5,
max_connections=1)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(content[2:2 + self.bbs.MAX_SINGLE_PUT_SIZE + 5], blob.content)
@record
def test_put_blob_empty(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = b''
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Assert
self.assertEqual(content, blob.content)
@record
def test_put_blob_serial_upload_chunking(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = self.get_random_bytes(self.bbs.MAX_SINGLE_PUT_SIZE + 1)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content, max_connections=1)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, max_connections=1)
# Assert
self.assertEqual(content, blob.content)
@record
def test_get_blob_range_beginning_to_middle(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = self.get_random_bytes(128)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, start_range=0, end_range=50)
# Assert
self.assertEqual(content[:51], blob.content)
@record
def test_get_blob_range_middle_to_end(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = self.get_random_bytes(128)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, start_range=50, end_range=127)
blob2 = self.bbs.get_blob_to_bytes(self.container_name, blob_name, start_range=50)
# Assert
self.assertEqual(content[50:], blob.content)
self.assertEqual(content[50:], blob2.content)
@record
def test_get_blob_range_middle_to_middle(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = self.get_random_bytes(128)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, start_range=50, end_range=93)
# Assert
self.assertEqual(content[50:94], blob.content)
@record
def test_get_blob_range_aligns_on_16_byte_block(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = self.get_random_bytes(128)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, start_range=48, end_range=63,
max_connections=1)
# Assert
self.assertEqual(content[48:64], blob.content)
@record
def test_get_blob_range_expanded_to_beginning_block_align(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = self.get_random_bytes(128)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, start_range=5, end_range=50)
# Assert
self.assertEqual(content[5:51], blob.content)
@record
def test_get_blob_range_expanded_to_beginning_iv(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
self.bbs.require_encryption = True
content = self.get_random_bytes(128)
blob_name = self._get_blob_reference('block_blob')
# Act
self.bbs.create_blob_from_bytes(self.container_name, blob_name, content)
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, start_range=22, end_range=42)
# Assert
self.assertEqual(content[22:43], blob.content)
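# Why the two range tests above round-trip exactly (reasoning about AES-CBC,
# not about SDK internals): decrypting bytes [22, 42] requires expanding the
# download back to the enclosing 16-byte block, plus one extra block to serve
# as the IV, i.e. floor(22 / 16) * 16 - 16 = 0, so the whole prefix is fetched
# and the requested slice is cut out after decryption.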
@record
def test_put_blob_strict_mode(self):
# Arrange
blob_name = self._get_blob_reference('block_blob')
for service in self.service_dict.values():
service.require_encryption = True
content = urandom(512)
# Assert
for service in self.service_dict.values():
with self.assertRaises(ValueError):
service.create_blob_from_bytes(self.container_name, blob_name, content)
stream = BytesIO(content)
with self.assertRaises(ValueError):
service.create_blob_from_stream(self.container_name, blob_name, stream, count=512)
FILE_PATH = 'blob_input.temp.dat'
with open(FILE_PATH, 'wb') as stream:
stream.write(content)
with self.assertRaises(ValueError):
service.create_blob_from_path(self.container_name, blob_name, FILE_PATH)
if not isinstance(service, PageBlobService):
with self.assertRaises(ValueError):
service.create_blob_from_text(self.container_name, blob_name, 'To encrypt')
@record
def test_get_blob_strict_mode_no_policy(self):
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
blob_name = self._create_small_blob('block_blob')
# Act
self.bbs.key_encryption_key = None
# Assert
with self.assertRaises(ValueError):
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
@record
def test_get_blob_strict_mode_unencrypted_blob(self):
# Arrange
blob_name = self._create_small_blob('block_blob')
# Act
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
# Assert
with self.assertRaises(AzureException):
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
@record
def test_invalid_methods_fail_block(self):
# Arrange
self.bbs.key_encryption_key = KeyWrapper('key1')
blob_name = self._get_blob_reference('block_blob')
# Assert
try:
self.bbs.put_block(self.container_name, blob_name, urandom(32), 'block1')
self.fail()
except ValueError as e:
self.assertEqual(str(e), _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
try:
self.bbs.put_block_list(self.container_name, blob_name, ['block1'])
self.fail()
except ValueError as e:
self.assertEqual(str(e), _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
@record
def test_invalid_methods_fail_append(self):
# Arrange
abs = self._create_storage_service(AppendBlobService, self.settings)
abs.key_encryption_key = KeyWrapper('key1')
blob_name = self._get_blob_reference('block_blob')
# Assert
try:
abs.append_block(self.container_name, blob_name, urandom(32), 'block1')
self.fail()
except ValueError as e:
self.assertEqual(str(e), _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
try:
abs.create_blob(self.container_name, blob_name)
self.fail()
except ValueError as e:
self.assertEqual(str(e), _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
# All append_from operations funnel into append_from_stream, so testing one is sufficient
with self.assertRaises(ValueError):
abs.append_blob_from_bytes(self.container_name, blob_name, b'To encrypt')
@record
def test_invalid_methods_fail_page(self):
# Arrange
self.pbs.key_encryption_key = KeyWrapper('key1')
blob_name = self._get_blob_reference('page_blob')
# Assert
try:
self.pbs.update_page(self.container_name, blob_name, urandom(512), 0, 511)
self.fail()
except ValueError as e:
self.assertEqual(str(e), _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
try:
self.pbs.create_blob(self.container_name, blob_name, 512)
self.fail()
except ValueError as e:
self.assertEqual(str(e), _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
@record
def test_validate_encryption(self):
# Arrange
self.bbs.require_encryption = True
kek = KeyWrapper('key1')
self.bbs.key_encryption_key = kek
blob_name = self._create_small_blob('block_blob')
# Act
self.bbs.require_encryption = False
self.bbs.key_encryption_key = None
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
encryption_data = _dict_to_encryption_data(loads(blob.metadata['encryptiondata']))
iv = encryption_data.content_encryption_IV
content_encryption_key = _validate_and_unwrap_cek(encryption_data, kek, None)
cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
decryptor = cipher.decryptor()
unpadder = PKCS7(128).unpadder()
content = decryptor.update(blob.content) + decryptor.finalize()
content = unpadder.update(content) + unpadder.finalize()
self.assertEqual(self.bytes, content)
@record
def test_create_block_blob_from_star(self):
self._create_blob_from_star('block_blob', self.bytes, self.bbs.create_blob_from_bytes, self.bytes)
stream = BytesIO(self.bytes)
self._create_blob_from_star('block_blob', self.bytes, self.bbs.create_blob_from_stream, stream)
FILE_PATH = 'blob_input.temp.dat'
with open(FILE_PATH, 'wb') as stream:
stream.write(self.bytes)
self._create_blob_from_star('block_blob', self.bytes, self.bbs.create_blob_from_path, FILE_PATH)
self._create_blob_from_star('block_blob', b'To encrypt', self.bbs.create_blob_from_text, 'To encrypt')
@record
def test_create_page_blob_from_star(self):
content = self.get_random_bytes(512)
self._create_blob_from_star('page_blob', content, self.pbs.create_blob_from_bytes, content)
stream = BytesIO(content)
self._create_blob_from_star('page_blob', content, self.pbs.create_blob_from_stream, stream, count=512)
FILE_PATH = 'blob_input.temp.dat'
with open(FILE_PATH, 'wb') as stream:
stream.write(content)
self._create_blob_from_star('page_blob', content, self.pbs.create_blob_from_path, FILE_PATH)
def _create_blob_from_star(self, type, content, create_method, data, **kwargs):
self.service_dict[type].key_encryption_key = KeyWrapper('key1')
self.service_dict[type].require_encryption = True
blob_name = self._get_blob_reference(type)
create_method(self.container_name, blob_name, data, **kwargs)
blob = self.service_dict[type].get_blob_to_bytes(self.container_name, blob_name)
self.assertEqual(content, blob.content)
@record
def test_get_blob_to_star(self):
# Arrange
self.bbs.require_encryption = True
self.bbs.key_encryption_key = KeyWrapper('key1')
blob_name = self._create_small_blob('block_blob')
# Act
bytes_blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name)
stream = BytesIO()
self.bbs.get_blob_to_stream(self.container_name, blob_name, stream)
stream.seek(0)
text_blob = self.bbs.get_blob_to_text(self.container_name, blob_name, encoding='utf-8')
self.bbs.get_blob_to_path(self.container_name, blob_name, FILE_PATH)
# Assert
self.assertEqual(self.bytes, bytes_blob.content)
self.assertEqual(self.bytes, stream.read())
self.assertEqual(self.bytes.decode(), text_blob.content)
with open(FILE_PATH, 'rb') as stream:
self.assertEqual(self.bytes, stream.read())
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 38.102817
| 114
| 0.664363
|
fc184214e1feaba6e4e69a1fc60fcee1b15fb6bd
| 3,962
|
py
|
Python
|
tests/test_backbones/test_vgg.py
|
agim-a/mmclassification
|
b56fde1957edccf610a3ce06f0b8b346cb387112
|
[
"Apache-2.0"
] | 31
|
2020-11-14T02:47:54.000Z
|
2021-12-14T06:26:10.000Z
|
tests/test_backbones/test_vgg.py
|
ly015/mmclassification
|
f355f15485d77a6adebc62ac860e35786e7a1d76
|
[
"Apache-2.0"
] | 2
|
2020-09-01T00:53:39.000Z
|
2022-01-27T20:26:11.000Z
|
tests/test_backbones/test_vgg.py
|
ly015/mmclassification
|
f355f15485d77a6adebc62ac860e35786e7a1d76
|
[
"Apache-2.0"
] | 4
|
2021-01-14T18:12:38.000Z
|
2021-11-11T11:46:50.000Z
|
import pytest
import torch
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmcls.models.backbones import VGG
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
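# Hedged note inferred from the assertions below (not from mmcls docs): with a
# 224x224 input, out_indices 0-4 select feature maps downsampled by 2/4/8/16/32,
# and index 5 (only when num_classes is set) is the classification output.
# >>> feats = VGG(11, out_indices=(0, 4))(torch.randn(1, 3, 224, 224))
# >>> [f.shape for f in feats]   # [(1, 64, 112, 112), (1, 512, 7, 7)]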
def test_vgg():
"""Test VGG backbone"""
with pytest.raises(KeyError):
# VGG depth should be in [11, 13, 16, 19]
VGG(18)
with pytest.raises(AssertionError):
# In VGG: 1 <= num_stages <= 5
VGG(11, num_stages=0)
with pytest.raises(AssertionError):
# In VGG: 1 <= num_stages <= 5
VGG(11, num_stages=6)
with pytest.raises(AssertionError):
# len(dilations) == num_stages
VGG(11, dilations=(1, 1), num_stages=3)
with pytest.raises(TypeError):
# pretrained must be a string path
model = VGG(11)
model.init_weights(pretrained=0)
# Test VGG11 norm_eval=True
model = VGG(11, norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test VGG11 forward without classifiers
model = VGG(11, out_indices=(0, 1, 2, 3, 4))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == (1, 64, 112, 112)
assert feat[1].shape == (1, 128, 56, 56)
assert feat[2].shape == (1, 256, 28, 28)
assert feat[3].shape == (1, 512, 14, 14)
assert feat[4].shape == (1, 512, 7, 7)
# Test VGG11 forward with classifiers
model = VGG(11, num_classes=10, out_indices=(0, 1, 2, 3, 4, 5))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 6
assert feat[0].shape == (1, 64, 112, 112)
assert feat[1].shape == (1, 128, 56, 56)
assert feat[2].shape == (1, 256, 28, 28)
assert feat[3].shape == (1, 512, 14, 14)
assert feat[4].shape == (1, 512, 7, 7)
assert feat[5].shape == (1, 10)
# Test VGG11BN forward
model = VGG(11, norm_cfg=dict(type='BN'), out_indices=(0, 1, 2, 3, 4))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == (1, 64, 112, 112)
assert feat[1].shape == (1, 128, 56, 56)
assert feat[2].shape == (1, 256, 28, 28)
assert feat[3].shape == (1, 512, 14, 14)
assert feat[4].shape == (1, 512, 7, 7)
# Test VGG11BN forward with classifiers
model = VGG(
11,
num_classes=10,
norm_cfg=dict(type='BN'),
out_indices=(0, 1, 2, 3, 4, 5))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 6
assert feat[0].shape == (1, 64, 112, 112)
assert feat[1].shape == (1, 128, 56, 56)
assert feat[2].shape == (1, 256, 28, 28)
assert feat[3].shape == (1, 512, 14, 14)
assert feat[4].shape == (1, 512, 7, 7)
assert feat[5].shape == (1, 10)
# Test VGG13 with layers 1, 2, 3 out forward
model = VGG(13, out_indices=(0, 1, 2))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == (1, 64, 112, 112)
assert feat[1].shape == (1, 128, 56, 56)
assert feat[2].shape == (1, 256, 28, 28)
# Test VGG16 with top feature maps out forward
model = VGG(16)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat.shape == (1, 512, 7, 7)
# Test VGG19 with classification score out forward
model = VGG(19, num_classes=10)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat.shape == (1, 10)
| 28.919708
| 74
| 0.587582
|
ef2d8a019c5283f2cc38949b504ec94a60927b92
| 292
|
py
|
Python
|
st3/yamlmacros/src/macro_error.py
|
Thom1729/yaml-macros-engine
|
f2cbdaf4d4300c842c6a6c954dceb626e3942316
|
[
"MIT"
] | null | null | null |
st3/yamlmacros/src/macro_error.py
|
Thom1729/yaml-macros-engine
|
f2cbdaf4d4300c842c6a6c954dceb626e3942316
|
[
"MIT"
] | null | null | null |
st3/yamlmacros/src/macro_error.py
|
Thom1729/yaml-macros-engine
|
f2cbdaf4d4300c842c6a6c954dceb626e3942316
|
[
"MIT"
] | null | null | null |
from ruamel.yaml import Node
from .types import ContextType
__all__ = ['MacroError']
class MacroError(Exception):
def __init__(self, message: str, node: Node, context: ContextType = None) -> None:
self.message = message
self.node = node
self.context = context
| 22.461538
| 86
| 0.678082
|
15cd993002db74bf1ee0d6b6b7ec3168075d3fee
| 5,939
|
py
|
Python
|
sqlalchemy_trino/datatype.py
|
erikerlandson/sqlalchemy-trino
|
29020cdcaf72531418f8dfb7b9b833ccb8b1a567
|
[
"Apache-2.0"
] | 20
|
2021-01-22T14:04:31.000Z
|
2022-03-30T06:42:00.000Z
|
sqlalchemy_trino/datatype.py
|
erikerlandson/sqlalchemy-trino
|
29020cdcaf72531418f8dfb7b9b833ccb8b1a567
|
[
"Apache-2.0"
] | 37
|
2021-02-26T02:41:12.000Z
|
2022-01-26T17:15:51.000Z
|
sqlalchemy_trino/datatype.py
|
erikerlandson/sqlalchemy-trino
|
29020cdcaf72531418f8dfb7b9b833ccb8b1a567
|
[
"Apache-2.0"
] | 19
|
2021-04-14T20:41:49.000Z
|
2022-02-09T14:13:53.000Z
|
import re
from typing import *
from sqlalchemy import util
from sqlalchemy.sql import sqltypes
from sqlalchemy.sql.type_api import TypeEngine
SQLType = Union[TypeEngine, Type[TypeEngine]]
class DOUBLE(sqltypes.Float):
__visit_name__ = "DOUBLE"
class MAP(TypeEngine):
__visit_name__ = "MAP"
def __init__(self, key_type: SQLType, value_type: SQLType):
if isinstance(key_type, type):
key_type = key_type()
self.key_type: TypeEngine = key_type
if isinstance(value_type, type):
value_type = value_type()
self.value_type: TypeEngine = value_type
@property
def python_type(self):
return dict
class ROW(TypeEngine):
__visit_name__ = "ROW"
def __init__(self, attr_types: List[Tuple[Optional[str], SQLType]]):
self.attr_types: List[Tuple[Optional[str], SQLType]] = []
for attr_name, attr_type in attr_types:
if isinstance(attr_type, type):
attr_type = attr_type()
self.attr_types.append((attr_name, attr_type))
@property
def python_type(self):
return list
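# Construction sketch (mirrors what parse_sqltype below produces): the
# composite types accept TypeEngine classes or instances interchangeably.
# >>> MAP(sqltypes.VARCHAR, sqltypes.INTEGER)
# >>> ROW([('name', sqltypes.VARCHAR(10)), ('age', sqltypes.INTEGER)])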
# https://trino.io/docs/current/language/types.html
_type_map = {
# === Boolean ===
'boolean': sqltypes.BOOLEAN,
# === Integer ===
'tinyint': sqltypes.SMALLINT,
'smallint': sqltypes.SMALLINT,
'int': sqltypes.INTEGER,
'integer': sqltypes.INTEGER,
'bigint': sqltypes.BIGINT,
# === Floating-point ===
'real': sqltypes.REAL,
'double': DOUBLE,
# === Fixed-precision ===
'decimal': sqltypes.DECIMAL,
# === String ===
'varchar': sqltypes.VARCHAR,
'char': sqltypes.CHAR,
'varbinary': sqltypes.VARBINARY,
'json': sqltypes.JSON,
# === Date and time ===
'date': sqltypes.DATE,
'time': sqltypes.TIME,
'timestamp': sqltypes.TIMESTAMP,
# 'interval year to month':
# 'interval day to second':
#
# === Structural ===
# 'array': ARRAY,
# 'map': MAP
# 'row': ROW
#
# === Mixed ===
# 'ipaddress': IPADDRESS
# 'uuid': UUID,
# 'hyperloglog': HYPERLOGLOG,
# 'p4hyperloglog': P4HYPERLOGLOG,
# 'qdigest': QDIGEST,
# 'tdigest': TDIGEST,
}
def unquote(string: str, quote: str = '"', escape: str = '\\') -> str:
"""
If string starts and ends with a quote, unquote it
"""
if string.startswith(quote) and string.endswith(quote):
string = string[1:-1]
string = string.replace(f"{escape}{quote}", quote) \
.replace(f"{escape}{escape}", escape)
return string
def aware_split(string: str, delimiter: str = ',', maxsplit: int = -1,
quote: str = '"', escaped_quote: str = r'\"',
open_bracket: str = '(', close_bracket: str = ')') -> Iterator[str]:
"""
A split function that is aware of quotes and brackets/parentheses.
:param string: string to split
:param delimiter: string defining where to split, usually a comma or space
:param maxsplit: Maximum number of splits to do. -1 (default) means no limit.
:param quote: string, either a single or a double quote
:param escaped_quote: string representing an escaped quote
:param open_bracket: string, either [, {, < or (
:param close_bracket: string, either ], }, > or )
"""
parens = 0
quotes = False
i = 0
if maxsplit < -1:
raise ValueError(f"maxsplit must be >= -1, got {maxsplit}")
elif maxsplit == 0:
yield string
return
for j, character in enumerate(string):
complete = parens == 0 and not quotes
if complete and character == delimiter:
if maxsplit != -1:
maxsplit -= 1
yield string[i:j]
i = j + len(delimiter)
if maxsplit == 0:
break
elif character == open_bracket:
parens += 1
elif character == close_bracket:
parens -= 1
elif character == quote:
if quotes and string[j - len(escaped_quote) + 1: j + 1] != escaped_quote:
quotes = False
elif not quotes:
quotes = True
yield string[i:]
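# Example behaviour (derived from the implementation above): delimiters inside
# brackets or quotes do not cause a split.
# >>> list(aware_split('varchar(10),row(a int, b int)'))
# ['varchar(10)', 'row(a int, b int)']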
def parse_sqltype(type_str: str) -> TypeEngine:
type_str = type_str.strip().lower()
match = re.match(r'^(?P<type>\w+)\s*(?:\((?P<options>.*)\))?', type_str)
if not match:
util.warn(f"Could not parse type name '{type_str}'")
return sqltypes.NULLTYPE
type_name = match.group("type")
type_opts = match.group("options")
if type_name == "array":
item_type = parse_sqltype(type_opts)
if isinstance(item_type, sqltypes.ARRAY):
dimensions = (item_type.dimensions or 1) + 1
return sqltypes.ARRAY(item_type.item_type, dimensions=dimensions)
return sqltypes.ARRAY(item_type)
elif type_name == "map":
key_type_str, value_type_str = aware_split(type_opts)
key_type = parse_sqltype(key_type_str)
value_type = parse_sqltype(value_type_str)
return MAP(key_type, value_type)
elif type_name == "row":
attr_types: List[Tuple[Optional[str], SQLType]] = []
for attr in aware_split(type_opts):
attr_name, attr_type_str = aware_split(attr.strip(), delimiter=' ', maxsplit=1)
attr_name = unquote(attr_name)
attr_type = parse_sqltype(attr_type_str)
attr_types.append((attr_name, attr_type))
return ROW(attr_types)
if type_name not in _type_map:
util.warn(f"Did not recognize type '{type_name}'")
return sqltypes.NULLTYPE
type_class = _type_map[type_name]
type_args = [int(o.strip()) for o in type_opts.split(',')] if type_opts else []
if type_name in ('time', 'timestamp'):
type_kwargs = dict(timezone=type_str.endswith("with time zone"))
return type_class(**type_kwargs) # TODO: handle time/timestamp(p) precision
return type_class(*type_args)
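# Quick parser sanity sketch (expected results inferred from the code above):
# >>> parse_sqltype('map(varchar, array(integer))')        # MAP(VARCHAR, ARRAY(INTEGER))
# >>> parse_sqltype('timestamp with time zone').timezone   # True
# >>> parse_sqltype('decimal(38, 10)')                     # DECIMAL(38, 10)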
| 31.759358
| 91
| 0.609025
|
9be7d3f58111f1edc3611c4f749df33fde08b03e
| 3,254
|
py
|
Python
|
Agents/VIPAgent/setup.py
|
Entek-Technical-Services/BEMOSS3.5
|
581a205b4129530474a5ceee93cb36ef62992d4c
|
[
"BSD-3-Clause"
] | 73
|
2017-07-11T21:46:41.000Z
|
2022-03-11T03:35:25.000Z
|
Agents/VIPAgent/setup.py
|
Entek-Technical-Services/BEMOSS3.5
|
581a205b4129530474a5ceee93cb36ef62992d4c
|
[
"BSD-3-Clause"
] | 19
|
2017-10-10T22:06:15.000Z
|
2022-03-28T21:03:33.000Z
|
Agents/VIPAgent/setup.py
|
Entek-Technical-Services/BEMOSS3.5
|
581a205b4129530474a5ceee93cb36ef62992d4c
|
[
"BSD-3-Clause"
] | 36
|
2017-06-24T00:17:03.000Z
|
2022-03-31T13:58:36.000Z
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
from setuptools import setup, find_packages
packages = find_packages('.')
package = packages[0]
setup(
name = package + 'agent',
version = "3.0",
install_requires = ['volttron'],
packages = packages,
entry_points = {
'setuptools.installation': [
'eggsecutable = ' + package + '.agent:main',
]
}
)
| 42.815789
| 72
| 0.762139
|
4e1a25c0fad91252d6ed79dd4b207f03feb3775d
| 23,877
|
py
|
Python
|
CFG/testbed-TSN.py
|
abacuspix/NFV_project
|
f5585a6750119b1f954fea65c37a14badad1fd62
|
[
"MIT"
] | null | null | null |
CFG/testbed-TSN.py
|
abacuspix/NFV_project
|
f5585a6750119b1f954fea65c37a14badad1fd62
|
[
"MIT"
] | null | null | null |
CFG/testbed-TSN.py
|
abacuspix/NFV_project
|
f5585a6750119b1f954fea65c37a14badad1fd62
|
[
"MIT"
] | null | null | null |
from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@17.1.1.231'
host2 = 'root@17.1.1.232'
host3 = 'root@17.1.1.233'
host4 = 'root@17.1.1.234'
#host5 = 'root@1.1.1.5'
#host6 = 'root@1.1.1.6'
#host7 = 'root@1.1.1.7'
#host8 = 'root@1.1.1.8'
#host9 = 'root@1.1.1.9'
#host10 = 'root@1.1.1.10'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 65452
#Host from which the fab commands are triggered to install and provision
host_build = 'root@17.1.1.241'
#Role definition of the hosts.
env.roledefs = {
'all': [host1,host2,host3,host4],
'cfgm': [host1],
'openstack': [host1],
'control': [host1],
'compute': [host2,host3,host4],
'collector': [host1],
'webui': [host1],
'database': [host1],
'build': [host_build],
'storage-master': [host1],
#'storage-compute': [host2],
# 'vgw': [host4, host5], # Optional, Only to enable VGW. Only compute can support vgw
'tsn': [host3], # Optional, Only to enable TSN. Only compute can support TSN
'toragent': [host3], #Optional, Only to enable Tor Agent. Only compute can support Tor Agent
# 'backup':[backup_node], # only if the backup_node is defined
}
env.hostnames = {
'all': ['host1', 'host2','host3', 'host4']
}
#Openstack admin password
env.openstack_admin_password = 'root123'
# Passwords of each host
# for passwordless login's no need to set env.passwords,
# instead populate env.key_filename in testbed.py with public key.
env.passwords = {
host1: 'root123',
host2: 'root123',
host3: 'root123',
host4: 'root123',
#host5: 'secret',
#host6: 'secret',
#host7: 'secret',
#host8: 'secret',
#host9: 'secret',
#host10: 'secret',
# backup_node: 'secret',
host_build: 'root123',
}
# SSH Public key file path for passwordless logins
# if env.passwords is not specified.
#env.key_filename = '/root/.ssh/id_rsa.pub'
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
#host5: 'centos',
#host6: 'centos',
#host7: 'centos',
#host8: 'centos',
#host9: 'centos',
#host10: 'centos',
}
#env.orchestrator = 'openstack' #other values are 'vcenter', 'none' default:openstack
#ntp server the servers should point to
#env.ntp_server = 'ntp.juniper.net'
# OPTIONAL COMPUTE HYPERVISOR CHOICE:
#======================================
# Compute Hypervisor
env.hypervisor = {
host2: 'libvirt',
host3: 'libvirt',
host4: 'libvirt',
}
# Specify the hypervisor to be provisioned in the compute node.(Default=libvirt)
# INFORMATION FOR DB BACKUP/RESTORE ..
#=======================================================
#Optional,Backup Host configuration if it is not available then it will put in localhost
#backup_node = 'root@2.2.2.2'
# Optional, Local/Remote location of backup_data path
# if it is not passed then it will use default path
#backup_db_path= ['/home/','/root/']
#cassandra backup can be defined either "full" or "custom"
#full -> take complete snapshot of cassandra DB
#custom -> take snapshot except defined in skip_keyspace
#cassandra_backup='custom' [ MUST OPTION]
#skip_keyspace=["ContrailAnalytics"] IF cassandra_backup is selected as custom
#service token need to define to do restore of backup data
#service_token = '53468cf7552bbdc3b94f'
#OPTIONAL ANALYTICS CONFIGURATION
#================================
# database_dir is the directory where cassandra data is stored
#
# If it is not passed, we will use cassandra's default
# /var/lib/cassandra/data
#
#database_dir = '<separate-partition>/cassandra'
#
# analytics_data_dir is the directory where cassandra data for analytics
# is stored. This is used to seperate cassandra's main data storage [internal
# use and config data] with analytics data. That way critical cassandra's
# system data and config data are not overrun by analytis data
#
# If it is not passed, we will use cassandra's default
# /var/lib/cassandra/data
#
#analytics_data_dir = '<separate-partition>/analytics_data'
#
# ssd_data_dir is the directory where cassandra can store fast retrievable
# temporary files (commit_logs). Giving cassandra an ssd disk for this
# purpose improves cassandra performance
#
# If it is not passed, we will use cassandra's default
# /var/lib/cassandra/commit_logs
#
#ssd_data_dir = '<seperate-partition>/commit_logs_data'
#following variables allow analytics data to have different TTL in cassandra database
#analytics_config_audit_ttl controls TTL for config audit logs
#analytics_statistics_ttl controls TTL for stats
#analytics_flow_ttl controls TTL for flow data
#database_ttl controls TTL for rest of the data
#
#database_ttl = 48
#analytics_config_audit_ttl = 48
#analytics_statistics_ttl = 48
#analytics_flow_ttl = 48
#the following parameter specifies the minimum amount of disk space required in the analytics
#database partition; if the configured amount of space is not present, provisioning will fail
minimum_diskGB = 256
#OPTIONAL BONDING CONFIGURATION
#==============================
#Inferface Bonding
#bond= {
# host2 : { 'name': 'bond0', 'member': ['p2p0p0','p2p0p1','p2p0p2','p2p0p3'], 'mode': '802.3ad', 'xmit_hash_policy': 'layer3+4' },
# host5 : { 'name': 'bond0', 'member': ['p4p0p0','p4p0p1','p4p0p2','p4p0p3'], 'mode': '802.3ad', 'xmit_hash_policy': 'layer3+4' },
#}
#OPTIONAL SEPARATION OF MANAGEMENT AND CONTROL + DATA and OPTIONAL VLAN INFORMATION
#==================================================================================
control_data = {
host1 : { 'ip': '11.11.11.11/24', 'gw' : '11.11.11.254', 'device':'eth1' },
host2 : { 'ip': '11.11.11.12/24', 'gw' : '11.11.11.254', 'device':'eth1' },
host3 : { 'ip': '11.11.11.13/24', 'gw' : '11.11.11.254', 'device':'eth1' },
host4 : { 'ip': '11.11.11.14/24', 'gw' : '11.11.11.254', 'device':'eth1' },
# host3 : { 'ip': '192.168.10.3/24', 'gw' : '192.168.10.254', 'device':'eth0', 'vlan': '224' },
# host4 : { 'ip': '192.168.10.4/24', 'gw' : '192.168.10.254', 'device':'eth3', 'vlan': '224' },
# host5 : { 'ip': '192.168.10.5/24', 'gw' : '192.168.10.254', 'device':'bond0', 'vlan': '224' },
# host6 : { 'ip': '192.168.10.6/24', 'gw' : '192.168.10.254', 'device':'eth0', 'vlan': '224' },
# host7 : { 'ip': '192.168.10.7/24', 'gw' : '192.168.10.254', 'device':'eth1', 'vlan': '224' },
# host8 : { 'ip': '192.168.10.8/24', 'gw' : '192.168.10.254', 'device':'eth1', 'vlan': '224' },
}
#OPTIONAL STATIC ROUTE CONFIGURATION
#===================================
static_route = {
host1 : [{ 'ip': '11.12.11.0', 'netmask' : '255.255.255.0', 'gw':'11.11.11.254', 'intf': 'eth1' }],
host2 : [{ 'ip': '11.12.11.0', 'netmask' : '255.255.255.0', 'gw':'11.11.11.254', 'intf': 'eth1' }],
host3 : [{ 'ip': '11.12.11.0', 'netmask' : '255.255.255.0', 'gw':'11.11.11.254', 'intf': 'eth1' }],
host4 : [{ 'ip': '11.12.11.0', 'netmask' : '255.255.255.0', 'gw':'11.11.11.254', 'intf': 'eth1' }],
# host2 : [{ 'ip': '10.1.1.0', 'netmask' : '255.255.255.0', 'gw':'192.168.10.254', 'intf': 'bond0' },
# { 'ip': '10.1.2.0', 'netmask' : '255.255.255.0', 'gw':'192.168.10.254', 'intf': 'bond0' }],
# host5 : [{ 'ip': '10.1.1.0', 'netmask' : '255.255.255.0', 'gw':'192.168.10.254', 'intf': 'bond0' }],
}
#storage compute disk config
#storage_node_config = {
# host4 : { 'disks' : ['/dev/sdc', '/dev/sdd'], 'journal' : ['/dev/sde', '/dev/sdf'] },
# host5 : { 'disks' : ['/dev/sdc:/dev/sde', '/dev/sdd:/dev/sde'], 'ssd-disks' : ['/dev/sdf', '/dev/sdg'] },
# host6 : { 'disks' : ['/dev/sdc', '/dev/sdd'], 'local-disks' : ['/dev/sde'], 'local-ssd-disks' : ['/dev/sdf'] },
# host7 : { 'nfs' : ['10.10.10.10:/nfs', '11.11.11.11:/nfs']},
#}
#
#Set Storage replica
#storage_replica_size = 3
#Base Openstack live migration configuration.
#live_migration = True
#Fix uid/gid for nova/libvirt-qemu so the ids are same across all nodes.
#nova_uid_fix = True
#Following are NFS based live migration configuration
#Enable this for External NFS server based live migration
#ext_nfs_livem = True
#ext_nfs_livem_mount = '11.1.0.1:/nfsvol'
#Enable this for Ceph based NFS VM server based live migration
#ceph_nfs_livem = True
#ceph_nfs_livem_subnet = '192.168.10.253/24'
#ceph_nfs_livem_image = '/ubuntu/livemnfs.qcow2'
#ceph_nfs_livem_host = host4
#To disable installing contrail interface rename package
#env.interface_rename = False
#Path where the CA certificate file is stored on the node where fab is run.
#Fab copies the file to node where TOR agent is run.
#This is optional and is required only when tor_ovs_protocol is pssl.
#The certificates on the TOR are based on this CA cert.
#env.ca_cert_file = '/root/file.pem'
#In environments where keystone is deployed outside of Contrail provisioning
#scripts, you can use the below options
#
# Note :
# "insecure" is applicable only when protocol is https
# The entries in env.keystone overrides the below options which used
# to be supported earlier :
# service_token
# keystone_ip
# keystone_admin_user
# keystone_admin_password
# region_name
#
#env.keystone = {
# 'keystone_ip' : 'x.y.z.a',
# 'auth_protocol' : 'http', #Default is http
# 'auth_port' : '35357', #Default is 35357
# 'admin_token' : '33c57636fbc2c5552fd2', #admin_token in keystone.conf
# 'admin_user' : 'admin', #Default is admin
# 'admin_password' : 'contrail123', #Default is contrail123
# 'nova_password' : 'contrail123', #Default is the password set in admin_password
# 'neutron_password': 'contrail123', #Default is the password set in admin_password
# 'service_tenant' : 'service', #Default is service
# 'admin_tenant' : 'admin', #Default is admin
# 'region_name' : 'RegionOne', #Default is RegionOne
# 'insecure' : 'True', #Default = False
# 'manage_neutron' : 'no', #Default = 'yes', Whether the neutron user/role needs to be configured in keystone.
#}
#
#env.nova = {
# 'cpu_mode': 'host-passthrough', # Possible options: none, host-passthrough, host-model, and custom
# # if cpu_mode is 'custom' specify cpu_model option too
# 'cpu_model': 'Nehalem', # relevant only if cpu_mode is 'custom'
#}
# In Openstack or Contrail High Availability setups.
# internal_vip : Virtual IP of the Openstack HA nodes in the data/control(internal) network,
# all the Openstack services behind this VIP are accessed using this VIP.
# external_vip : Virtual IP of the Openstack HA nodes in the management(external) network,
# the Openstack dashboard and novncproxy services behind this VIP are accessed using this VIP.
# contrail_internal_vip : Virtual IP of the Contrail HA nodes in the data/control(internal) network,
# all the Contrail services behind this VIP are accessed using this VIP.
# contrail_external_vip : Virtual IP of the Contrail HA nodes in the management(external) network,
# the Contrail introspects are accessed using this VIP.
# nfs_server : NFS server to be used to store the glance images.
# nfs_glance_path : NFS server image path, which will be mounted on the Openstack nodes;
# the glance images will be placed in/accessed from this location.
# internal_virtual_router_id : Virtual router ID for the Openstack HA nodes in control/data(internal) network.
# external_virtual_router_id : Virtual router ID for the Openstack HA nodes in management(external) network.
# contrail_internal_virtual_router_id : Virtual router ID for the Contrail HA nodes in control/data(internal) network.
# contrail_external_virtual_router_id : Virtual router ID for the Contrail HA nodes in management(external) network.
#env.ha = {
# 'internal_vip' : '1.1.1.100', #Internal Virtual IP of the openstack HA Nodes.
# 'external_vip' : '2.2.2.200', #External Virtual IP of the openstack HA Nodes.
# 'contrail_internal_vip' : '1.1.1.10', #Internal Virtual IP of the contrail HA Nodes.
# 'contrail_external_vip' : '2.2.2.20', #External Virtual IP of the contrail HA Nodes.
# 'nfs_server' : '3.3.3.3', #IP address of the NFS Server which will be mounted to /var/lib/glance/images of openstack Node, Defaults to env.roledefs['compute'][0]
# 'nfs_glance_path' : '/var/tmp/images/', #NFS Server path to save images, Defaults to /var/tmp/glance-images/
# 'internal_virtual_router_id' : 180, #Default = 100
# 'external_virtual_router_id' : 190, #Default = 100
# 'contrail_internal_virtual_router_id' : 200, #Default = 100
# 'contrail_external_virtual_router_id' : 210, #Default = 100
#}
# In environments where openstack services are deployed independently
# from contrail, you can use the below options
# service_token : Common service token for all services like nova,
# neutron, glance, cinder etc
# amqp_host : IP of AMQP Server to be used in openstack
# manage_amqp : Default = 'no', if set to 'yes' provisions AMQP on the openstack nodes and
# openstack services use the AMQP on the openstack nodes instead of the config nodes.
# amqp_host is ignored if manage_amqp is set
#
#env.openstack = {
# 'service_token' : '33c57636fbc2c5552fd2', #Common service token for all openstack services
# 'amqp_host' : '10.204.217.19', #IP of AMQP Server to be used in openstack
# 'manage_amqp' : 'yes', #Default no, Manage a separate AMQP for openstack services on the openstack nodes.
# 'osapi_compute_workers' : 40, #Default 40, For low-memory systems reduce the osapi compute worker threads.
# 'conductor_workers' : 40, #Default 40, For low-memory systems reduce the conductor worker threads.
#}
# Link-Local Metadata Service
# By default fab scripts will retrieve metadata secret from openstack node.
# To override, specify the metadata proxy secret from the Openstack node
#neutron_metadata_proxy_shared_secret = <secret>
#To enable multi-tenancy feature
multi_tenancy = True
#To enable haproxy feature
#haproxy = True
#To enable parallel execution of tasks on multiple nodes
do_parallel = True
# To configure the encapsulation priority. Default: MPLSoGRE
env.encap_priority = "'VXLAN','MPLSoUDP','MPLSoGRE'"
# Optional proxy settings.
# env.http_proxy = os.environ.get('http_proxy')
#To enable LBaaS feature
# Default Value: False
env.enable_lbaas = True
# Ceilometer enable/disable installation and provisioning
# Default Value: False
#enable_ceilometer = True
# Ceilometer polling interval for meters in seconds
# Default Value: 600
#ceilometer_polling_interval = 600
# Ceilometer data TTL in seconds
# Default Value: 7200
#ceilometer_ttl = 7200
#OPTIONAL REMOTE SYSLOG CONFIGURATION
#===================================
#For R1.10 this needs to be specified to enable rsyslog.
#For later releases this is enabled as part of provisioning,
#with the following default values.
#
#port = 19876
#protocol = tcp
#collector = dynamic i.e. rsyslog clients will connect to servers in a round
# robin fashion. With a static collector, all clients will
# connect to a single collector; static is a
# test-only option.
#status = enable
#
#env.rsyslog_params = {'port':19876, 'proto':'tcp', 'collector':'dynamic', 'status':'enable'}
#OPTIONAL Virtual gateway CONFIGURATION
#=======================================
#The vgw section is only relevant when you want to use the virtual gateway feature.
#You can use one of your compute nodes as a gateway.
#Definition for the Key used
#-------------------------------------
#vn: Virtual Network fully qualified name. This particular VN will be used by VGW.
#ipam-subnets: Subnets used by the vn. There can be a single subnet or multiple subnets
#gateway-routes: If any routes are present then only those routes will be published
#by the VGW; otherwise the default route (0.0.0.0) will be published
#env.vgw = {host4: {'vgw1':{'vn':'default-domain:admin:public:public', 'ipam-subnets': ['10.204.220.128/29', '10.204.220.136/29'], 'gateway-routes': ['8.8.8.0/24', '1.1.1.0/24']},
# 'vgw2':{'vn':'default-domain:admin:public1:public1', 'ipam-subnets': ['10.204.220.144/29']}},
# host5: {'vgw2':{'vn':'default-domain:admin:public1:public1', 'ipam-subnets': ['10.204.220.144/29']}}
# }
#OPTIONAL optional tor agent and tsn CONFIGURATION
#==================================================
#The tor agent section is only relevant when you want to use the Tor Agent feature.
#You can use one of your compute nodes as a Tor Agent. The same or a different compute
#node should be enabled as TSN
#Definition for the Key used
#-------------------------------------
# tor_ip: IP of the tor switch
# tor_agent_id: Unique id to identify the tor switch. Typically a numeric value.
# tor_agent_name: Unique name for the TOR Agent. This is an optional field. If this is
# not specified, the name used will be <hostname>-<tor_agent_id>
# tor_type: Always ovs
# tor_ovs_port: Port number to be used by ovs. If any redundant TOR Agent is
# specified for this tor-agent, it should have the same 'tor_ovs_port'
# tor_ovs_protocol: Connection protocol between TOR Agent and TOR (tcp / pssl)
# tor_tsn_ip: TSN node ip
# tor_tsn_name: Name of the TSN node
# tor_name: Name of the tor switch. If any redundant TOR Agent is specified for
# this tor-agent, it should have the same 'tor_name'
# tor_tunnel_ip: Data plane IP for the tor switch
# tor_vendor_name: Vendor type for TOR switch
# tor_product_name: Product name of TOR switch. This is an optional field.
# tor_agent_http_server_port: HTTP server port. Same will be used by tor agent for introspect
# tor_agent_ovs_ka: Tor Agent OVSDB keepalive timer in milli seconds
#
env.tor_agent = {host3:[{
'tor_ip':'11.12.11.14',
'tor_agent_id':'1',
'tor_agent_name':'host3-1',
'tor_type':'ovs',
'tor_ovs_port':'9999',
'tor_ovs_protocol':'tcp',
'tor_tsn_ip':'11.11.11.12',
'tor_tsn_name':'host3',
'tor_name':'GZ-LAB-QFX5100-A',
'tor_tunnel_ip':'11.12.11.252',
'tor_vendor_name':'Juniper',
'tor_product_name':'QFX5100',
'tor_agent_http_server_port': '9010',
'tor_agent_ovs_ka': '10000',
}]
}
#######################################
#vcenter provisioning
#server is the vcenter server ip
#port is the port on which vcenter is listening for connection
#username is the vcenter username credentials
#password is the vcenter password credentials
#auth is the authentication type used to talk to vcenter, http or https
#datacenter is the datacenter name we are operating on
#cluster is the list of clusters we are operating on
#dvswitch section contains the distributed switch related params
# dv_switch_name
#dvportgroup section contains the distributed port group info
# dv_portgroupname and the number of ports the group has
######################################
#env.vcenter = {
# 'server':'127.0.0.1',
# 'port': '443',
# 'username': 'administrator@vsphere.local',
# 'password': 'Contrail123!',
# 'auth': 'https',
# 'datacenter': 'kd_dc',
# 'cluster': ['kd_cluster_1','kd_cluster_2'],
# 'dv_switch': { 'dv_switch_name': 'kd_dvswitch',
# },
# 'dv_port_group': { 'dv_portgroup_name': 'kd_dvportgroup',
# 'number_of_ports': '3',
# },
#}
#
####################################################################################
# The compute vm provisioning on ESXI host
# This section is used to copy a vmdk onto the ESXi box and bring it up.
# The ContrailVM which comes up will be set up as a compute node with only
# vrouter running on it. Each host has an associated ESXi host.
#
# esxi_host information:
# ip: the esxi ip on which the contrailvm(host/compute) runs
# username: username used to login to esxi
# password: password for esxi
# fabric_vswitch: the name of the underlay vswitch that runs on esxi
# optional, defaults to 'vswitch0'
# fabric_port_group: the name of the underlay port group for esxi
# optional, defaults to 'contrail-fab-pg'
# uplink_nic: the nic used for the underlay
# optional, defaults to None
# data_store: the datastore on esxi where the vmdk is copied to
# cluster: name of the cluster to which this esxi is added
# contrail_vm information:
# uplink: The SRIOV or Passthrough PCI Id(04:10.1). If not provided
# will default to vmxnet3 based fabric uplink
# mac: the virtual mac address for the contrail vm
# host: the contrail_vm ip in the form of 'user@contrailvm_ip'
# vmdk: the absolute path of the contrail-vmdk used to spawn vm
# optional, if vmdk_download_path is specified
# vmdk_download_path: download path of the contrail-vmdk.vmdk used to spawn vm
# optional, if vmdk is specified
######################################################################################
#esxi_hosts = {
# 'esxi': {
# 'ip': '1.1.1.1',
# 'username': 'root',
# 'password': 'c0ntrail123',
# 'datastore': "/vmfs/volumes/ds1",
# 'cluster': "kd_cluster_1",
# 'contrail_vm': {
# 'mac': "00:50:56:05:ba:ba",
# 'host': "root@2.2.2.2",
# 'vmdk_download_path': "http://10.84.5.100/vmware/vmdk/ContrailVM-disk1.vmdk",
# }
# }
#}
# OPTIONAL DPDK CONFIGURATION
# ===========================
# If some compute nodes should use the DPDK vRouter version, they have to be put in
# the env.dpdk dictionary. The format is:
# env.dpdk = {
# host1: { 'huge_pages' : '50', 'coremask' : '0xf' },
# host2: { 'huge_pages' : '50', 'coremask' : '0,3-7' },
# }
# huge_pages - Specify what percentage of host memory should be reserved
# for access with huge pages
# coremask - Specify CPU affinity mask to run vRouter with. Supported formats:
# hexadecimal, comma-separated list of CPUs, dash-separated range
# of CPUs.
# OPTIONAL vrouter limit parameter
# ==================================
#env.vrouter_module_params = {
# host4:{'mpls_labels':'131072', 'nexthops':'131072', 'vrfs':'65536', 'macs':'262144'},
# host5:{'mpls_labels':'131072', 'nexthops':'131072', 'vrfs':'65536', 'macs':'262144'}
#}
#
# OPTIONAL md5 key enabling
# There are 2 ways of enabling a BGP md5 key on a node apart from the webui.
# 1. Before provisioning the node, include an env dict in testbed.py as shown below, specifying the desired key value on the node. The key should be of type "string" only.
# 2. If md5 is not included in testbed.py and the node is already provisioned, you can run the
# contrail-controller/src/config/utils/provision_control.py script with a newly added argument for md5
# The below env dict is for first method specified, where you include a dict in testbed.py as shown below:
# env.md5 = {
# host1: 'juniper',
# host2: 'juniper',
# host3: 'juniper',
# }
# 'juniper' is the md5 key that will be configured on the nodes.
| 44.463687
| 186
| 0.637769
|
78ce4fd09d550c82993fa65822995e5fa9c3153f
| 2,638
|
py
|
Python
|
examples/adspygoogle/dfp/v201208/update_placements.py
|
krux/adspygoogle
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
examples/adspygoogle/dfp/v201208/update_placements.py
|
krux/adspygoogle
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
examples/adspygoogle/dfp/v201208/update_placements.py
|
krux/adspygoogle
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2020-04-02T19:00:31.000Z
|
2020-08-06T03:28:38.000Z
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates all placements to allow for AdSense targeting up to
the first 500. To determine which placements exist,
run get_all_placements.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201208')
# Create a statement to select first 500 placements.
filter_statement = {'query': 'LIMIT 500'}
# Get placements by statement.
response = placement_service.GetPlacementsByStatement(filter_statement)[0]
placements = []
if 'results' in response:
placements = response['results']
if placements:
# Update each local placement object by enabling AdSense targeting.
for placement in placements:
if not placement['targetingDescription']:
placement['targetingDescription'] = 'Generic description'
placement['targetingAdLocation'] = 'All images on sports pages.'
placement['targetingSiteName'] = 'http://code.google.com'
placement['isAdSenseTargetingEnabled'] = 'true'
# Update placements remotely.
placements = placement_service.UpdatePlacements(placements)
# Display results.
if placements:
for placement in placements:
ad_unit_ids = ''
if 'targetedAdUnitIds' in placement:
ad_unit_ids = ', '.join(placement['targetedAdUnitIds'])
print ('Placement with id \'%s\', name \'%s\', and AdSense targeting '
'enabled \'%s\' was updated.'
% (placement['id'], placement['name'],
placement['isAdSenseTargetingEnabled']))
else:
print 'No placements were updated.'
else:
print 'No placements found to update.'
| 35.648649
| 80
| 0.720243
|
a6b1b256a5b669536fe3eccdd5e397027ecb1931
| 2,348
|
py
|
Python
|
fits/ShowHeader.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | 5
|
2016-05-28T14:12:28.000Z
|
2021-04-22T10:23:12.000Z
|
fits/ShowHeader.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | null | null | null |
fits/ShowHeader.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | 2
|
2015-07-13T10:04:10.000Z
|
2021-04-22T10:23:23.000Z
|
"""
Extremely simple script that prints out a FITS header to stdout.
Defaults to the 0th extension if not specified.
Accepts a wildcard in the name, but then the filename must be given inside quote marks, e.g. "*.fits".
:date: Mar 27, 2009
:author: Sami-Matias Niemi
:contact: sniemi@email.unc.edu
"""
import sys
import pyfits as PF
__author__ = 'Sami-Matias Niemi'
__version__ = '1.0'
def containsAny(str, set):
"""
Checks if a given string contains any of the characters in a given set.
:param str: input string
:type str: string
    :param set: set of characters
:type set: string
:rtype: boolean
"""
for c in set:
if c in str: return True
return False
def containsAll(str, set):
"""
Checks if a given string contains all characters in a given set.
:param str: input string
:type: string
    :param set: set of characters
:type: string
:rtype: boolean
"""
for c in set:
if c not in str: return False
return True
def showHeader(filename, extension):
"""
Shows the FITS header of a given file.
:note: Ignores missing END, for non-standard FITS files.
:param filename: name of the file
:type filename: string
:param extension: number of the FITS extension
:type extension: integer
"""
try:
if containsAny(filename, '*'):
print 'A wildcard detected..\n'
import glob
files = glob.glob(filename)
for file in files:
hdulist = PF.open(file, ignore_missing_end=True)
hd = hdulist[extension].header
hdulist.close()
print 'Header extension %i of %s' % (extension, file)
print hd
print
else:
hdulist = PF.open(filename, ignore_missing_end=True)
hd = hdulist[extension].header
hdulist.close()
print
print hd
except:
        sys.exit('\nError while opening file %s and reading extension %i' % (filename, extension))
if __name__ == "__main__":
try:
filename = sys.argv[1]
extension = int(sys.argv[2])
except:
print '\nNo header extension given, will print the first extension header of file: %s\n' % filename
extension = 0
showHeader(filename, extension)
| 26.088889
| 107
| 0.61201
|
80477249a567d1c8be69e4196d8693e9c474c1ed
| 85
|
py
|
Python
|
alchemist_lib/broker/__init__.py
|
Dodo33/alchemist-lib
|
40c2d3b48d5f46315eb09e7f572d578b7e5324b4
|
[
"MIT"
] | 5
|
2018-07-11T05:38:51.000Z
|
2021-12-19T03:06:51.000Z
|
alchemist_lib/broker/__init__.py
|
Dodo33/alchemist-lib
|
40c2d3b48d5f46315eb09e7f572d578b7e5324b4
|
[
"MIT"
] | null | null | null |
alchemist_lib/broker/__init__.py
|
Dodo33/alchemist-lib
|
40c2d3b48d5f46315eb09e7f572d578b7e5324b4
|
[
"MIT"
] | 2
|
2019-07-12T08:51:11.000Z
|
2021-09-29T22:22:46.000Z
|
from .poloniexbroker import PoloniexBroker
from .bittrexbroker import BittrexBroker
| 21.25
| 42
| 0.870588
|
41ea2b9c5a86e4ad3b8eb7571cdc243704514c62
| 795
|
py
|
Python
|
canopy/load_config.py
|
CanopySimulations/canopy-python
|
9ec37e674e65d6fbef0402ac0c612c163d55631e
|
[
"MIT"
] | null | null | null |
canopy/load_config.py
|
CanopySimulations/canopy-python
|
9ec37e674e65d6fbef0402ac0c612c163d55631e
|
[
"MIT"
] | 1
|
2022-01-31T10:18:08.000Z
|
2022-01-31T10:18:08.000Z
|
canopy/load_config.py
|
CanopySimulations/canopy-python
|
9ec37e674e65d6fbef0402ac0c612c163d55631e
|
[
"MIT"
] | null | null | null |
from typing import Optional
import canopy
async def load_config(
session: canopy.Session,
config_id: str,
tenant_id: Optional[str] = None,
sub_tree_path: Optional[str] = None,
sim_version: Optional[str] = None) -> canopy.ConfigResult:
session.authentication.authenticate()
if tenant_id is None:
tenant_id = session.authentication.tenant_id
config_api = canopy.openapi.ConfigApi(session.async_client)
config_result: canopy.openapi.GetConfigQueryResult = await config_api.config_get_config(
tenant_id,
config_id,
**canopy.defined_kwargs(
sim_version=sim_version,
sub_tree_path=sub_tree_path))
return canopy.ConfigResult(config_result.config, config_result.user_information)
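# Usage sketch (illustrative only; how the canopy.Session is constructed and the
# config id below are assumptions, not part of this module):
#
# import asyncio
#
# async def _example(session: canopy.Session) -> None:
#     result = await load_config(session, config_id='<config-id>')
#     print(result.user_information)
#
# asyncio.run(_example(my_session))  # my_session: a previously created canopy.Session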
| 29.444444
| 92
| 0.703145
|
43bc52c78a2510151693809f7fe4c68ded6ddcc4
| 2,347
|
py
|
Python
|
line_chart.py
|
atik2002/HackCBS4.0
|
bb0f954bacf3047e3d0856a42de83cecf8758bb1
|
[
"MIT"
] | null | null | null |
line_chart.py
|
atik2002/HackCBS4.0
|
bb0f954bacf3047e3d0856a42de83cecf8758bb1
|
[
"MIT"
] | null | null | null |
line_chart.py
|
atik2002/HackCBS4.0
|
bb0f954bacf3047e3d0856a42de83cecf8758bb1
|
[
"MIT"
] | null | null | null |
import pandas as pd
import plotly.express as px
import numpy as np
'''
def data_clean(state, city):
state_name=state
city_name=city
dfa=pd.read_csv(f"data/ahs-cab2014-{state_name.lower()}-{city_name.lower()}.csv")
s=["state_code","district_code","rural_urban","stratum","test_salt_iodine","record_code_iodine","Sex","usual_residance","Age_Code","Age","date_of_birth","month_of_birth","year_of_birth","Weight_in_kg","Length_height_cm","Haemoglobin_test","Haemoglobin_level","BP_systolic","BP_systolic_2_reading","BP_Diastolic","BP_Diastolic_2reading","Pulse_rate","Pulse_rate_2_reading","Diabetes_test","fasting_blood_glucose_mg_dl","Marital_status","gauna_perfor_not_perfor","duration_pregnanacy","first_breast_feeding","is_cur_breast_feeding","day_or_mn_for_breast_feeding_cd","day_or_month_for_breast_feeding","water_month","ani_milk_month","semisolid_month_or_day","solid_month","vegetables_month_or_day","illness_type","illness_duration"]
df=dfa[s].sort_values("year_of_birth")
return df
'''
def avg_height(dataset, gender):
dataset=dataset[dataset["Sex"]==str(gender.capitalize())]
year_list=list(dataset[dataset["year_of_birth"]<=1995]["year_of_birth"].sort_values().unique())
avg_h=[np.mean(dataset[dataset["year_of_birth"]==i]["Length_height_cm"]) for i in year_list]
height_age_df=pd.DataFrame({"Year":year_list, f"Average_height_{gender}":avg_h})
return height_age_df.dropna()
def avg_weight(dataset,gender):
dataset=dataset[dataset["Sex"]==str(gender.capitalize())]
year_list=list(dataset[dataset["year_of_birth"]<=1995]["year_of_birth"].sort_values().unique())
avg_w=[np.mean(dataset[dataset["year_of_birth"]==i]["Weight_in_kg"]) for i in year_list]
weight_age_df=pd.DataFrame({"Year":year_list, f"Average_weight_{gender}":avg_w})
return weight_age_df.dropna()
def avg_male_female_height(dataset):
datafr=pd.DataFrame({"Year": avg_height(dataset,"male")["Year"], "Male": avg_height(dataset, "male")["Average_height_male"], "Female": avg_height(dataset, "female")["Average_height_female"]})
return datafr
def avg_male_female_weight(dataset):
datafr=pd.DataFrame({"Year": avg_weight(dataset, "male")["Year"], "Male": avg_weight(dataset, "male")["Average_weight_male"], "Female": avg_weight(dataset, "female")["Average_weight_female"]})
return datafr
| 61.763158
| 732
| 0.757989
|
1baaad6877c54891d04b4a077b8b9564d2ef383f
| 2,856
|
py
|
Python
|
tdmelodic/filters/postprocess_modify_unigram_cost.py
|
tachi-hi/tdmelodic
|
9b146c9c788eb730855376c86a8b0eab4d8438ca
|
[
"BSD-3-Clause"
] | 73
|
2020-09-18T06:37:11.000Z
|
2022-03-15T12:40:47.000Z
|
tdmelodic/filters/postprocess_modify_unigram_cost.py
|
tachi-hi/tdmelodic
|
9b146c9c788eb730855376c86a8b0eab4d8438ca
|
[
"BSD-3-Clause"
] | 4
|
2021-02-06T14:52:39.000Z
|
2021-11-04T10:02:45.000Z
|
tdmelodic/filters/postprocess_modify_unigram_cost.py
|
tachi-hi/tdmelodic
|
9b146c9c788eb730855376c86a8b0eab4d8438ca
|
[
"BSD-3-Clause"
] | 9
|
2020-09-18T14:49:15.000Z
|
2022-03-18T02:53:54.000Z
|
# -----------------------------------------------------------------------------
# Copyright (c) 2019-, PKSHA Technology Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import csv
import copy
from tqdm import tqdm
from tdmelodic.util.dic_index_map import get_dictionary_index_map
from tdmelodic.util.util import count_lines
# A script to fine-tune the unigram cost and other values in post-processing
IDX_MAP = get_dictionary_index_map("unidic")
def avoid_overflow(line, cost, INT16_MIN = -32768, INT16_MAX = 32767):
"""avoid overflow (signed short int)"""
cost = INT16_MAX if cost > INT16_MAX else INT16_MIN if cost < INT16_MIN else cost
line[IDX_MAP["COST"]] = str(cost)
return line, cost
def modify_unigram_cost(line, verbose=True):
cost = int(line[IDX_MAP["COST"]])
    # Adjust the cost of numerals as needed
if (line[IDX_MAP["SURFACE"]][0] in [str(i) for i in range(10)]) and len(line[1]) >= 2:
cost = cost - 5000
    # Adjust the cost of person names as needed
elif line[IDX_MAP["POS1"]] == "名詞" and line[IDX_MAP["POS2"]] == "固有名詞" and line[IDX_MAP["POS3"]] == "人名":
cost = cost + 5000
else:
        # If necessary, the cost of other words could also be raised across the board
        # (e.g. to give priority to UniDic when the same word exists in UniDic)
pass
#cost = cost + 10000
line, cost = avoid_overflow(line, cost)
return line
# ------------------------------------------------------------------------------------
def main_(fp_in, fp_out):
L = count_lines(fp_in)
for i, line in enumerate(tqdm(csv.reader(fp_in), total=L)):
        # Adjust the unigram cost
line_modified = modify_unigram_cost(copy.deepcopy(line))
if i % 100000 == 0:
print(i)
print("before", line, file=sys.stderr)
print("after", line_modified, file=sys.stderr)
# output
line = ','.join(line_modified) + '\n'
fp_out.write(line)
print("Complete!", file=sys.stderr)
return
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--input',
nargs='?',
type=argparse.FileType("r"),
default=sys.stdin,
help='input CSV file (NEologd dicitionary file) <default=STDIN>')
parser.add_argument(
'-o',
'--output',
nargs='?',
type=argparse.FileType("w"),
default=sys.stdout,
help='output CSV file <default=STDOUT>')
args = parser.parse_args()
if args.input == args.output:
print("[ Error ] intput and output files should be different.")
else:
try:
main_(args.input, args.output)
except Exception as e:
print(e)
if __name__ == '__main__':
main()
| 29.75
| 109
| 0.573529
|
7cde1a5689e1e8bb5a38e43a03c47de725e697fe
| 682
|
py
|
Python
|
venv/lib/python3.8/site-packages/django/conf/locale/ga/formats.py
|
Joshua-Barawa/My-Photos
|
adcaea48149c6b31e9559b045709d538d0b749bc
|
[
"PostgreSQL",
"Unlicense"
] | 16
|
2019-08-10T12:24:06.000Z
|
2020-05-21T09:11:14.000Z
|
venv/lib/python3.8/site-packages/django/conf/locale/ga/formats.py
|
Joshua-Barawa/My-Photos
|
adcaea48149c6b31e9559b045709d538d0b749bc
|
[
"PostgreSQL",
"Unlicense"
] | 12
|
2019-08-10T11:55:29.000Z
|
2020-05-21T04:46:30.000Z
|
venv/lib/python3.8/site-packages/django/conf/locale/ga/formats.py
|
Joshua-Barawa/My-Photos
|
adcaea48149c6b31e9559b045709d538d0b749bc
|
[
"PostgreSQL",
"Unlicense"
] | 4
|
2022-03-12T10:17:00.000Z
|
2022-03-26T08:40:43.000Z
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j F Y"
TIME_FORMAT = "H:i"
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = "j F"
SHORT_DATE_FORMAT = "j M Y"
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = "."
THOUSAND_SEPARATOR = ","
# NUMBER_GROUPING =
| 31
| 78
| 0.766862
|
4c591f76f4480b03bfa938e5d06a60c86d868dce
| 1,512
|
py
|
Python
|
behavioral_patterns/pub;ish_subscribe/app.py
|
Stihotvor/python3_patterns
|
01627a2506d81982d9805ff6b1cda14ba214ee2e
|
[
"MIT"
] | null | null | null |
behavioral_patterns/pub;ish_subscribe/app.py
|
Stihotvor/python3_patterns
|
01627a2506d81982d9805ff6b1cda14ba214ee2e
|
[
"MIT"
] | null | null | null |
behavioral_patterns/pub;ish_subscribe/app.py
|
Stihotvor/python3_patterns
|
01627a2506d81982d9805ff6b1cda14ba214ee2e
|
[
"MIT"
] | null | null | null |
from publisher import Publisher
from subscriber import Subscriber
class Broker:
def __init__(self):
self.topics = set()
self.subscriptions = {}
self.message_queue = {}
def create_topic(self, topic_name):
self.topics.add(topic_name)
def subscribe(self, sub_name, topic_name):
if topic_name in self.topics:
self.subscriptions[topic_name] = sub_name
self.message_queue[sub_name] = []
else:
raise Exception('Topic does not exist')
def publish(self, topic_name, msg):
if (topic_name in self.topics) and self.subscriptions[topic_name]:
self.message_queue[self.subscriptions[topic_name]].append(msg)
else:
raise Exception('Topic or subscription does not exist')
def pull(self, sub_name):
        if sub_name in self.message_queue:
if len(self.message_queue[sub_name]) == 0:
return []
else:
return self.message_queue[sub_name].pop(0)
else:
raise Exception('Subscription does not exist')
if __name__ == '__main__':
topic = 'topic'
subscription = 'sub'
mb = Broker()
pub = Publisher(broker=mb, topic_name=topic)
sub = Subscriber(broker=mb, topic_name=topic, sub_name=subscription)
pub.create_topic()
print(mb.topics)
sub.subscribe()
print(mb.subscriptions)
print(mb.message_queue)
pub.publish('Hi there')
message = sub.pull()
print(message)
| 30.24
| 74
| 0.628968
|
6fd3bb35b2d0d5bfdd48615828f6272c65dc2a64
| 2,839
|
py
|
Python
|
sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/application_type_version_resource.py
|
vchske/azure-sdk-for-python
|
6383ed3676b7355af7be394562b126209961ec13
|
[
"MIT"
] | null | null | null |
sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/application_type_version_resource.py
|
vchske/azure-sdk-for-python
|
6383ed3676b7355af7be394562b126209961ec13
|
[
"MIT"
] | 1
|
2019-06-04T18:12:16.000Z
|
2019-06-04T18:12:16.000Z
|
sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/application_type_version_resource.py
|
vchske/azure-sdk-for-python
|
6383ed3676b7355af7be394562b126209961ec13
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class ApplicationTypeVersionResource(ProxyResource):
"""An application type version resource for the specified application type
name resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: Azure resource location.
:type location: str
:param tags: Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response
:vartype provisioning_state: str
:param app_package_url: Required. The URL to the application package
:type app_package_url: str
:ivar default_parameter_list: List of application type parameters that can
be overridden when creating or updating the application.
:vartype default_parameter_list: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'provisioning_state': {'readonly': True},
'app_package_url': {'required': True},
'default_parameter_list': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'app_package_url': {'key': 'properties.appPackageUrl', 'type': 'str'},
'default_parameter_list': {'key': 'properties.defaultParameterList', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(ApplicationTypeVersionResource, self).__init__(**kwargs)
self.provisioning_state = None
self.app_package_url = kwargs.get('app_package_url', None)
self.default_parameter_list = None
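# Illustrative construction sketch (not part of the generated code; the location,
# package URL and tags below are placeholder values):
#
# resource = ApplicationTypeVersionResource(
#     location='eastus',
#     app_package_url='https://example.invalid/apptypes/myapp/1.0.sfpkg',
#     tags={'env': 'test'})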
| 38.890411
| 94
| 0.620993
|
efad26f0f718db3c7a62292d5152445291f69361
| 436
|
py
|
Python
|
zucchini/penalizers/__init__.py
|
zucchini/zucchini
|
eedd3d8020ec3e80b7c218785379ceb32bc05e38
|
[
"Apache-2.0"
] | 3
|
2018-03-27T18:09:54.000Z
|
2021-04-08T03:03:55.000Z
|
zucchini/penalizers/__init__.py
|
zucchini/zucchini
|
eedd3d8020ec3e80b7c218785379ceb32bc05e38
|
[
"Apache-2.0"
] | 337
|
2017-12-17T13:22:26.000Z
|
2022-03-28T02:05:09.000Z
|
zucchini/penalizers/__init__.py
|
zucchini/zucchini
|
eedd3d8020ec3e80b7c218785379ceb32bc05e38
|
[
"Apache-2.0"
] | 7
|
2018-01-10T18:46:26.000Z
|
2020-10-17T17:47:07.000Z
|
from .exceptions import InvalidPenalizerConfigError
from .penalizer_interface import PenalizerInterface
from .late_penalizer import LatePenalizer
from .checkoff_penalizer import CheckoffPenalizer
__all__ = ['InvalidPenalizerConfigError', 'PenalizerInterface',
'LatePenalizer', 'CheckoffPenalizer']
_PENALIZERS = (
LatePenalizer,
CheckoffPenalizer
)
AVAILABLE_PENALIZERS = {cls.__name__: cls for cls in _PENALIZERS}
| 31.142857
| 65
| 0.807339
|
781f28be0f777f53114a912b4a2346a8aebf8484
| 9,917
|
py
|
Python
|
tests/acceptance/test_artificial.py
|
acconeer/vunit
|
88f0d9ebb8fb9c3145ab5098604c1e7ddc53659d
|
[
"Artistic-2.0"
] | 60
|
2015-01-12T14:52:22.000Z
|
2021-11-17T17:11:21.000Z
|
tests/acceptance/test_artificial.py
|
acconeer/vunit
|
88f0d9ebb8fb9c3145ab5098604c1e7ddc53659d
|
[
"Artistic-2.0"
] | 84
|
2015-01-14T21:53:29.000Z
|
2016-01-24T13:07:03.000Z
|
tests/acceptance/test_artificial.py
|
LarsAsplund/vunit
|
40135d936f0e3d6a77bb2e360b0611c2e70721a9
|
[
"Artistic-2.0"
] | 28
|
2015-02-17T12:37:06.000Z
|
2020-03-09T01:02:54.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2019, Lars Asplund lars.anders.asplund@gmail.com
"""
Acceptance test of VUnit end to end functionality
"""
import unittest
from os.path import join, dirname
from os import environ
from subprocess import call
import sys
from tests.common import check_report
from vunit.sim_if.common import has_simulator, simulator_is
@unittest.skipUnless(has_simulator(), "Requires simulator")
class TestVunitArtificial(unittest.TestCase):
"""
Acceptance test of VUnit end to end functionality using
artificial test benches.
"""
def setUp(self):
if simulator_is("activehdl"):
self.output_path = join(dirname(__file__), "artificial_out")
else:
# Spaces in path intentional to verify that it is supported
self.output_path = join(dirname(__file__), "artificial _out")
self.report_file = join(self.output_path, "xunit.xml")
self.artificial_run_vhdl = join(
dirname(__file__), "artificial", "vhdl", "run.py"
)
self.artificial_run_verilog = join(
dirname(__file__), "artificial", "verilog", "run.py"
)
@unittest.skipUnless(
simulator_is("modelsim", "rivierapro"),
"Only simulators with persistance functionality",
)
def test_artificial_modelsim_unique_sim(self):
self._test_artificial(args=["--unique-sim"])
def test_artificial(self):
self._test_artificial()
def test_artificial_elaborate_only(self):
self.check(self.artificial_run_vhdl, exit_code=1, args=["--elaborate"])
elab_expected_report = []
for status, name in EXPECTED_REPORT:
if name in ("lib.tb_elab_fail.all",):
status = "failed"
else:
status = "passed"
elab_expected_report.append((status, name))
check_report(self.report_file, elab_expected_report)
self.check(
self.artificial_run_vhdl,
exit_code=0,
clean=False,
args=["--elaborate", "lib.tb_pass.all"],
)
check_report(self.report_file, [("passed", "lib.tb_pass.all")])
self.check(
self.artificial_run_vhdl,
exit_code=1,
clean=False,
args=["--elaborate", "lib.tb_elab_fail.all"],
)
check_report(self.report_file, [("failed", "lib.tb_elab_fail.all")])
def _test_artificial(self, args=None):
"""
Utility function to run and check the result of all test benches
using either persistent or non-persistent simulator interface mode
"""
self.check(self.artificial_run_vhdl, exit_code=1, args=args)
check_report(self.report_file, EXPECTED_REPORT)
def test_run_selected_tests_in_same_sim_test_bench_vhdl(self):
self._test_run_selected_tests_in_same_sim_test_bench(self.artificial_run_vhdl)
@unittest.skipUnless(simulator_is("modelsim"), "Only modelsim supports verilog")
def test_run_selected_tests_in_same_sim_test_bench_verilog(self):
self._test_run_selected_tests_in_same_sim_test_bench(
self.artificial_run_verilog
)
def _test_run_selected_tests_in_same_sim_test_bench(self, run_file):
"""
Run selected "same_sim" test in isolation
"""
self.check(
run_file, exit_code=0, clean=True, args=["*same_sim_some_fail*Test 1*"]
)
check_report(self.report_file, [("passed", "lib.tb_same_sim_some_fail.Test 1")])
self.check(
run_file, exit_code=1, clean=False, args=["*same_sim_some_fail*Test 2*"]
)
check_report(self.report_file, [("failed", "lib.tb_same_sim_some_fail.Test 2")])
self.check(
run_file, exit_code=0, clean=False, args=["*same_sim_some_fail*Test 3*"]
)
check_report(self.report_file, [("passed", "lib.tb_same_sim_some_fail.Test 3")])
self.check(
run_file,
exit_code=1,
clean=False,
args=["*same_sim_some_fail*Test 2*", "*same_sim_some_fail*Test 3*"],
)
check_report(
self.report_file,
[
("failed", "lib.tb_same_sim_some_fail.Test 2"),
("skipped", "lib.tb_same_sim_some_fail.Test 3"),
],
)
@unittest.skipUnless(simulator_is("modelsim"), "Only modelsim supports verilog")
def test_artificial_verilog(self):
self.check(self.artificial_run_verilog, exit_code=1)
check_report(
self.report_file,
[
("passed", "lib.tb_other_file_tests.pass"),
("failed", "lib.tb_other_file_tests.fail"),
("passed", "lib.tb_magic_paths.Test magic paths are correct"),
("passed", "lib.tb_with_define.test 1"),
("failed", "lib.tb_fail_on_warning.fail"),
("failed", "lib.tb_fail_on_fatal_and_early_finish.fatal0"),
("failed", "lib.tb_fail_on_fatal_and_early_finish.fatal1"),
("failed", "lib.tb_fail_on_fatal_and_early_finish.finish0"),
("failed", "lib.tb_fail_on_fatal_and_early_finish.finish1"),
("passed", "lib.tb_with_parameter_config.Test 0"),
("passed", "lib.tb_with_parameter_config.cfg.Test 1"),
("passed", "lib.tb_with_parameter_config.Test 2"),
("passed", "lib.tb_with_parameter_config.cfg.Test 3"),
("passed", "lib.tb_with_parameter_config.cfg.Test 4"),
("passed", "lib.tb_same_sim_all_pass.cfg.Test 1"),
("passed", "lib.tb_same_sim_all_pass.cfg.Test 2"),
("passed", "lib.tb_same_sim_all_pass.cfg.Test 3"),
("passed", "lib.tb_same_sim_some_fail.Test 1"),
("failed", "lib.tb_same_sim_some_fail.Test 2"),
("skipped", "lib.tb_same_sim_some_fail.Test 3"),
("passed", "lib.tb_with_runner.pass"),
("failed", "lib.tb_with_runner.fail"),
],
)
# pylint: disable=too-many-arguments
def check(self, run_file, args=None, clean=True, exit_code=0):
"""
Run external run file and verify exit code
"""
args = args if args is not None else []
new_env = environ.copy()
new_env["VUNIT_VHDL_STANDARD"] = "2008"
if clean:
args += ["--clean"]
retcode = call(
[
sys.executable,
run_file,
"--output-path=%s" % self.output_path,
"--xunit-xml=%s" % self.report_file,
]
+ args,
env=new_env,
)
self.assertEqual(retcode, exit_code)
def test_exit_0_flag(self):
self.check(self.artificial_run_vhdl, exit_code=1, args=["lib.tb_fail.all"])
self.check(
self.artificial_run_vhdl, exit_code=0, args=["--exit-0", "lib.tb_fail.all"]
)
EXPECTED_REPORT = (
("passed", "lib.tb_other_file_tests.pass"),
("failed", "lib.tb_other_file_tests.fail"),
("passed", "lib.tb_pass.all"),
("failed", "lib.tb_fail.all"),
("passed", "lib.tb_infinite_events.all"),
("failed", "lib.tb_fail_on_warning.all"),
("passed", "lib.tb_no_fail_on_warning.all"),
("passed", "lib.tb_with_vhdl_runner.pass"),
("passed", "lib.tb_with_vhdl_runner.Test with spaces"),
("failed", "lib.tb_with_vhdl_runner.fail"),
("failed", "lib.tb_with_vhdl_runner.Test that timeouts"),
("passed", "lib.tb_magic_paths.all"),
("passed", "lib.tb_no_fail_after_cleanup.all"),
("failed", "lib.tb_elab_fail.all"),
("passed", "lib.tb_same_sim_all_pass.cfg.Test 1"),
("passed", "lib.tb_same_sim_all_pass.cfg.Test 2"),
("passed", "lib.tb_same_sim_all_pass.cfg.Test 3"),
("passed", "lib.tb_same_sim_some_fail.Test 1"),
("failed", "lib.tb_same_sim_some_fail.Test 2"),
("skipped", "lib.tb_same_sim_some_fail.Test 3"),
("passed", "lib.tb_with_checks.Test passing check"),
("failed", "lib.tb_with_checks.Test failing check"),
("failed", "lib.tb_with_checks.Test non-stopping failing check"),
("passed", "lib.tb_set_generic.all"),
("passed", "lib.tb_with_generic_config.Test 0"),
("passed", "lib.tb_with_generic_config.cfg.Test 1"),
("passed", "lib.tb_with_generic_config.Test 2"),
("passed", "lib.tb_with_generic_config.cfg.Test 3"),
("passed", "lib.tb_with_generic_config.cfg.Test 4"),
("passed", "lib.tb_no_generic_override.all"),
("passed", "lib.tb_ieee_warning.pass"),
("failed", "lib.tb_ieee_warning.fail"),
(
"failed",
"lib.tb_assert_stop_level.Report warning when VHDL assert stop level = warning",
),
(
"failed",
"lib.tb_assert_stop_level.Report error when VHDL assert stop level = warning",
),
(
"failed",
"lib.tb_assert_stop_level.Report failure when VHDL assert stop level = warning",
),
(
"passed",
"lib.tb_assert_stop_level.Report warning when VHDL assert stop level = error",
),
(
"failed",
"lib.tb_assert_stop_level.Report error when VHDL assert stop level = error",
),
(
"failed",
"lib.tb_assert_stop_level.Report failure when VHDL assert stop level = error",
),
(
"passed",
"lib.tb_assert_stop_level.Report warning when VHDL assert stop level = failure",
),
(
"passed",
"lib.tb_assert_stop_level.Report error when VHDL assert stop level = failure",
),
(
"failed",
"lib.tb_assert_stop_level.Report failure when VHDL assert stop level = failure",
),
)
| 37.996169
| 88
| 0.615811
|
5e28cdfc9342b1edf54d326e834ba22a30b4da95
| 3,758
|
py
|
Python
|
utils/muad_dataset.py
|
DerrickXuNu/muad
|
559bf91898745b2ea72ba888e0f55f7228ac85b4
|
[
"BSD-2-Clause"
] | null | null | null |
utils/muad_dataset.py
|
DerrickXuNu/muad
|
559bf91898745b2ea72ba888e0f55f7228ac85b4
|
[
"BSD-2-Clause"
] | null | null | null |
utils/muad_dataset.py
|
DerrickXuNu/muad
|
559bf91898745b2ea72ba888e0f55f7228ac85b4
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Customized dataset class for muad
"""
import os
import sys
import pandas as pd
import torch.multiprocessing as multiprocessing
from img2vec_pytorch import Img2Vec
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from models import common
class MuadDataset(Dataset):
"""Muad Dataset class inherited from Pytorch Dataset for efficient data pipeline
Parameters
----------
image_path: string
The path that contains the images
excel_path : string
        Location of the data excel file, e.g.: ../data/CrisisMMD_v2.0/train_cleaned.xlsx
TODO: This is a temporary solution just for CrisisMMD dataset
image_model_name: string
Torchvision pretrained model name, only 'resnet-18' and alexnet supported for now
text_model_name: string
NLP model to process sentences. Only 'word2vec' and 'bert' supported
Attributes
----------
image_loc_list : list of string
Containing all images' paths
sentences: list of string
Containing all raw texts
text_model: model object
The NLP model we are using. Only Word2Vec and Bert are supported
image_model: model object
The image model we are using. Only resnet-18 and alexnet are supported
label: list of int
1 indicate abnormal while 0 represents normal datapoint
"""
def __init__(self, image_path, excel_path, image_model_name='resnet-18', text_model_name='bert'):
df = pd.read_excel(excel_path)
"""
Image Feature Extraction
"""
self.image_loc_list = []
for index, row in df.iterrows():
image_name = os.path.join(image_path, row["image"] if os.name != 'nt'
else row["image"].replace('/', "\\"))
self.image_loc_list.append(image_name)
# TODO: Make cuda parameterized
self.image_model = Img2Vec(cuda=True, model=image_model_name)
"""
Text Raw Info
"""
self.sentences = df["tweet_text"].values
if text_model_name == 'bert':
self.text_model = common.Bert()
elif text_model_name == 'word2vec':
self.text_model = common.Word2Vec()
else:
sys.exit("Invalid NLP model option. Only bert and word2vec is upp")
assert(len(self.image_loc_list) == len(self.sentences))
self.label = df["anomaly"].values
def __len__(self):
return len(self.image_loc_list)
def __getitem__(self, idx):
"""
Args:
idx (int): Indexes of data
Returns:
dictionary: containing the image feature, language feature and label
"""
# To save memory, we put the image feature extraction part in data retrieval stage
image_pil = Image.open(self.image_loc_list[idx]).convert('RGB')
# TODO: The dimension shouldn't be hardcoded in the future
image_feature = self.image_model.get_vec(image_pil)
sentence = self.sentences[idx]
text_feature = self.text_model.extract(sentence)
label = self.label[idx]
return {'image_features': image_feature, 'text_features': text_feature, 'label': label}
if __name__ == '__main__':
multiprocessing.set_start_method('spawn')
dataset = MuadDataset("../data/CrisisMMD_v2.0/CrisisMMD_v2.0",
"../data/CrisisMMD_v2.0/train_cleaned.xlsx")
dataloader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=4)
for i_batch, sample_batched in enumerate(dataloader):
print(i_batch, sample_batched['image_features'].size())
print(sample_batched['text_features'])
| 34.796296
| 101
| 0.641831
|
b7310b5ad2a5d544d6876f7fdf394f4669783114
| 95,521
|
py
|
Python
|
selfdrive/car/toyota/values.py
|
AnthonyMaiorani/dragonpilot
|
d3d3822b0ac6927da8c548620c5cfabf70f80dd2
|
[
"MIT"
] | null | null | null |
selfdrive/car/toyota/values.py
|
AnthonyMaiorani/dragonpilot
|
d3d3822b0ac6927da8c548620c5cfabf70f80dd2
|
[
"MIT"
] | null | null | null |
selfdrive/car/toyota/values.py
|
AnthonyMaiorani/dragonpilot
|
d3d3822b0ac6927da8c548620c5cfabf70f80dd2
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from cereal import car
from selfdrive.car import dbc_dict
from selfdrive.config import Conversions as CV
Ecu = car.CarParams.Ecu
MIN_ACC_SPEED = 19. * CV.MPH_TO_MS
PEDAL_HYST_GAP = 3. * CV.MPH_TO_MS
PEDAL_SCALE = 3.0
class CarControllerParams:
  ACCEL_HYST_GAP = 0.06 # don't change accel command for small oscillations within this value
ACCEL_MAX = 1.5 # m/s2, lower than allowed 2.0 m/s2 for tuning reasons
ACCEL_MIN = -3.5 # m/s2
STEER_MAX = 1500
STEER_DELTA_UP = 10 # 1.5s time to peak torque
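  # assuming the 100 Hz steering command rate, STEER_MAX / STEER_DELTA_UP = 1500 / 10 = 150 frames ~= 1.5 s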
STEER_DELTA_DOWN = 25 # always lower than 45 otherwise the Rav4 faults (Prius seems ok with 50)
STEER_ERROR_MAX = 350 # max delta between torque cmd and torque motor
# Car button codes
class CruiseButtons:
ACCEL_ACC = 9
DECEL_ACC = 10
SET_ACC = 8
ACCEL_CC = 6
DECEL_CC = 5
SET_CC = 1
CANCEL = 0
class CAR:
PRIUS = "TOYOTA PRIUS 2017"
PRIUS_TSS2 = "TOYOTA PRIUS TSS2 2021"
RAV4H = "TOYOTA RAV4 HYBRID 2017"
RAV4 = "TOYOTA RAV4 2017"
COROLLA = "TOYOTA COROLLA 2017"
LEXUS_RX = "LEXUS RX 2016"
LEXUS_RXH = "LEXUS RX HYBRID 2017"
LEXUS_RX_TSS2 = "LEXUS RX 2020"
LEXUS_RXH_TSS2 = "LEXUS RX HYBRID 2020"
CHR = "TOYOTA C-HR 2018"
CHRH = "TOYOTA C-HR HYBRID 2018"
CAMRY = "TOYOTA CAMRY 2018"
CAMRYH = "TOYOTA CAMRY HYBRID 2018"
CAMRY_TSS2 = "TOYOTA CAMRY 2021" # TSS 2.5
CAMRYH_TSS2 = "TOYOTA CAMRY HYBRID 2021"
HIGHLANDER = "TOYOTA HIGHLANDER 2017"
HIGHLANDER_TSS2 = "TOYOTA HIGHLANDER 2020"
HIGHLANDERH = "TOYOTA HIGHLANDER HYBRID 2018"
HIGHLANDERH_TSS2 = "TOYOTA HIGHLANDER HYBRID 2020"
AVALON = "TOYOTA AVALON 2016"
AVALON_2019 = "TOYOTA AVALON 2019"
AVALONH_2019 = "TOYOTA AVALON HYBRID 2019"
RAV4_TSS2 = "TOYOTA RAV4 2019"
COROLLA_TSS2 = "TOYOTA COROLLA TSS2 2019"
# LSS2 Lexus UX Hybrid is same as a TSS2 Corolla Hybrid
COROLLAH_TSS2 = "TOYOTA COROLLA HYBRID TSS2 2019"
LEXUS_ES_TSS2 = "LEXUS ES 2019"
LEXUS_ESH_TSS2 = "LEXUS ES HYBRID 2019"
LEXUS_ESH = "LEXUS ES HYBRID 2018"
SIENNA = "TOYOTA SIENNA 2018"
LEXUS_IS = "LEXUS IS 2018"
LEXUS_CTH = "LEXUS CT HYBRID 2018"
RAV4H_TSS2 = "TOYOTA RAV4 HYBRID 2019"
LEXUS_NXH = "LEXUS NX HYBRID 2018"
LEXUS_NX = "LEXUS NX 2018"
LEXUS_NX_TSS2 = "LEXUS NX 2020"
MIRAI = "TOYOTA MIRAI 2021" # TSS 2.5
ALPHARD_TSS2 = "TOYOTA ALPHARD 2020"
LEXUS_ISH = "LEXUS ISH 2017"
PRIUS_ALPHA = "TOYOTA PRIUS ALPHA 2017"
PRIUS_C = "TOYOTA PRIUS C 2016 (MODED)"
# (addr, cars, bus, 1/freq*100, vl)
STATIC_DSU_MSGS = [
(0x128, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.AVALON), 1, 3, b'\xf4\x01\x90\x83\x00\x37'),
(0x128, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1, 3, b'\x03\x00\x20\x00\x00\x52'),
(0x141, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 1, 2, b'\x00\x00\x00\x46'),
(0x160, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 1, 7, b'\x00\x00\x08\x12\x01\x31\x9c\x51'),
(0x161, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.AVALON, CAR.LEXUS_RX), 1, 7, b'\x00\x1e\x00\x00\x00\x80\x07'),
(0X161, (CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1, 7, b'\x00\x1e\x00\xd4\x00\x00\x5b'),
(0x283, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 3, b'\x00\x00\x00\x00\x00\x00\x8c'),
(0x2E6, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
(0x2E7, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
(0x33E, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0, 20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
(0x344, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 5, b'\x00\x00\x01\x00\x00\x00\x00\x50'),
(0x365, (CAR.PRIUS, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x00\x80\x03\x00\x08'),
(0x365, (CAR.RAV4, CAR.RAV4H, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 20, b'\x00\x00\x00\x80\xfc\x00\x08'),
(0x366, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x4d\x82\x40\x02\x00'),
(0x366, (CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 20, b'\x00\x72\x07\xff\x09\xfe\x00'),
(0x470, (CAR.PRIUS, CAR.LEXUS_RXH), 1, 100, b'\x00\x00\x02\x7a'),
(0x470, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.RAV4H, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1, 100, b'\x00\x00\x01\x79'),
(0x4CB, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 100, b'\x0c\x00\x00\x00\x00\x00\x00\x00'),
]
FINGERPRINTS = {
CAR.RAV4: [{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8
}],
CAR.RAV4H: [{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 296: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 547: 8, 548: 8, 550: 8, 552: 4, 560: 7, 562: 4, 581: 5, 608: 8, 610: 5, 643: 7, 705: 8, 713: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# Chinese RAV4
{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 830: 7, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1207: 8, 1227: 8, 1235: 8, 1263: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8
}],
CAR.PRIUS: [{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 512: 6, 513: 6, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 824: 2, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
  # 2019 LE
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2020 Prius Prime LE
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1235: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
  # 2020 Prius Prime Limited
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 824: 2, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8, 2026: 8, 2027: 8, 2029: 8, 2030: 8, 2031: 8
},
  # 2020 Central Europe Prime
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 8, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8
},
  # 2017 German Prius
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1777: 8, 1779: 8, 1792: 8, 1767: 4, 1863: 8, 1904: 8, 1912: 8, 1984: 8, 1988: 8, 1990: 8, 1992: 8, 1996: 8, 1998: 8, 2002: 8, 2010: 8, 2015: 8, 2016: 8, 2018: 8, 2024: 8, 2026: 8, 2030: 8
}],
CAR.PRIUS_TSS2: [{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 765: 8, 800: 8, 810: 2, 814: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1175: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1593: 8, 1595: 8, 1649: 8, 1653: 8, 1654: 8, 1655: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
}],
  # Corolla w/ added pedal support (512 and 513)
CAR.COROLLA: [{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 2, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1196: 8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2016: 8, 2017: 8, 2018: 8, 2019: 8, 2020: 8, 2021: 8, 2022: 8, 2023: 8, 2024: 8
}],
CAR.CAMRY: [
  # XLE and LE
{
36: 8, 37: 8, 119: 6, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1816: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
  # XSE and SE
# TODO: get proper fingerprint in stock mode
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 888: 8, 889: 8, 891: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1816: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
{
# 2019 XSE
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1767: 4, 1808: 8, 1816: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8
}],
CAR.CAMRYH: [
  # SE, LE and LE with Blindspot Monitor
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
  # SL
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
  # XLE
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2018 Chinese Camry Hybrid
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 800: 8, 810: 2, 812: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1112: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1235: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8
}],
CAR.HIGHLANDER: [{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1984: 8, 1988: 8, 1992: 8, 1996: 8, 1990: 8, 1998: 8
},
# 2019 Highlander XLE
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2017 Highlander Limited
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2018 Highlander Limited Platinum
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1263: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1585: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1988: 8, 1990: 8, 1996: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8
}],
CAR.HIGHLANDERH: [{
36: 8, 37: 8, 170: 8, 180: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
{
# 2019 Highlander Hybrid Limited Platinum
36: 8, 37: 8, 170: 8, 180: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
}],
CAR.COROLLAH_TSS2: [
# 2019 Taiwan Altis Hybrid
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 765: 8, 767: 4, 800: 8, 810: 2, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 896: 8, 898: 8, 918: 7, 921: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1082: 8, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1172: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1745: 8, 1775: 8, 1779: 8
},
# 2019 Chinese Levin Hybrid
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 896: 8, 898: 8, 921: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 1002: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1172: 8, 1235: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1745: 8, 1775: 8, 1779: 8
}
],
CAR.SIENNA: [
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 548: 8, 550: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 764: 8, 767: 4, 800: 8, 824: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 888: 8, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 918: 7, 921: 8, 933: 8, 944: 6, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1212: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1656: 8, 1664: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# XLE AWD 2018
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 548: 8, 550: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 764: 8, 767: 4, 800: 8, 824: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 921: 8, 933: 8, 944: 6, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1656: 8, 1664: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
}],
  # dp - fake values, used to generate the car selection list
CAR.LEXUS_ISH: [{ 65535: 1 }],
CAR.PRIUS_ALPHA: [{ 65535: 1 }],
CAR.PRIUS_C: [{ 65535: 1 }],
}
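
# Matching sketch (illustrative only, not the project's actual fingerprinting
# code): each fingerprint above maps a CAN address to its expected message
# length, so a car remains a candidate only while every observed
# {address: length} pair agrees with at least one of its stored fingerprints.
def fingerprint_candidates(observed):
  # Return the car models whose stored fingerprints are consistent with the
  # observed {address: length} pairs collected so far.
  candidates = []
  for car, prints in FINGERPRINTS.items():
    for fp in prints:
      if all(fp.get(addr) == length for addr, length in observed.items()):
        candidates.append(car)
        break
  return candidates
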
FW_VERSIONS = {
CAR.AVALON: {
(Ecu.esp, 0x7b0, None): [
b'F152607060\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510701300\x00\x00\x00\x00',
b'881510705100\x00\x00\x00\x00',
b'881510705200\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B41051\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230721100\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230721200\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0701100\x00\x00\x00\x00',
b'8646F0703000\x00\x00\x00\x00',
],
},
CAR.AVALON_2019: {
(Ecu.esp, 0x7b0, None): [
b'F152607140\x00\x00\x00\x00\x00\x00',
b'F152607171\x00\x00\x00\x00\x00\x00',
b'F152607110\x00\x00\x00\x00\x00\x00',
b'F152607180\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510703200\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B41080\x00\x00\x00\x00\x00\x00',
b'8965B07010\x00\x00\x00\x00\x00\x00',
b'8965B41090\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630735100\x00\x00\x00\x00',
b'\x01896630725300\x00\x00\x00\x00',
b'\x01896630738000\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0702100\x00\x00\x00\x00',
],
},
CAR.AVALONH_2019: {
(Ecu.esp, 0x7b0, None): [
b'F152641040\x00\x00\x00\x00\x00\x00',
b'F152641061\x00\x00\x00\x00\x00\x00',
b'F152641050\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510704200\x00\x00\x00\x00',
b'881514107100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B07010\x00\x00\x00\x00\x00\x00',
b'8965B41090\x00\x00\x00\x00\x00\x00',
b'8965B41070\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x02896630724000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x02896630737000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x02896630728000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0702100\x00\x00\x00\x00',
],
},
CAR.CAMRY: {
(Ecu.engine, 0x700, None): [
b'\x018966306L3100\x00\x00\x00\x00',
b'\x018966306L4200\x00\x00\x00\x00',
b'\x018966306L5200\x00\x00\x00\x00',
b'\x018966306P8000\x00\x00\x00\x00',
b'\x018966306Q3100\x00\x00\x00\x00',
b'\x018966306Q4000\x00\x00\x00\x00',
b'\x018966306Q4100\x00\x00\x00\x00',
b'\x018966306Q4200\x00\x00\x00\x00',
b'\x018966333Q9200\x00\x00\x00\x00',
b'\x018966333P3100\x00\x00\x00\x00',
b'\x018966333P3200\x00\x00\x00\x00',
b'\x018966333P4200\x00\x00\x00\x00',
b'\x018966333P4300\x00\x00\x00\x00',
b'\x018966333P4400\x00\x00\x00\x00',
b'\x018966333P4500\x00\x00\x00\x00',
b'\x018966333P4700\x00\x00\x00\x00',
b'\x018966333Q6000\x00\x00\x00\x00',
b'\x018966333Q6200\x00\x00\x00\x00',
b'\x018966333Q6300\x00\x00\x00\x00',
b'\x018966333W6000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x02333P1100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0602000 ',
b'8821F0603300 ',
b'8821F0604100 ',
b'8821F0605200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152606210\x00\x00\x00\x00\x00\x00',
b'F152606230\x00\x00\x00\x00\x00\x00',
b'F152606270\x00\x00\x00\x00\x00\x00',
b'F152606290\x00\x00\x00\x00\x00\x00',
b'F152606410\x00\x00\x00\x00\x00\x00',
b'F152633540\x00\x00\x00\x00\x00\x00',
b'F152633A10\x00\x00\x00\x00\x00\x00',
b'F152633A20\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0602000 ',
b'8821F0603300 ',
b'8821F0604100 ',
b'8821F0605200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603400 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607100 ',
],
},
CAR.CAMRYH: {
(Ecu.engine, 0x700, None): [
b'\x018966306Q6000\x00\x00\x00\x00',
b'\x018966333N1100\x00\x00\x00\x00',
b'\x018966333N4300\x00\x00\x00\x00',
b'\x018966333X0000\x00\x00\x00\x00',
b'\x018966333X4000\x00\x00\x00\x00',
b'\x01896633T16000\x00\x00\x00\x00',
b'\x028966306B2100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306B2300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8200\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8400\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S1100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633214\x00\x00\x00\x00\x00\x00',
b'F152633660\x00\x00\x00\x00\x00\x00',
b'F152633712\x00\x00\x00\x00\x00\x00',
b'F152633713\x00\x00\x00\x00\x00\x00',
b'F152633B51\x00\x00\x00\x00\x00\x00',
b'F152633B60\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604000 ',
b'8821F0604200 ',
b'8821F0605200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33550\x00\x00\x00\x00\x00\x00',
b'8965B33551\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33611\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604000 ',
b'8821F0604200 ',
b'8821F0605200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603400 ',
b'8646F0603500 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607000 ',
b'8646F0607100 ',
],
},
CAR.CAMRY_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B33630\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152606370\x00\x00\x00\x00\x00\x00',
b'\x01F152606390\x00\x00\x00\x00\x00\x00',
b'\x01F152606400\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x018966306Q5000\x00\x00\x00\x00',
b'\x018966306T3100\x00\x00\x00\x00',
b'\x018966306T3200\x00\x00\x00\x00',
b'\x018966306T4100\x00\x00\x00\x00',
],
    (Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F6201200\x00\x00\x00\x00',
],
    (Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F3305200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F3305300\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.CAMRYH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B33630\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633D00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x018966306Q6000\x00\x00\x00\x00',
b'\x018966306Q7000\x00\x00\x00\x00',
],
    (Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F6201200\x00\x00\x00\x00',
],
    (Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F3305200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F3305300\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.CHR: {
(Ecu.engine, 0x700, None): [
b'\x01896631017100\x00\x00\x00\x00',
b'\x01896631017200\x00\x00\x00\x00',
b'\x0189663F413100\x00\x00\x00\x00',
b'\x0189663F414100\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821F0W01100 ',
b'8821FF401600 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152610020\x00\x00\x00\x00\x00\x00',
b'F152610153\x00\x00\x00\x00\x00\x00',
b'F152610210\x00\x00\x00\x00\x00\x00',
b'F1526F4034\x00\x00\x00\x00\x00\x00',
b'F1526F4044\x00\x00\x00\x00\x00\x00',
b'F1526F4073\x00\x00\x00\x00\x00\x00',
b'F1526F4121\x00\x00\x00\x00\x00\x00',
b'F1526F4122\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10070\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x0331036000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x033F401100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203102\x00\x00\x00\x00',
b'\x033F424000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821FF407100 ',
b'8821F0W01100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF401800 ',
b'8646FF404000 ',
b'8646FF406000 ',
b'8646FF407000 ',
],
},
CAR.CHRH: {
(Ecu.engine, 0x700, None): [
b'\x02896631013200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F405000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F418000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F423000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F431000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0189663F438000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152610013\x00\x00\x00\x00\x00\x00',
b'F152610014\x00\x00\x00\x00\x00\x00',
b'F152610040\x00\x00\x00\x00\x00\x00',
b'F152610190\x00\x00\x00\x00\x00\x00',
b'F152610200\x00\x00\x00\x00\x00\x00',
b'F152610230\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821FF402300 ',
b'8821FF402400 ',
b'8821FF404000 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10020\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10050\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF402300 ',
b'8821FF402400 ',
b'8821FF404000 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF402100 ',
b'8646FF404000 ',
b'8646FF406000 ',
b'8646FF407000 ',
],
},
CAR.COROLLA: {
(Ecu.engine, 0x7e0, None): [
b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0330ZC1200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510201100\x00\x00\x00\x00',
b'881510201200\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602190\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B02181\x00\x00\x00\x00\x00\x00',
b'8965B02191\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0201101\x00\x00\x00\x00',
b'8646F0201200\x00\x00\x00\x00',
],
},
CAR.COROLLA_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZG2000\x00\x00\x00\x00',
b'\x01896630ZG5000\x00\x00\x00\x00',
b'\x01896630ZG5100\x00\x00\x00\x00',
b'\x01896630ZG5200\x00\x00\x00\x00',
b'\x01896630ZG5300\x00\x00\x00\x00',
b'\x01896630ZP2000\x00\x00\x00\x00',
b'\x01896630ZQ5000\x00\x00\x00\x00',
b'\x018966312L8000\x00\x00\x00\x00',
b'\x018966312M0000\x00\x00\x00\x00',
b'\x018966312M9000\x00\x00\x00\x00',
b'\x018966312P9000\x00\x00\x00\x00',
b'\x018966312P9100\x00\x00\x00\x00',
b'\x018966312P9200\x00\x00\x00\x00',
b'\x018966312Q2300\x00\x00\x00\x00',
b'\x018966312R0000\x00\x00\x00\x00',
b'\x018966312R0100\x00\x00\x00\x00',
b'\x018966312R1000\x00\x00\x00\x00',
b'\x018966312R1100\x00\x00\x00\x00',
b'\x018966312R3100\x00\x00\x00\x00',
b'\x018966312S5000\x00\x00\x00\x00',
b'\x018966312S7000\x00\x00\x00\x00',
b'\x018966312W3000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230ZN4000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x03312M3000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
b'\x018965B1255000\x00\x00\x00\x00',
b'8965B12361\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152602280\x00\x00\x00\x00\x00\x00',
b'\x01F152602560\x00\x00\x00\x00\x00\x00',
b'\x01F152602590\x00\x00\x00\x00\x00\x00',
b'\x01F152602650\x00\x00\x00\x00\x00\x00',
b'\x01F152612641\x00\x00\x00\x00\x00\x00',
b'\x01F152612651\x00\x00\x00\x00\x00\x00',
b'\x01F152612B10\x00\x00\x00\x00\x00\x00',
b'\x01F152612B51\x00\x00\x00\x00\x00\x00',
b'\x01F152612B60\x00\x00\x00\x00\x00\x00',
b'\x01F152612B61\x00\x00\x00\x00\x00\x00',
b'\x01F152612B90\x00\x00\x00\x00\x00\x00',
b'\x01F152612C00\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1201400\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1202200\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.COROLLAH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZJ1000\x00\x00\x00\x00',
b'\x01896630ZU8000\x00\x00\x00\x00',
b'\x01896637621000\x00\x00\x00\x00',
b'\x01896637624000\x00\x00\x00\x00',
b'\x01896637626000\x00\x00\x00\x00',
b'\x01896637648000\x00\x00\x00\x00',
b'\x02896630ZJ5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZN8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZQ3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZR2000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZT8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZT9000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q4000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966312L7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1205001\x00\x00\x00\x00',
b'\x038966312N1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'8965B12451\x00\x00\x00\x00\x00\x00',
b'8965B76012\x00\x00\x00\x00\x00\x00',
b'8965B76050\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152612590\x00\x00\x00\x00\x00\x00',
b'F152612691\x00\x00\x00\x00\x00\x00',
b'F152612692\x00\x00\x00\x00\x00\x00',
b'F152612700\x00\x00\x00\x00\x00\x00',
b'F152612710\x00\x00\x00\x00\x00\x00',
b'F152612790\x00\x00\x00\x00\x00\x00',
b'F152612800\x00\x00\x00\x00\x00\x00',
b'F152612820\x00\x00\x00\x00\x00\x00',
b'F152612840\x00\x00\x00\x00\x00\x00',
b'F152612A00\x00\x00\x00\x00\x00\x00',
b'F152612A10\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152676293\x00\x00\x00\x00\x00\x00',
b'F152676303\x00\x00\x00\x00\x00\x00',
b'F152676304\x00\x00\x00\x00\x00\x00',
b'F152612D00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1201400\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F76020C0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F7603100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F7603200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER: {
(Ecu.engine, 0x700, None): [
b'\x01896630E09000\x00\x00\x00\x00',
b'\x01896630E43000\x00\x00\x00\x00',
b'\x01896630E43100\x00\x00\x00\x00',
b'\x01896630E43200\x00\x00\x00\x00',
b'\x01896630E44200\x00\x00\x00\x00',
b'\x01896630E45000\x00\x00\x00\x00',
b'\x01896630E45100\x00\x00\x00\x00',
b'\x01896630E45200\x00\x00\x00\x00',
b'\x01896630E46200\x00\x00\x00\x00',
b'\x01896630E74000\x00\x00\x00\x00',
b'\x01896630E75000\x00\x00\x00\x00',
b'\x01896630E76000\x00\x00\x00\x00',
b'\x01896630E77000\x00\x00\x00\x00',
b'\x01896630E83000\x00\x00\x00\x00',
b'\x01896630E84000\x00\x00\x00\x00',
b'\x01896630E85000\x00\x00\x00\x00',
b'\x01896630E86000\x00\x00\x00\x00',
b'\x01896630E88000\x00\x00\x00\x00',
b'\x01896630EA0000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48140\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
b'8965B48210\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F15260E011\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510E01100\x00\x00\x00\x00',
b'881510E01200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH: {
(Ecu.eps, 0x7a1, None): [
b'8965B48160\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648541\x00\x00\x00\x00\x00\x00',
b'F152648542\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230E40000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230E40100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
b'8965B48310\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E051\x00\x00\x00\x00\x00\x00',
b'\x01F15260E110\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630E62100\x00\x00\x00\x00',
b'\x01896630E62200\x00\x00\x00\x00',
b'\x01896630E64100\x00\x00\x00\x00',
b'\x01896630E64200\x00\x00\x00\x00',
b'\x01896630EB1000\x00\x00\x00\x00',
b'\x01896630EB1100\x00\x00\x00\x00',
b'\x01896630EB1200\x00\x00\x00\x00',
b'\x01896630EB2000\x00\x00\x00\x00',
b'\x01896630EB2100\x00\x00\x00\x00',
b'\x01896630EB2200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4803000\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
b'8965B48310\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15264872300\x00\x00\x00\x00',
b'\x01F15264872400\x00\x00\x00\x00',
b'\x01F15264872500\x00\x00\x00\x00',
b'\x01F152648C6300\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
      b'\x01896630EA1000\x00\x00\x00\x00',
b'\x01896630EA1000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630E66000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630EB3000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630EB3100\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4803000\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.LEXUS_IS: {
(Ecu.engine, 0x700, None): [
b'\x018966353M7100\x00\x00\x00\x00',
b'\x018966353Q2000\x00\x00\x00\x00',
b'\x018966353Q2300\x00\x00\x00\x00',
b'\x018966353R1100\x00\x00\x00\x00',
b'\x018966353R7100\x00\x00\x00\x00',
b'\x018966353R8100\x00\x00\x00\x00',
b'\x018966353Q4000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0232480000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02353P7000\x00\x00\x00\x00\x00\x00\x00\x00530J5000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02353P9000\x00\x00\x00\x00\x00\x00\x00\x00553C1000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152653301\x00\x00\x00\x00\x00\x00',
b'F152653310\x00\x00\x00\x00\x00\x00',
b'F152653330\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881515306200\x00\x00\x00\x00',
b'881515306400\x00\x00\x00\x00',
b'881515306500\x00\x00\x00\x00',
b'881515307400\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B53270\x00\x00\x00\x00\x00\x00',
b'8965B53271\x00\x00\x00\x00\x00\x00',
b'8965B53280\x00\x00\x00\x00\x00\x00',
b'8965B53281\x00\x00\x00\x00\x00\x00',
b'8965B53311\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F5301300\x00\x00\x00\x00',
b'8646F5301400\x00\x00\x00\x00',
b'8646F5301200\x00\x00\x00\x00',
],
},
CAR.PRIUS: {
(Ecu.engine, 0x700, None): [
b'\x02896634761000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634762000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634770000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634782000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634784000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347B0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x03896634759100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634759300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701002\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703001\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634768100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634789000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707001\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x038966347B7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47021\x00\x00\x00\x00\x00\x00',
b'8965B47022\x00\x00\x00\x00\x00\x00',
b'8965B47023\x00\x00\x00\x00\x00\x00',
b'8965B47050\x00\x00\x00\x00\x00\x00',
b'8965B47060\x00\x00\x00\x00\x00\x00', # This is the EPS with good angle sensor
],
(Ecu.esp, 0x7b0, None): [
b'F152647290\x00\x00\x00\x00\x00\x00',
b'F152647300\x00\x00\x00\x00\x00\x00',
b'F152647310\x00\x00\x00\x00\x00\x00',
b'F152647414\x00\x00\x00\x00\x00\x00',
b'F152647415\x00\x00\x00\x00\x00\x00',
b'F152647416\x00\x00\x00\x00\x00\x00',
b'F152647417\x00\x00\x00\x00\x00\x00',
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647490\x00\x00\x00\x00\x00\x00',
b'F152647683\x00\x00\x00\x00\x00\x00',
b'F152647684\x00\x00\x00\x00\x00\x00',
b'F152647862\x00\x00\x00\x00\x00\x00',
b'F152647863\x00\x00\x00\x00\x00\x00',
b'F152647864\x00\x00\x00\x00\x00\x00',
b'F152647865\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514702300\x00\x00\x00\x00',
b'881514703100\x00\x00\x00\x00',
b'881514704100\x00\x00\x00\x00',
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4701300\x00\x00\x00\x00',
b'8646F4702001\x00\x00\x00\x00',
b'8646F4702100\x00\x00\x00\x00',
b'8646F4702200\x00\x00\x00\x00',
b'8646F4705000\x00\x00\x00\x00',
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.RAV4: {
(Ecu.engine, 0x7e0, None): [
b'\x02342Q1000\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1100\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1200\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1300\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2100\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2200\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q4000\x00\x00\x00\x00\x00\x00\x00\x0054215000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42063\x00\x00\x00\x00\x00\x00',
b'8965B42073\x00\x00\x00\x00\x00\x00',
b'8965B42082\x00\x00\x00\x00\x00\x00',
b'8965B42083\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F15260R102\x00\x00\x00\x00\x00\x00',
b'F15260R103\x00\x00\x00\x00\x00\x00',
b'F152642493\x00\x00\x00\x00\x00\x00',
b'F152642492\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514201200\x00\x00\x00\x00',
b'881514201300\x00\x00\x00\x00',
b'881514201400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4H: {
(Ecu.engine, 0x7e0, None): [
b'\x02342N9000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342N9100\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342P0000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42102\x00\x00\x00\x00\x00\x00',
b'8965B42103\x00\x00\x00\x00\x00\x00',
b'8965B42112\x00\x00\x00\x00\x00\x00',
b'8965B42162\x00\x00\x00\x00\x00\x00',
b'8965B42163\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642090\x00\x00\x00\x00\x00\x00',
b'F152642110\x00\x00\x00\x00\x00\x00',
b'F152642120\x00\x00\x00\x00\x00\x00',
b'F152642400\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514202200\x00\x00\x00\x00',
b'881514202300\x00\x00\x00\x00',
b'881514202400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201100\x00\x00\x00\x00',
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630R58000\x00\x00\x00\x00',
b'\x01896630R58100\x00\x00\x00\x00',
b'\x018966342E2000\x00\x00\x00\x00',
b'\x018966342M8000\x00\x00\x00\x00',
b'\x018966342S9000\x00\x00\x00\x00',
b'\x018966342T1000\x00\x00\x00\x00',
b'\x018966342T6000\x00\x00\x00\x00',
b'\x018966342T9000\x00\x00\x00\x00',
b'\x018966342U4000\x00\x00\x00\x00',
b'\x018966342U4100\x00\x00\x00\x00',
b'\x018966342V3000\x00\x00\x00\x00',
b'\x018966342V3100\x00\x00\x00\x00',
b'\x018966342V3200\x00\x00\x00\x00',
b'\x01896634A05000\x00\x00\x00\x00',
b'\x01896634A19000\x00\x00\x00\x00',
b'\x01896634A19100\x00\x00\x00\x00',
b'\x01896634A20000\x00\x00\x00\x00',
b'\x01896634A20100\x00\x00\x00\x00',
b'\x01896634A22000\x00\x00\x00\x00',
b'\x01896634A22100\x00\x00\x00\x00',
b'\x01896634A30000\x00\x00\x00\x00',
b'\x01896634A44000\x00\x00\x00\x00',
b'\x01896634A45000\x00\x00\x00\x00',
b'\x01896634A46000\x00\x00\x00\x00',
b'\x028966342M7000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342T0000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342V1000\x00\x00\x00\x00897CF1202001\x00\x00\x00\x00',
b'\x028966342Y8000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18100\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A43000\x00\x00\x00\x00897CF4201001\x00\x00\x00\x00',
b'\x02896634A47000\x00\x00\x00\x00897CF4201001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260R210\x00\x00\x00\x00\x00\x00',
b'\x01F15260R220\x00\x00\x00\x00\x00\x00',
b'\x01F15260R290\x00\x00\x00\x00\x00\x00',
b'\x01F15260R300\x00\x00\x00\x00\x00\x00',
b'\x01F152642551\x00\x00\x00\x00\x00\x00',
b'\x01F152642561\x00\x00\x00\x00\x00\x00',
b'\x01F152642700\x00\x00\x00\x00\x00\x00',
b'\x01F152642701\x00\x00\x00\x00\x00\x00',
b'\x01F152642710\x00\x00\x00\x00\x00\x00',
b'\x01F152642711\x00\x00\x00\x00\x00\x00',
b'\x01F152642750\x00\x00\x00\x00\x00\x00',
b'\x01F152642751\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42180\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
b'\x028965B0R01400\x00\x00\x00\x008965B0R02400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203800\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.RAV4H_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896634A15000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x018966342W8000\x00\x00\x00\x00',
b'\x018966342X5000\x00\x00\x00\x00',
b'\x018966342X6000\x00\x00\x00\x00',
b'\x01896634A25000\x00\x00\x00\x00',
b'\x018966342W5000\x00\x00\x00\x00',
b'\x028966342W4001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A13001\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A13101\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A14001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A23001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A14001\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A14101\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642291\x00\x00\x00\x00\x00\x00',
b'F152642290\x00\x00\x00\x00\x00\x00',
b'F152642330\x00\x00\x00\x00\x00\x00',
b'F152642331\x00\x00\x00\x00\x00\x00',
b'F152642531\x00\x00\x00\x00\x00\x00',
b'F152642532\x00\x00\x00\x00\x00\x00',
b'F152642520\x00\x00\x00\x00\x00\x00',
b'F152642521\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152642541\x00\x00\x00\x00\x00\x00',
b'F152642542\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42180\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
b'\x028965B0R01400\x00\x00\x00\x008965B0R02400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203800\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.LEXUS_ES_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EC9100\x00\x00\x00\x00',
b'\x018966333T5000\x00\x00\x00\x00',
b'\x018966333T5100\x00\x00\x00\x00',
b'\x018966333X6000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152606281\x00\x00\x00\x00\x00\x00',
b'\x01F152606340\x00\x00\x00\x00\x00\x00',
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3303200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.SIENNA: {
(Ecu.engine, 0x700, None): [
b'\x01896630832100\x00\x00\x00\x00',
b'\x01896630832200\x00\x00\x00\x00',
b'\x01896630838000\x00\x00\x00\x00',
b'\x01896630838100\x00\x00\x00\x00',
b'\x01896630842000\x00\x00\x00\x00',
b'\x01896630843000\x00\x00\x00\x00',
b'\x01896630851000\x00\x00\x00\x00',
b'\x01896630851100\x00\x00\x00\x00',
b'\x01896630852100\x00\x00\x00\x00',
b'\x01896630859000\x00\x00\x00\x00',
b'\x01896630860000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B45070\x00\x00\x00\x00\x00\x00',
b'8965B45080\x00\x00\x00\x00\x00\x00',
b'8965B45082\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152608130\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510801100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702200\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_CTH: {
(Ecu.dsu, 0x791, None): [
b'881517601100\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152676144\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0237635000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7601100\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966333S8000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966333T0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966333V4000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x02896633T09000\x00\x00\x00\x00897CF3307001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633423\x00\x00\x00\x00\x00\x00',
b'F152633680\x00\x00\x00\x00\x00\x00',
b'F152633681\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
b'8965B33690\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3303100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F3304200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH: {
(Ecu.engine, 0x7e0, None): [
b'\x02333M4200\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633171\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881513310400\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33512\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F3302001\x00\x00\x00\x00',
b'8646F3302200\x00\x00\x00\x00',
],
},
CAR.LEXUS_NX: {
(Ecu.engine, 0x700, None): [
b'\x01896637851000\x00\x00\x00\x00',
b'\x01896637852000\x00\x00\x00\x00',
b'\x01896637854000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678130\x00\x00\x00\x00\x00\x00',
b'F152678140\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517803100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78060\x00\x00\x00\x00\x00\x00',
b'8965B78080\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_NX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966378B2100\x00\x00\x00\x00',
b'\x018966378G3000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152678221\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78120\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b"\x018821F3301400\x00\x00\x00\x00",
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F78030A0\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F7803100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.PRIUS_ALPHA: {
(Ecu.esp, 0x7b0, None): [
b'F152647280\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0234781000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514705100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4703300\x00\x00\x00\x00',
],
},
CAR.LEXUS_NXH: {
(Ecu.engine, 0x7e0, None): [
b'\x0237882000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237841000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237886000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237880000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678160\x00\x00\x00\x00\x00\x00',
b'F152678170\x00\x00\x00\x00\x00\x00',
b'F152678171\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517804300\x00\x00\x00\x00',
b'881517804100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78060\x00\x00\x00\x00\x00\x00',
b'8965B78080\x00\x00\x00\x00\x00\x00',
b'8965B78100\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801300\x00\x00\x00\x00',
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX: {
(Ecu.engine, 0x700, None): [
b'\x01896630E36200\x00\x00\x00\x00',
b'\x01896630E36300\x00\x00\x00\x00',
b'\x01896630E37200\x00\x00\x00\x00',
b'\x01896630E37300\x00\x00\x00\x00',
b'\x01896630E41000\x00\x00\x00\x00',
b'\x01896630E41100\x00\x00\x00\x00',
b'\x01896630E41200\x00\x00\x00\x00',
b'\x01896630EA3100\x00\x00\x00\x00',
b'\x01896630EA4100\x00\x00\x00\x00',
b'\x01896630EA4300\x00\x00\x00\x00',
b'\x01896630EA6300\x00\x00\x00\x00',
b'\x018966348R1300\x00\x00\x00\x00',
b'\x018966348R8500\x00\x00\x00\x00',
b'\x018966348W1300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648472\x00\x00\x00\x00\x00\x00',
b'F152648473\x00\x00\x00\x00\x00\x00',
b'F152648492\x00\x00\x00\x00\x00\x00',
b'F152648493\x00\x00\x00\x00\x00\x00',
b'F152648474\x00\x00\x00\x00\x00\x00',
b'F152648630\x00\x00\x00\x00\x00\x00',
b'F152648494\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514810300\x00\x00\x00\x00',
b'881514810500\x00\x00\x00\x00',
b'881514810700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48102\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801100\x00\x00\x00\x00',
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH: {
(Ecu.engine, 0x7e0, None): [
b'\x02348J7000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348N0000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T1100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348V6000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Z3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648361\x00\x00\x00\x00\x00\x00',
b'F152648501\x00\x00\x00\x00\x00\x00',
b'F152648502\x00\x00\x00\x00\x00\x00',
b'F152648504\x00\x00\x00\x00\x00\x00',
b'F152648740\x00\x00\x00\x00\x00\x00',
b'F152648A30\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514811300\x00\x00\x00\x00',
b'881514811500\x00\x00\x00\x00',
b'881514811700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48111\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EC9000\x00\x00\x00\x00',
b'\x01896634D12000\x00\x00\x00\x00',
b'\x01896630EB0000\x00\x00\x00\x00',
b'\x01896630EA9000\x00\x00\x00\x00',
b'\x01896630ED0000\x00\x00\x00\x00',
b'\x018966348W9000\x00\x00\x00\x00',
b'\x01896634D12100\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152648801\x00\x00\x00\x00\x00\x00',
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
b'\x01F15260E041\x00\x00\x00\x00\x00\x00',
b'\x01F152648781\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH_TSS2: {
(Ecu.engine, 0x7e0, None): [
b'\x02348X8000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D14000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D16000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648831\x00\x00\x00\x00\x00\x00',
b'F152648D00\x00\x00\x00\x00\x00\x00',
b'F152648D60\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.PRIUS_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966347C8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966347C0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710101\x00\x00\x00\x00',
b'\x038966347C1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710101\x00\x00\x00\x00',
b'\x038966347C5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707101\x00\x00\x00\x00',
b'\x038966347C5100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707101\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152647500\x00\x00\x00\x00\x00\x00',
b'F152647510\x00\x00\x00\x00\x00\x00',
b'F152647520\x00\x00\x00\x00\x00\x00',
b'F152647521\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47070\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4707000\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4710000\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.MIRAI: {
(Ecu.esp, 0x7D1, None): [b'\x01898A36203000\x00\x00\x00\x00',],
(Ecu.esp, 0x7B0, None): [b'\x01F15266203200\x00\x00\x00\x00',], # a second ESP ECU
(Ecu.eps, 0x7A1, None): [b'\x028965B6204100\x00\x00\x00\x008965B6203100\x00\x00\x00\x00',],
(Ecu.fwdRadar, 0x750, 0xf): [b'\x018821F6201200\x00\x00\x00\x00',],
(Ecu.fwdCamera, 0x750, 0x6d): [b'\x028646F6201400\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',],
},
CAR.ALPHARD_TSS2: {
(Ecu.engine, 0x7e0, None): [b'\x0235883000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',],
(Ecu.eps, 0x7a1, None): [b'8965B58040\x00\x00\x00\x00\x00\x00',],
(Ecu.fwdRadar, 0x750, 0xf): [b'\x018821F3301400\x00\x00\x00\x00',],
(Ecu.fwdCamera, 0x750, 0x6d): [b'\x028646F5803200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',],
},
}
STEER_THRESHOLD = 100
DBC = {
CAR.RAV4H: dbc_dict('toyota_rav4_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.RAV4: dbc_dict('toyota_rav4_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX: dbc_dict('lexus_rx_350_2016_pt_generated', 'toyota_adas'),
CAR.LEXUS_RXH: dbc_dict('lexus_rx_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_RXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.CHR: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CHRH: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
CAR.CAMRY: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CAMRYH: dbc_dict('toyota_camry_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.CAMRY_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.CAMRYH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDER: dbc_dict('toyota_highlander_2017_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH: dbc_dict('toyota_highlander_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.HIGHLANDERH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.AVALON: dbc_dict('toyota_avalon_2017_pt_generated', 'toyota_adas'),
CAR.AVALON_2019: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.AVALONH_2019: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
CAR.RAV4_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLA_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLAH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ES_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ESH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ESH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
CAR.SIENNA: dbc_dict('toyota_sienna_xle_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_IS: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_CTH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
CAR.RAV4H_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_NXH: dbc_dict('lexus_nx300h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_NX: dbc_dict('lexus_nx300_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_NX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.PRIUS_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.MIRAI: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.ALPHARD_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ISH: dbc_dict('lexus_is300h_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS_ALPHA: dbc_dict('toyota_prius_alpha_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS_C: dbc_dict('toyota_priusc_2016_pt_generated', 'toyota_adas'),
}
# Toyota/Lexus Safety Sense 2.0 and 2.5
TSS2_CAR = set([CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2,
CAR.LEXUS_RX_TSS2, CAR.LEXUS_RXH_TSS2, CAR.HIGHLANDER_TSS2, CAR.HIGHLANDERH_TSS2, CAR.PRIUS_TSS2, CAR.CAMRY_TSS2, CAR.CAMRYH_TSS2,
CAR.MIRAI, CAR.LEXUS_NX_TSS2, CAR.ALPHARD_TSS2])
NO_DSU_CAR = TSS2_CAR | set([CAR.CHR, CAR.CHRH, CAR.CAMRY, CAR.CAMRYH])
# no resume button press required
NO_STOP_TIMER_CAR = TSS2_CAR | set([CAR.PRIUS_ALPHA, CAR.RAV4H, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_ESH])
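# A minimal illustrative sketch, not from the upstream file: it shows how the
# lookup tables above are typically consumed: pick the DBC definition for a
# fingerprinted platform and derive feature flags from the car sets. The
# `_example_*` names are hypothetical and exist only for demonstration.
_example_candidate = CAR.RAV4_TSS2
_example_dbc = DBC[_example_candidate]                           # DBC definition names for this platform
_example_has_dsu = _example_candidate not in NO_DSU_CAR          # TSS2 and some later cars have no separate DSU
_example_auto_resume = _example_candidate in NO_STOP_TIMER_CAR   # no resume button press required after a stop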
| 56.024047
| 1,108
| 0.61398
|
02f2c1389c2e146d15cfc51717624b2a1d2a80ff
| 1,868
|
py
|
Python
|
catalyst/contrib/utils/cv/tests/test_image.py
|
YaLTeR/catalyst
|
4b875b50b3c63ac2dac1f19399af0c016dfb4e2f
|
[
"Apache-2.0"
] | null | null | null |
catalyst/contrib/utils/cv/tests/test_image.py
|
YaLTeR/catalyst
|
4b875b50b3c63ac2dac1f19399af0c016dfb4e2f
|
[
"Apache-2.0"
] | null | null | null |
catalyst/contrib/utils/cv/tests/test_image.py
|
YaLTeR/catalyst
|
4b875b50b3c63ac2dac1f19399af0c016dfb4e2f
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
from catalyst import utils
from catalyst.contrib.data.dataset.transforms import normalize, to_tensor
from catalyst.contrib.utils.cv.tensor import _IMAGENET_MEAN, _IMAGENET_STD
def test_imread():
"""Tests ``imread`` functionality."""
jpg_rgb_uri = (
"https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master"
"/test_images/catalyst_icon.jpg"
)
jpg_grs_uri = (
"https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master"
"/test_images/catalyst_icon_grayscale.jpg"
)
png_rgb_uri = (
"https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master"
"/test_images/catalyst_icon.png"
)
png_grs_uri = (
"https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master"
"/test_images/catalyst_icon_grayscale.png"
)
for uri in [jpg_rgb_uri, jpg_grs_uri, png_rgb_uri, png_grs_uri]:
img = utils.imread(uri)
assert img.shape == (400, 400, 3)
img = utils.imread(uri, grayscale=True)
assert img.shape == (400, 400, 1)
def test_tensor_to_ndimage():
"""Tests ``tensor_to_ndimage`` functionality."""
orig_images = np.random.randint(0, 255, (2, 20, 10, 3), np.uint8)
torch_images = torch.stack(
[
normalize(to_tensor(im), _IMAGENET_MEAN, _IMAGENET_STD)
for im in orig_images
],
dim=0,
)
byte_images = utils.tensor_to_ndimage(torch_images, dtype=np.uint8)
float_images = utils.tensor_to_ndimage(torch_images, dtype=np.float32)
assert np.allclose(byte_images, orig_images)
assert np.allclose(float_images, orig_images / 255, atol=1e-3, rtol=1e-3)
assert np.allclose(
utils.tensor_to_ndimage(torch_images[0]),
orig_images[0] / 255,
atol=1e-3,
rtol=1e-3,
)
| 31.133333
| 78
| 0.667024
|
9b89144e7685390d685dd39f2102a1506ff1a836
| 5,527
|
py
|
Python
|
mypy/fscache.py
|
Wintakeb/mypy
|
fc4ad4db1a0e070d719e0844d35ef4f144cc24e0
|
[
"PSF-2.0"
] | null | null | null |
mypy/fscache.py
|
Wintakeb/mypy
|
fc4ad4db1a0e070d719e0844d35ef4f144cc24e0
|
[
"PSF-2.0"
] | null | null | null |
mypy/fscache.py
|
Wintakeb/mypy
|
fc4ad4db1a0e070d719e0844d35ef4f144cc24e0
|
[
"PSF-2.0"
] | null | null | null |
"""Interface for accessing the file system with automatic caching.
The idea is to cache the results of any file system state reads during
a single transaction. This has two main benefits:
* This avoids redundant syscalls, as we won't perform the same OS
operations multiple times.
* This makes it easier to reason about concurrent FS updates, as different
operations targeting the same paths can't report different state during
a transaction.
Note that this only deals with reading state, not writing.
Properties maintained by the API:
* The contents of the file are always from the same or later time compared
to the reported mtime of the file, even if mtime is queried after reading
a file.
* Repeating an operation produces the same result as the first one during
a transaction.
* Call flush() to start a new transaction (flush the caches).
The API is a bit limited. It's easy to add new cached operations, however.
You should perform all file system reads through the API to actually take
advantage of the benefits.
"""
import functools
import hashlib
import os
import stat
from typing import Dict, List, Tuple
class FileSystemCache:
def __init__(self) -> None:
self.flush()
def flush(self) -> None:
"""Start another transaction and empty all caches."""
self.stat_cache = {} # type: Dict[str, os.stat_result]
self.stat_error_cache = {} # type: Dict[str, OSError]
self.listdir_cache = {} # type: Dict[str, List[str]]
self.listdir_error_cache = {} # type: Dict[str, OSError]
self.isfile_case_cache = {} # type: Dict[str, bool]
self.read_cache = {} # type: Dict[str, bytes]
self.read_error_cache = {} # type: Dict[str, Exception]
self.hash_cache = {} # type: Dict[str, str]
def stat(self, path: str) -> os.stat_result:
if path in self.stat_cache:
return self.stat_cache[path]
if path in self.stat_error_cache:
raise copy_os_error(self.stat_error_cache[path])
try:
st = os.stat(path)
except OSError as err:
# Take a copy to get rid of associated traceback and frame objects.
# Just assigning to __traceback__ doesn't free them.
self.stat_error_cache[path] = copy_os_error(err)
raise err
self.stat_cache[path] = st
return st
def listdir(self, path: str) -> List[str]:
if path in self.listdir_cache:
return self.listdir_cache[path]
if path in self.listdir_error_cache:
raise copy_os_error(self.listdir_error_cache[path])
try:
results = os.listdir(path)
except OSError as err:
# Like above, take a copy to reduce memory use.
self.listdir_error_cache[path] = copy_os_error(err)
raise err
self.listdir_cache[path] = results
return results
def isfile(self, path: str) -> bool:
try:
st = self.stat(path)
except OSError:
return False
return stat.S_ISREG(st.st_mode)
def isfile_case(self, path: str) -> bool:
"""Return whether path exists and is a file.
On case-insensitive filesystems (like Mac or Windows) this returns
False if the case of the path's last component does not exactly
match the case found in the filesystem.
TODO: We should maybe check the case for some directory components also,
to avoid permitting wrongly-cased *packages*.
"""
if path in self.isfile_case_cache:
return self.isfile_case_cache[path]
head, tail = os.path.split(path)
if not tail:
res = False
else:
try:
names = self.listdir(head)
res = tail in names and self.isfile(path)
except OSError:
res = False
self.isfile_case_cache[path] = res
return res
def isdir(self, path: str) -> bool:
try:
st = self.stat(path)
except OSError:
return False
return stat.S_ISDIR(st.st_mode)
def exists(self, path: str) -> bool:
try:
self.stat(path)
except FileNotFoundError:
return False
return True
def read(self, path: str) -> bytes:
if path in self.read_cache:
return self.read_cache[path]
if path in self.read_error_cache:
raise self.read_error_cache[path]
# Need to stat first so that the contents of file are from no
# earlier instant than the mtime reported by self.stat().
self.stat(path)
try:
with open(path, 'rb') as f:
data = f.read()
except Exception as err:
self.read_error_cache[path] = err
raise
md5hash = hashlib.md5(data).hexdigest()
self.read_cache[path] = data
self.hash_cache[path] = md5hash
return data
def md5(self, path: str) -> str:
if path not in self.hash_cache:
self.read(path)
return self.hash_cache[path]
def samefile(self, f1: str, f2: str) -> bool:
s1 = self.stat(f1)
s2 = self.stat(f2)
return os.path.samestat(s1, s2) # type: ignore
def copy_os_error(e: OSError) -> OSError:
new = OSError(*e.args)
new.errno = e.errno
new.strerror = e.strerror
new.filename = e.filename
if e.filename2:
new.filename2 = e.filename2
return new
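# A minimal usage sketch, not part of the upstream module, illustrating the
# transaction semantics described in the module docstring: repeated reads are
# served from the caches, and flush() starts a new transaction with empty caches.
if __name__ == '__main__':
    _demo_cache = FileSystemCache()
    _demo_path = __file__                      # any readable file works for this demo
    _demo_data = _demo_cache.read(_demo_path)  # fills the stat, read and hash caches
    assert _demo_cache.md5(_demo_path) == hashlib.md5(_demo_data).hexdigest()
    _demo_cache.flush()                        # new transaction: all caches are emptied
    assert _demo_path not in _demo_cache.read_cache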
| 33.295181
| 80
| 0.621856
|
50aeda18f6c2a2c1fe079c999d7bd4922504754c
| 947
|
py
|
Python
|
mysite/manage.py
|
codinginbrazil/django
|
9eef7d7e54cb3929dd07b544ba28b6498a85902f
|
[
"MIT"
] | null | null | null |
mysite/manage.py
|
codinginbrazil/django
|
9eef7d7e54cb3929dd07b544ba28b6498a85902f
|
[
"MIT"
] | null | null | null |
mysite/manage.py
|
codinginbrazil/django
|
9eef7d7e54cb3929dd07b544ba28b6498a85902f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks.
- A command-line utility that lets you interact with this Django project in various ways
- [docs](https://docs.djangoproject.com/en/3.2/ref/django-admin/)
"""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
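# A brief usage note, not part of the generated file: with a standard Django
# project layout this utility is run from the project root, for example:
#   python manage.py runserver        # start the development server
#   python manage.py migrate          # apply database migrations
#   python manage.py createsuperuser  # create an admin account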
| 33.821429
| 92
| 0.692714
|
ca94dc11a5fab3e6f330b9522968053f32d60818
| 3,251
|
py
|
Python
|
tests/test_registryclient.py
|
3r1co/registry-frontend
|
2b5ae0d0eeb4fc0939e690cb3a7063bbae2b8209
|
[
"MIT"
] | null | null | null |
tests/test_registryclient.py
|
3r1co/registry-frontend
|
2b5ae0d0eeb4fc0939e690cb3a7063bbae2b8209
|
[
"MIT"
] | null | null | null |
tests/test_registryclient.py
|
3r1co/registry-frontend
|
2b5ae0d0eeb4fc0939e690cb3a7063bbae2b8209
|
[
"MIT"
] | 3
|
2019-01-27T13:47:00.000Z
|
2019-10-26T09:06:58.000Z
|
import os
import aiohttp
import asyncio
import pytest
import json
from aioresponses import aioresponses
from registryclient import RegistryClient
@pytest.fixture(scope = 'module')
def global_data():
return {'registry': "http://registry",
'repository': "test/alpine",
'tag': "latest"}
@pytest.fixture(scope = 'module')
def client(global_data):
return RegistryClient(global_data["registry"], None, None, None)
@pytest.mark.asyncio
async def test_retrieve_repositories(global_data, client):
f = open(get_resource('response_repositories.json'), "r")
with aioresponses() as m:
m.get("%s/v2/_catalog" % global_data["registry"], status=200, body=f.read())
session = aiohttp.ClientSession()
resp = await client.retrieve_repositories(session)
expect = set([global_data["repository"]])
assert check_equal(resp, expect)
await session.close()
@pytest.mark.asyncio
async def test_retrieve_tags_for_repository(global_data, client):
f = open(get_resource('response_tags.json'), "r")
with aioresponses() as m:
m.get("%s/v2/%s/tags/list" % (global_data["registry"], global_data["repository"]), status=200, body=f.read())
session = aiohttp.ClientSession()
resp = await client.retrieve_tags_for_repository(global_data["repository"], session)
expect = set([global_data["tag"]])
assert check_equal(resp, expect)
await session.close()
@pytest.mark.asyncio
async def test_retrieve_size_for_tag_and_repository(global_data, client):
f = open(get_resource('response_manifest_v2.json'), "r")
with aioresponses() as m:
m.get("%s/v2/%s/manifests/%s" % (global_data["registry"], global_data["repository"], global_data["tag"]), status=200, body=f.read())
session = aiohttp.ClientSession()
resp = await client.retrieve_size_for_tag_and_repository(global_data["repository"], global_data["tag"], session)
expect = {'repo': 'test/alpine', 'tag': 'latest', 'sizes':
{'manifest': 7023,
'sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f': 32654,
'sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b': 16724,
'sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736': 73109}}
assert check_equal(resp, expect)
await session.close()
@pytest.mark.asyncio
async def test_retrieve_manifest_v1_for_tag_and_repository(global_data, client):
f = open(get_resource('response_manifest_v1.json'), "r")
with aioresponses() as m:
m.get("%s/v2/%s/manifests/%s" % (global_data["registry"], global_data["repository"], global_data["tag"]), status=200, body=f.read())
session = aiohttp.ClientSession()
resp = await client.retrieve_manifest_v1_for_tag_and_repository(global_data["repository"], global_data["tag"], session)
response = json.loads(resp)
assert response["architecture"] == "amd64"
await session.close()
def check_equal(s1, s2):
return len(s1) == len(s2) and sorted(s1) == sorted(s2)
def get_resource(filename):
return os.path.join(os.path.dirname(__file__), 'resources', filename)
| 44.534247
| 140
| 0.690864
|
9d3d0bbe6dbc3c4c015561a0312f83254acef779
| 696
|
py
|
Python
|
parser.py
|
adityag6994/quad_arrt
|
4235b3985fb5327cffaa136929859bac124ee934
|
[
"BSD-2-Clause"
] | 1
|
2021-03-02T08:45:41.000Z
|
2021-03-02T08:45:41.000Z
|
parser.py
|
adityag6994/quad_arrt
|
4235b3985fb5327cffaa136929859bac124ee934
|
[
"BSD-2-Clause"
] | null | null | null |
parser.py
|
adityag6994/quad_arrt
|
4235b3985fb5327cffaa136929859bac124ee934
|
[
"BSD-2-Clause"
] | 3
|
2016-04-02T22:09:09.000Z
|
2016-08-22T08:21:24.000Z
|
# coding=utf-8
from __future__ import division
from __future__ import with_statement # for python 2.5
import roslib
import addict
roslib.load_manifest("rosparam")
import rosparam
__author__ = 'Aijun Bai'
class Yaml(addict.Dict):
def __init__(self, *args, **kwargs):
super(Yaml, self).__init__(*args, **kwargs)
if 'file_name' in kwargs:
self._parse(kwargs['file_name'])
def _parse(self, file_name):
param_list = rosparam.load_file(file_name, default_namespace='/')
super(Yaml, self).__init__(param_list[0][0])
def __call__(self, key, default):
if key not in self:
self[key] = default
return self[key]
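# A hypothetical usage sketch (the file name and keys below are illustrative),
# assuming a ROS environment where rosparam can load the given YAML file:
#   cfg = Yaml(file_name='quad_params.yaml')  # parsed via rosparam.load_file
#   rate = cfg('update_rate', 50)             # returns the stored value, or sets and returns the default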
| 21.75
| 73
| 0.659483
|
1e66fe05d540d695bb1544449dd773049c1628c5
| 13,084
|
py
|
Python
|
contrib/dokku-installer.py
|
tamanobi/dokku
|
5579fe00066e6caf1671506f5d73db7fcc61f4e0
|
[
"MIT"
] | null | null | null |
contrib/dokku-installer.py
|
tamanobi/dokku
|
5579fe00066e6caf1671506f5d73db7fcc61f4e0
|
[
"MIT"
] | null | null | null |
contrib/dokku-installer.py
|
tamanobi/dokku
|
5579fe00066e6caf1671506f5d73db7fcc61f4e0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2.7
import cgi
import json
import os
import re
import SimpleHTTPServer
import SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.14.2'
hostname = ''
try:
command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
hostname = subprocess.check_output(command, shell=True)
if ':' in hostname:
hostname = ''
except subprocess.CalledProcessError:
pass
key_file = os.getenv('KEY_FILE', None)
if os.path.isfile('/home/ec2-user/.ssh/authorized_keys'):
key_file = '/home/ec2-user/.ssh/authorized_keys'
elif os.path.isfile('/home/ubuntu/.ssh/authorized_keys'):
key_file = '/home/ubuntu/.ssh/authorized_keys'
else:
key_file = '/root/.ssh/authorized_keys'
admin_keys = []
if os.path.isfile(key_file):
try:
command = "cat {0}".format(key_file)
admin_keys = subprocess.check_output(command, shell=True).strip().split("\n")
except subprocess.CalledProcessError:
pass
def check_boot():
if 'onboot' not in sys.argv:
return
init_dir = os.getenv('INIT_DIR', '/etc/init')
systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
nginx_dir = os.getenv('NGINX_DIR', '/etc/nginx/conf.d')
if os.path.exists(init_dir):
with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
f.write("start on runlevel [2345]\n")
f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
if os.path.exists(systemd_dir):
with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
f.write("[Unit]\n")
f.write("Description=Dokku web-installer\n")
f.write("\n")
f.write("[Service]\n")
f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
f.write("\n")
f.write("[Install]\n")
f.write("WantedBy=multi-user.target\n")
f.write("WantedBy=graphical.target\n")
if os.path.exists(nginx_dir):
with open('{0}/dokku-installer.conf'.format(nginx_dir), 'w') as f:
f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
f.write("server {\n")
f.write(" listen 80;\n")
f.write(" location / {\n")
f.write(" proxy_pass http://dokku-installer;\n")
f.write(" }\n")
f.write("}\n")
subprocess.call('rm -f /etc/nginx/sites-enabled/*', shell=True)
sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{AUTHORIZED_KEYS_LOCATION}', key_file)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.wfile.write(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
vhost_enable = 'false'
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
if 'vhost' in params and params['vhost'].value == 'true':
vhost_enable = 'true'
with open('{0}/VHOST'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
else:
try:
os.remove('{0}/VHOST'.format(dokku_root))
except OSError:
pass
with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
user = 'admin'
if self.admin_user_exists() is not None:
user = 'web-admin'
if self.web_admin_user_exists() is not None:
index = int(self.web_admin_user_exists()) + 1
elif self.web_admin_user_exists() is None:
index = 1
elif self.admin_user_exists() is None:
pass
else:
index = int(self.admin_user_exists()) + 1
user = user + str(index)
command = ['sshcommand', 'acl-add', 'dokku', user]
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
proc.stdin.write(key)
proc.stdin.close()
proc.wait()
set_debconf_selection('boolean', 'nginx_enable', 'true')
set_debconf_selection('boolean', 'skip_key_file', 'true')
set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
set_debconf_selection('boolean', 'web_config', 'false')
set_debconf_selection('string', 'hostname', params['hostname'].value)
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps({'status': 'ok'}))
def web_admin_user_exists(self):
return self.user_exists('web-admin(\d+)')
def admin_user_exists(self):
return self.user_exists('admin(\d+)')
def user_exists(self, name):
command = 'dokku ssh-keys:list'
pattern = re.compile(r'NAME="' + name + '"')
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
max_num = 0
exists = False
for line in proc.stdout:
m = pattern.search(line)
if m:
# User of the form `user` or `user#` exists
exists = True
                max_num = max(max_num, int(m.group(1)))  # compare numerically so e.g. 10 > 2
if exists:
return max_num
else:
return None
def set_debconf_selection(debconf_type, key, value):
found = False
with open('/etc/os-release', 'r') as f:
for line in f:
if 'debian' in line:
found = True
if not found:
return
ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
key, debconf_type, value
)], stdout=subprocess.PIPE)
try:
subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
except subprocess.CalledProcessError:
pass
ps.wait()
class DeleteInstallerThread(object):
def __init__(self, interval=1):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
command = "rm /etc/nginx/conf.d/dokku-installer.conf && /etc/init.d/nginx stop && /etc/init.d/nginx start"
try:
subprocess.call(command, shell=True)
except:
pass
command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)"
try:
subprocess.call(command, shell=True)
except:
pass
def main():
check_boot()
port = int(os.getenv('PORT', 2000))
httpd = SocketServer.TCPServer(("", port), GetHandler)
print "Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port)
httpd.serve_forever()
PAGE = """
<html>
<head>
<title>Dokku Setup</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-1.12.4.min.js" integrity="sha256-ZosEbRLbNQzLpnKIkEdrPv7lOy9C27hHQ+Xp8a4MxAQ=" crossorigin="anonymous"></script>
<style>
.bd-callout {
padding: 1.25rem;
margin-top: 1.25rem;
margin-bottom: 1.25rem;
border: 1px solid #eee;
border-left-width: .25rem;
border-radius: .25rem;
}
.bd-callout p:last-child {
margin-bottom: 0;
}
.bd-callout-info {
border-left-color: #5bc0de;
}
pre {
font-size: 80%;
margin-bottom: 0;
}
h1 small {
font-size: 50%;
}
h5 {
font-size: 1rem;
}
.container {
width: 640px;
}
.result {
padding-left: 20px;
}
input.form-control, textarea.form-control {
background-color: #fafbfc;
font-size: 14px;
}
input.form-control::placeholder, textarea.form-control::placeholder {
color: #adb2b8
}
</style>
</head>
<body>
<div class="container">
<form id="form" role="form">
<h1 class="pt-3">Dokku Setup <small class="text-muted">{VERSION}</small></h1>
<div class="alert alert-warning small" role="alert">
<strong>Warning:</strong> The SSH key filled out here can grant root access to the server. Please complete the setup as soon as possible.
</div>
<div class="row">
<div class="col">
<h3>Admin Access</h3>
<div class="form-group">
<label for="key">Public SSH Keys</label><br />
<textarea class="form-control" name="keys" rows="5" id="key" placeholder="Begins with 'ssh-rsa', 'ssh-dss', 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', or 'ecdsa-sha2-nistp521'">{ADMIN_KEYS}</textarea>
<small class="form-text text-muted">Public keys allow users to ssh onto the server as the <code>dokku</code> user, as well as remotely execute Dokku commands. They are currently auto-populated from: <code>{AUTHORIZED_KEYS_LOCATION}</code>, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/deployment/user-management/" target="_blank"><code>dokku ssh-keys</code></a> plugin.</small>
</div>
</div>
</div>
<div class="row">
<div class="col">
<h3>Hostname Configuration</h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" placeholder="A hostname or ip address such as {HOSTNAME}" />
<small class="form-text text-muted">This will be used as the default host for all applications, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/configuration/domains/" target="_blank"><code>dokku domains:set-global</code></a> command.</small>
</div>
<div class="form-check">
<input class="form-check-input" type="checkbox" id="vhost" name="vhost" value="true">
<label class="form-check-label" for="vhost">Use virtualhost naming for apps</label>
<small class="form-text text-muted">When enabled, Nginx will be run on port 80 and proxy requests to apps based on hostname.</small>
<small class="form-text text-muted">When disabled, a specific port will be setup for each application on first deploy, and requests to that port will be proxied to the relevant app.</small>
</div>
<div class="bd-callout bd-callout-info">
<h5>What will app URLs look like?</h5>
<pre><code id="example">http://hostname:port</code></pre>
</div>
</div>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span class="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
function setup() {
if ($.trim($("#key").val()) == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($.trim($("#hostname").val()) == "") {
alert("Your hostname cannot be blank.")
return
}
data = $("#form").serialize()
$("input,textarea,button").prop("disabled", true);
$.post('/setup', data)
.done(function() {
$(".result").addClass('text-success');
$(".result").html("Success! Redirecting in 3 seconds. ..")
setTimeout(function() {
window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/";
}, 3000);
})
.fail(function(data) {
$(".result").addClass('text-danger');
$(".result").html("Something went wrong...")
$("#error-output").html(data.responseText)
});
}
function update() {
if ($("#vhost").is(":checked") && $("#hostname").val().match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").prop('checked', false);
}
if ($("#vhost").is(':checked')) {
$("#example").html("http://<app-name>."+$("#hostname").val())
} else {
$("#example").html("http://"+$("#hostname").val()+":<app-port>")
}
}
$("#vhost").change(update);
$("#hostname").change(update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
| 36.856338
| 418
| 0.584684
|
964c9403ed105e8e96ba49e698465a5a11d9db10
| 1,681
|
py
|
Python
|
SocialAPI/TwitterAPI/RecolectorTweetsUsersStream.py
|
garnachod/ConcursoPolicia
|
f123595afc697ddfa862114a228d7351e2f8fd73
|
[
"Apache-2.0"
] | null | null | null |
SocialAPI/TwitterAPI/RecolectorTweetsUsersStream.py
|
garnachod/ConcursoPolicia
|
f123595afc697ddfa862114a228d7351e2f8fd73
|
[
"Apache-2.0"
] | null | null | null |
SocialAPI/TwitterAPI/RecolectorTweetsUsersStream.py
|
garnachod/ConcursoPolicia
|
f123595afc697ddfa862114a228d7351e2f8fd73
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
#lib_path = os.path.abspath('../../')
#sys.path.append(lib_path)
from DBbridge.EscritorTweetsCassandra import EscritorTweetsCassandra
from SocialAPI.Recolector import Recolector
from twython import TwythonStreamer
from ApoyoTwitter import ApoyoTwitter
from getAuthorizations import GetAuthorizations
class RecolectorTweetsUsersStream(TwythonStreamer, Recolector):
"""docstring for TweetsStatusStream"""
def __init__(self, escritores):
self.authorizator = GetAuthorizations(1000)
self.tipo_id = 100
self.authorizator.load_twitter_token(self.tipo_id)
app_key, app_secret, oauth_token, oauth_token_secret = self.authorizator.get_twitter_secret()
Recolector.__init__(self, escritores)
TwythonStreamer.__init__(self, app_key, app_secret, oauth_token, oauth_token_secret)
self.tweets = []
def recolecta(self, tokens):
string_tokens = ""
for i, token in enumerate(tokens):
if i >= 5000:
break
if i == 0:
string_tokens += str(token)
else:
string_tokens += "," + str(token)
print string_tokens
self.statuses.filter(follow=string_tokens)
def on_success(self, data):
limiteEscritura = 10
if 'text' in data:
print len(self.tweets)
if len(self.tweets) > limiteEscritura:
self.guarda(self.tweets)
self.tweets = []
self.tweets.append(data)
def guarda(self, arrayDatos):
for escritor in self.escritores:
escritor.escribe(arrayDatos)
def on_error(self, status_code, data):
print status_code
#exit()
if __name__ == '__main__':
escritores = [EscritorTweetsCassandra(-1)]
recolector = RecolectorTweetsUsersStream(escritores)
recolector.recolecta([2383366169])
| 26.265625
| 95
| 0.751933
|
8d13411d8e50051701e43d3aaf585808f74fca5d
| 1,174
|
py
|
Python
|
src/remove_comments.py
|
tanthanadon/senior
|
89fc24889b34860982b551e5ea5e0d3550505f65
|
[
"MIT"
] | null | null | null |
src/remove_comments.py
|
tanthanadon/senior
|
89fc24889b34860982b551e5ea5e0d3550505f65
|
[
"MIT"
] | 5
|
2020-03-04T13:49:10.000Z
|
2020-03-20T04:06:23.000Z
|
src/remove_comments.py
|
tanthanadon/senior
|
89fc24889b34860982b551e5ea5e0d3550505f65
|
[
"MIT"
] | null | null | null |
import sys, token, tokenize
def do_file(fname):
""" Run on just one file.
"""
source = open(fname)
mod = open(fname + ",strip", "w")
prev_toktype = token.INDENT
first_line = None
last_lineno = -1
last_col = 0
tokgen = tokenize.generate_tokens(source.readline)
for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen:
if 0: # Change to if 1 to see the tokens fly by.
print("%10s %-14s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
"%d.%d-%d.%d" % (slineno, scol, elineno, ecol),
ttext, ltext
))
if slineno > last_lineno:
last_col = 0
if scol > last_col:
mod.write(" " * (scol - last_col))
if toktype == token.STRING and prev_toktype == token.INDENT:
# Docstring
mod.write("#--")
elif toktype == tokenize.COMMENT:
# Comment
mod.write("##\n")
else:
mod.write(ttext)
prev_toktype = toktype
last_col = ecol
last_lineno = elineno
if __name__ == '__main__':
do_file("api.py")
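# Note (illustrative, not part of the original script): do_file() writes the
# stripped source next to the input with a ",strip" suffix, so the call above
# produces "api.py,strip" with docstrings replaced by "#--" and comments by "##".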
| 29.35
| 74
| 0.526405
|
19caab646cb8b36a1efe2dca2e9c4e8733305afc
| 269
|
py
|
Python
|
src/computational_design_and_fabrication/__init__.py
|
augmentedfabricationlab/computational_design_and_fabrication
|
3be054be8ff880391a2385277f0b8f2f0ef57786
|
[
"MIT"
] | null | null | null |
src/computational_design_and_fabrication/__init__.py
|
augmentedfabricationlab/computational_design_and_fabrication
|
3be054be8ff880391a2385277f0b8f2f0ef57786
|
[
"MIT"
] | null | null | null |
src/computational_design_and_fabrication/__init__.py
|
augmentedfabricationlab/computational_design_and_fabrication
|
3be054be8ff880391a2385277f0b8f2f0ef57786
|
[
"MIT"
] | null | null | null |
"""
Intro to project ...
Setup
=====
In order to use this library, ...
Main concepts
=============
Describe typical classes found in project
.. autoclass:: SampleClassName
:members:
"""
from .wall import Wall_ProjectTitle
__all__ = ['Wall_ProjectTitle']
| 10.346154
| 41
| 0.654275
|
5da357503b50f6ae9c8b0bdd4781e9f4d01f07ea
| 2,949
|
py
|
Python
|
silvaengine_resource/tests/test_silvaengine_resource.py
|
ideabosque/silvaengine_resouces
|
f67788008f845b61583907a6c539fde4af758266
|
[
"MIT"
] | null | null | null |
silvaengine_resource/tests/test_silvaengine_resource.py
|
ideabosque/silvaengine_resouces
|
f67788008f845b61583907a6c539fde4af758266
|
[
"MIT"
] | null | null | null |
silvaengine_resource/tests/test_silvaengine_resource.py
|
ideabosque/silvaengine_resouces
|
f67788008f845b61583907a6c539fde4af758266
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
__author__ = "bl"
import logging, sys, unittest, os
from silvaengine_utility import Utility
from dotenv import load_dotenv
load_dotenv()
setting = {
"region_name": os.getenv("region_name"),
"aws_access_key_id": os.getenv("aws_access_key_id"),
"aws_secret_access_key": os.getenv("aws_secret_access_key"),
}
sys.path.insert(0, "/var/www/projects/silvaengine_resouces")
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger()
from silvaengine_resource import Resource
class SilvaEngineResourceTest(unittest.TestCase):
def setUp(self):
self.resource = Resource(logger, **setting)
logger.info("Initiate SilvaEngineResourceTest ...")
def tearDown(self):
        logger.info("Destroy SilvaEngineResourceTest ...")
@unittest.skip("demonstrating skipping")
def test_add_resource(self):
logger.info(
self.resource.add_resource(
[
"analytics_engine",
"user_engine",
"shipping_quote_engine",
"seller_engine",
"factory_engine",
]
)
)
# @unittest.skip("demonstrating skipping")
def test_graphql_get_resource(self):
variables = {
"limit": 10,
"lastEvaluatedKey": {},
}
query = """
query resources($limit: Int!, $lastEvaluatedKey: JSON) {
resources(limit: $limit, lastEvaluatedKey: $lastEvaluatedKey) {
items {
resourceId
service
moduleName
className
function
label
status
createdAt
updatedAt
updatedBy
operations {
query {
label
action
}
mutation {
label
action
}
}
}
lastEvaluatedKey
}
}
"""
# variables = {
# "limit": 1,
# "lastEvaluatedKey": Utility.json_dumps(
# {
# "service": {"S": "subscription_management"},
# "resource_id": {"S": "053429072013b1fc6eeac9555cd4618b"},
# }
# ),
# }
payload = {"query": query, "variables": variables}
response = self.resource.resource_graphql(**payload)
logger.info(response)
if __name__ == "__main__":
unittest.main()
| 28.355769
| 79
| 0.47372
|
e1dbb89ce271de97647d3494aca05338270e11cc
| 13,642
|
py
|
Python
|
aries_cloudagent/messaging/credentials/routes.py
|
Patrik-Stas/aries-cloudagent-python
|
28f4b59b11db72568084070e4e6c1576c298f03d
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagent/messaging/credentials/routes.py
|
Patrik-Stas/aries-cloudagent-python
|
28f4b59b11db72568084070e4e6c1576c298f03d
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagent/messaging/credentials/routes.py
|
Patrik-Stas/aries-cloudagent-python
|
28f4b59b11db72568084070e4e6c1576c298f03d
|
[
"Apache-2.0"
] | null | null | null |
"""Connection handling admin routes."""
import asyncio
import json
from aiohttp import web
from aiohttp_apispec import docs, request_schema, response_schema
from marshmallow import fields, Schema
from ...holder.base import BaseHolder
from ...storage.error import StorageNotFoundError
from ...wallet.error import WalletNotFoundError
from ..connections.models.connection_record import ConnectionRecord
from .manager import CredentialManager
from .models.credential_exchange import CredentialExchange, CredentialExchangeSchema
class CredentialSendRequestSchema(Schema):
"""Request schema for sending a credential offer admin message."""
connection_id = fields.Str(required=True)
credential_definition_id = fields.Str(required=True)
credential_values = fields.Dict(required=False)
class CredentialSendResultSchema(Schema):
"""Result schema for sending a credential offer admin message."""
credential_id = fields.Str()
class CredentialOfferRequestSchema(Schema):
"""Request schema for sending a credential offer admin message."""
connection_id = fields.Str(required=True)
credential_definition_id = fields.Str(required=True)
class CredentialOfferResultSchema(Schema):
"""Result schema for sending a credential offer admin message."""
credential_id = fields.Str()
class CredentialRequestResultSchema(Schema):
"""Result schema for sending a credential request admin message."""
credential_id = fields.Str()
class CredentialIssueRequestSchema(Schema):
"""Request schema for sending a credential issue admin message."""
credential_values = fields.Dict(required=True)
class CredentialIssueResultSchema(Schema):
"""Result schema for sending a credential issue admin message."""
credential_id = fields.Str()
class CredentialExchangeListSchema(Schema):
"""Result schema for a credential exchange query."""
results = fields.List(fields.Nested(CredentialExchangeSchema()))
class CredentialSchema(Schema):
"""Result schema for a credential query."""
# properties undefined
class CredentialListSchema(Schema):
"""Result schema for a credential query."""
results = fields.List(fields.Nested(CredentialSchema()))
@docs(tags=["credentials"], summary="Fetch a credential from wallet by id")
@response_schema(CredentialSchema(), 200)
async def credentials_get(request: web.BaseRequest):
"""
Request handler for retrieving a credential.
Args:
request: aiohttp request object
Returns:
The credential response
"""
context = request.app["request_context"]
credential_id = request.match_info["id"]
holder: BaseHolder = await context.inject(BaseHolder)
try:
credential = await holder.get_credential(credential_id)
except WalletNotFoundError:
raise web.HTTPNotFound()
return web.json_response(credential)
@docs(tags=["credentials"], summary="Remove a credential from the wallet by id")
async def credentials_remove(request: web.BaseRequest):
"""
    Request handler for removing a credential from the wallet.
    Args:
        request: aiohttp request object
    Returns:
        An empty JSON response
"""
context = request.app["request_context"]
credential_id = request.match_info["id"]
holder: BaseHolder = await context.inject(BaseHolder)
try:
await holder.delete_credential(credential_id)
except WalletNotFoundError:
raise web.HTTPNotFound()
return web.json_response({})
@docs(
tags=["credentials"],
parameters=[
{
"name": "start",
"in": "query",
"schema": {"type": "string"},
"required": False,
},
{
"name": "count",
"in": "query",
"schema": {"type": "string"},
"required": False,
},
{"name": "wql", "in": "query", "schema": {"type": "string"}, "required": False},
],
summary="Fetch credentials from wallet",
)
@response_schema(CredentialListSchema(), 200)
async def credentials_list(request: web.BaseRequest):
"""
Request handler for searching credential records.
Args:
request: aiohttp request object
Returns:
The credential list response
"""
context = request.app["request_context"]
start = request.query.get("start")
count = request.query.get("count")
# url encoded json wql
encoded_wql = request.query.get("wql") or "{}"
wql = json.loads(encoded_wql)
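    # `wql` is a wallet query (WQL) filter passed straight to the holder, e.g. a
    # tag filter such as {"schema_id": "..."} (illustrative value only).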
# defaults
start = int(start) if isinstance(start, str) else 0
count = int(count) if isinstance(count, str) else 10
holder: BaseHolder = await context.inject(BaseHolder)
credentials = await holder.get_credentials(start, count, wql)
return web.json_response({"results": credentials})
@docs(tags=["credential_exchange"], summary="Fetch all credential exchange records")
@response_schema(CredentialExchangeListSchema(), 200)
async def credential_exchange_list(request: web.BaseRequest):
"""
Request handler for searching credential exchange records.
Args:
request: aiohttp request object
Returns:
The credential exchange list response
"""
context = request.app["request_context"]
tag_filter = {}
for param_name in (
"connection_id",
"initiator",
"state",
"credential_definition_id",
"schema_id",
):
if param_name in request.query and request.query[param_name] != "":
tag_filter[param_name] = request.query[param_name]
records = await CredentialExchange.query(context, tag_filter)
return web.json_response({"results": [record.serialize() for record in records]})
@docs(tags=["credential_exchange"], summary="Fetch a single credential exchange record")
@response_schema(CredentialExchangeSchema(), 200)
async def credential_exchange_retrieve(request: web.BaseRequest):
"""
Request handler for fetching a single credential exchange record.
Args:
request: aiohttp request object
Returns:
The credential exchange record response
"""
context = request.app["request_context"]
credential_exchange_id = request.match_info["id"]
try:
record = await CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
return web.json_response(record.serialize())
@docs(
tags=["credential_exchange"],
summary="Sends a credential and automates the entire flow",
)
@request_schema(CredentialSendRequestSchema())
@response_schema(CredentialSendResultSchema(), 200)
async def credential_exchange_send(request: web.BaseRequest):
"""
    Request handler for sending a credential, automating the entire flow.
    Args:
        request: aiohttp request object
    Returns:
        The credential exchange record.
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
connection_id = body.get("connection_id")
credential_definition_id = body.get("credential_definition_id")
credential_values = body.get("credential_values")
credential_manager = CredentialManager(context)
connection_record = await ConnectionRecord.retrieve_by_id(context, connection_id)
if not connection_record.is_active:
raise web.HTTPForbidden()
credential_exchange_record = await credential_manager.prepare_send(
credential_definition_id, connection_id, credential_values
)
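    # Run the rest of the issue-credential flow in the background so this
    # request can return immediately with the prepared exchange record.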
asyncio.ensure_future(
credential_manager.perform_send(credential_exchange_record, outbound_handler)
)
return web.json_response(credential_exchange_record.serialize())
@docs(tags=["credential_exchange"], summary="Sends a credential offer")
@request_schema(CredentialOfferRequestSchema())
@response_schema(CredentialOfferResultSchema(), 200)
async def credential_exchange_send_offer(request: web.BaseRequest):
"""
Request handler for sending a credential offer.
Args:
request: aiohttp request object
Returns:
The credential offer details.
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
connection_id = body.get("connection_id")
credential_definition_id = body.get("credential_definition_id")
credential_manager = CredentialManager(context)
connection_record = await ConnectionRecord.retrieve_by_id(context, connection_id)
if not connection_record.is_active:
raise web.HTTPForbidden()
credential_exchange_record = await credential_manager.create_offer(
credential_definition_id, connection_id
)
(
credential_exchange_record,
credential_offer_message,
) = await credential_manager.offer_credential(credential_exchange_record)
await outbound_handler(credential_offer_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(tags=["credential_exchange"], summary="Sends a credential request")
@response_schema(CredentialRequestResultSchema(), 200)
async def credential_exchange_send_request(request: web.BaseRequest):
"""
Request handler for sending a credential request.
Args:
request: aiohttp request object
Returns:
The credential request details.
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
credential_exchange_id = request.match_info["id"]
credential_exchange_record = await CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
connection_id = credential_exchange_record.connection_id
assert credential_exchange_record.state == CredentialExchange.STATE_OFFER_RECEIVED
credential_manager = CredentialManager(context)
connection_record = await ConnectionRecord.retrieve_by_id(context, connection_id)
if not connection_record.is_active:
raise web.HTTPForbidden()
(
credential_exchange_record,
credential_request_message,
) = await credential_manager.create_request(
credential_exchange_record, connection_record
)
await outbound_handler(credential_request_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(tags=["credential_exchange"], summary="Sends a credential")
@request_schema(CredentialIssueRequestSchema())
@response_schema(CredentialIssueResultSchema(), 200)
async def credential_exchange_issue(request: web.BaseRequest):
"""
Request handler for sending a credential.
Args:
request: aiohttp request object
Returns:
The credential details.
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
credential_values = body["credential_values"]
credential_exchange_id = request.match_info["id"]
credential_exchange_record = await CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
connection_id = credential_exchange_record.connection_id
assert credential_exchange_record.state == CredentialExchange.STATE_REQUEST_RECEIVED
credential_manager = CredentialManager(context)
connection_record = await ConnectionRecord.retrieve_by_id(context, connection_id)
if not connection_record.is_active:
raise web.HTTPForbidden()
credential_exchange_record.credential_values = credential_values
(
credential_exchange_record,
credential_issue_message,
) = await credential_manager.issue_credential(credential_exchange_record)
await outbound_handler(credential_issue_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(
tags=["credential_exchange"],
summary="Remove an existing credential exchange record",
)
async def credential_exchange_remove(request: web.BaseRequest):
"""
Request handler for removing a credential exchange record.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
credential_exchange_id = request.match_info["id"]
    try:
credential_exchange_record = await CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
await credential_exchange_record.delete_record(context)
return web.json_response({})
async def register(app: web.Application):
"""Register routes."""
app.add_routes(
[
web.get("/credential/{id}", credentials_get),
web.post("/credential/{id}/remove", credentials_remove),
web.get("/credentials", credentials_list),
web.get("/credential_exchange", credential_exchange_list),
web.get("/credential_exchange/{id}", credential_exchange_retrieve),
web.post("/credential_exchange/send", credential_exchange_send),
web.post("/credential_exchange/send-offer", credential_exchange_send_offer),
web.post(
"/credential_exchange/{id}/send-request",
credential_exchange_send_request,
),
web.post("/credential_exchange/{id}/issue", credential_exchange_issue),
web.post("/credential_exchange/{id}/remove", credential_exchange_remove),
]
)
| 30.181416
| 88
| 0.718077
|
b4201caa7eb7df7755c46ac5e63f4c7a7c0390f7
| 4,169
|
py
|
Python
|
benchmark/startQiskit_QC2414.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC2414.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC2414.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=32
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
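    # Elementwise XOR of two equal-length bit strings; note that the result is
    # returned in reverse order (res[::-1]).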
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
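    # Inner product of two bit strings modulo 2, returned as "0" or "1".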
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
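    # For every input bit string on which f evaluates to "1", wrap a
    # multi-controlled Toffoli in X gates so its control pattern matches that
    # bit string and flips the target qubit.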
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
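# Illustrative sketch (not part of the original benchmark): an oracle for a
# 2-bit function can be built and inspected on its own, e.g.
#   demo_oracle = build_oracle(2, lambda rep: "1" if rep == "11" else "0")
#   print(demo_oracle.count_ops())
# `demo_oracle` is a hypothetical name used only for this example.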
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.x(input_qubit[3]) # number=1
prog.h(input_qubit[0]) # number=18
prog.x(input_qubit[1]) # number=28
prog.cz(input_qubit[3],input_qubit[0]) # number=19
prog.h(input_qubit[2]) # number=24
prog.h(input_qubit[0]) # number=20
prog.rx(-1.8378317023500288,input_qubit[1]) # number=25
prog.z(input_qubit[3]) # number=14
prog.cx(input_qubit[3],input_qubit[0]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[3]) # number=16
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[3]) # number=29
prog.cz(input_qubit[0],input_qubit[3]) # number=30
prog.h(input_qubit[3]) # number=31
prog.x(input_qubit[3]) # number=22
prog.cx(input_qubit[0],input_qubit[3]) # number=23
prog.z(input_qubit[1]) # number=26
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[2]) # number=12
prog.z(input_qubit[1]) # number=27
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
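    # f(rep) is the inner product of the hidden string a with the input rep,
    # modulo 2, XORed with b.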
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2414.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.741667
| 165
| 0.654354
|
33fb618fc0715ad0fd7981934de9f8ae5738ee78
| 90,084
|
py
|
Python
|
src/rbfopt/rbfopt_test_functions.py
|
aerometu/rbfopt
|
4aba6186aa7d49c10551601d77e2484f88ffee39
|
[
"BSD-3-Clause"
] | 1
|
2020-03-07T08:26:13.000Z
|
2020-03-07T08:26:13.000Z
|
src/rbfopt/rbfopt_test_functions.py
|
aerometu/rbfopt
|
4aba6186aa7d49c10551601d77e2484f88ffee39
|
[
"BSD-3-Clause"
] | null | null | null |
src/rbfopt/rbfopt_test_functions.py
|
aerometu/rbfopt
|
4aba6186aa7d49c10551601d77e2484f88ffee39
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test functions.
This module implements several known mathematical functions that can
be used to test RBFOpt.
Licensed under Revised BSD license, see LICENSE.
(C) Copyright Singapore University of Technology and Design 2014.
(C) Copyright International Business Machines Corporation 2017.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import math
import numpy as np
from rbfopt.rbfopt_black_box import RbfoptBlackBox
class branin:
"""
Branin function of the Dixon-Szego test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = ((x[1] - (5.1/(4*math.pi*math.pi))*x[0]*x[0] +
5/math.pi*x[0] - 6)**2 + 10*(1-1/(8*math.pi)) *
math.cos(x[0]) +10)
return(value)
dimension = 2
var_lower = np.array([-5, 0])
var_upper = np.array([10, 15])
optimum_point = np.array([9.42477796, 2.47499998])
additional_optima = np.array([ [-3.14159265, 12.27500000],
[3.14159265, 2.27500000] ])
optimum_value = 0.397887357729739
var_type = np.array(['R'] * 2)
# -- end class
class hartman3:
"""
Hartman3 function of the Dixon-Szego test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==3)
a = [ [3.0, 0.1, 3.0, 0.1],
[10.0, 10.0, 10.0, 10.0],
[30.0, 35.0, 30.0, 35.0] ]
p = [ [0.36890, 0.46990, 0.10910, 0.03815],
[0.11700, 0.43870, 0.87320, 0.57430],
[0.26730, 0.74700, 0.55470, 0.88280] ]
c = [1.0, 1.2, 3.0, 3.2]
value = -math.fsum([ c[i] *
math.exp(-math.fsum([a[j][i]*(x[j] - p[j][i])**2
for j in range(3)]))
for i in range(4) ])
return(value)
dimension = 3
var_lower = np.array([0, 0, 0])
var_upper = np.array([1, 1, 1])
optimum_point = np.array([0.1, 0.55592003, 0.85218259])
optimum_value = -3.8626347486217725
var_type = np.array(['R'] * 3)
# -- end class
class hartman6:
"""
Hartman6 function of the Dixon-Szego test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==6)
a = [ [10.00, 0.05, 3.00, 17.00],
[3.00, 10.00, 3.50, 8.00],
[17.00, 17.00, 1.70, 0.05],
[3.50, 0.10, 10.00, 10.00],
[1.70, 8.00, 17.00, 0.10],
[8.00, 14.00, 8.00, 14.00] ]
p = [ [0.1312, 0.2329, 0.2348, 0.4047],
[0.1696, 0.4135, 0.1451, 0.8828],
[0.5569, 0.8307, 0.3522, 0.8732],
[0.0124, 0.3736, 0.2883, 0.5743],
[0.8283, 0.1004, 0.3047, 0.1091],
[0.5886, 0.9991, 0.6650, 0.0381] ]
c = [1.0, 1.2, 3.0, 3.2]
value = -math.fsum([ c[i] *
math.exp(-math.fsum([a[j][i]*(x[j] - p[j][i])**2
for j in range(6)]))
for i in range(4) ])
return(value)
dimension = 6
var_lower = np.array([0, 0, 0, 0, 0, 0])
var_upper = np.array([1, 1, 1, 1, 1, 1])
optimum_point = np.array([0.20168952, 0.15001069, 0.47687398,
0.27533243, 0.31165162, 0.65730054])
optimum_value = -3.32236801141551
var_type = np.array(['R'] * 6)
# -- end class
class camel:
"""
Six-hump Camel function of the Dixon-Szego test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = ((4 - 2.1*x[0]**2 + x[0]**4/3)*x[0]**2 +
x[0]*x[1] + (-4 + 4*x[1]**2)*x[1]**2)
return(value)
dimension = 2
var_lower = np.array([-3, -2])
var_upper = np.array([3, 2])
optimum_point = np.array([0.08984201, -0.7126])
optimum_value = -1.0316284535
var_type = np.array(['R'] * 2)
# -- end class
class goldsteinprice:
"""
Goldstein & Price function of the Dixon-Szego test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value= ((1 + (x[0] + x[1] + 1)**2 *
(19 - 14*x[0] + 3*x[0]**2 - 14*x[1] + 6*x[0]*x[1] +
3*x[1]**2)) *
(30 + (2*x[0] - 3*x[1])**2 *
(18 - 32*x[0] + 12*x[0]**2 + 48*x[1] - 36*x[0]*x[1] +
27*x[1]**2)))
return(value)
dimension = 2
var_lower = np.array([-2, -2])
var_upper = np.array([2, 2])
optimum_point = np.array([0.0, -1.0])
optimum_value = 3
var_type = np.array(['R'] * 2)
# -- end class
class shekel5:
"""
Shekel5 function of the Dixon-Szego test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==4)
a = [ [4.0, 1.0, 8.0, 6.0, 3.0],
[4.0, 1.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 3.0],
[4.0, 1.0, 8.0, 6.0, 7.0] ]
c = [0.1, 0.2, 0.2, 0.4, 0.4]
value = -math.fsum([ 1.0 /
(math.fsum([math.fsum([ (x[i] - a[i][j])**2
for i in range(4) ]),
c[j]])) for j in range(5) ])
return(value)
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([10, 10, 10, 10])
optimum_point = np.array([4, 4, 4, 4])
optimum_value = -10.1531958509790
var_type = np.array(['R'] * 4)
# -- end class
class shekel7:
"""
Shekel7 function of the Dixon-Szego test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==4)
a = [ [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 5.0],
[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 3.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0] ]
c = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3]
value = -math.fsum([ 1.0 /
(math.fsum([math.fsum([ (x[i] - a[i][j])**2
for i in range(4) ]),
c[j]])) for j in range(7) ])
return(value)
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([10, 10, 10, 10])
optimum_point = np.array([4, 4, 4, 4])
optimum_value = -10.4028188369303
var_type = np.array(['R'] * 4)
# -- end class
class shekel10:
"""
Shekel10 function of the Dixon-Szego test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==4)
a = [ [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 5.0, 1.0, 2.0, 3.6],
[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 3.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6] ]
c = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5]
value = -math.fsum([ 1.0 /
(math.fsum([math.fsum([ (x[i] - a[i][j])**2
for i in range(4) ]),
c[j]])) for j in range(10) ])
return(value)
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([10, 10, 10, 10])
optimum_point = np.array([4, 4, 4, 4])
optimum_value = -10.53628372621960
var_type = np.array(['R'] * 4)
# -- end class
class ex4_1_1:
"""
ex4_1_1 function of the GlobalLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==1)
value = (x[0]**6 - (52.0/25.0)*x[0]**5 + (39.0/80.0)*x[0]**4 +
(71.0/10.0)*x[0]**3 - (79.0/20.0)*x[0]**2 - x[0] +
1.0/10.0)
return(value)
dimension = 1
var_lower = np.array([-2])
var_upper = np.array([11])
optimum_point = np.array([-1.19131])
optimum_value = -7.487312360731
var_type = np.array(['R'])
# -- end class
class ex4_1_2:
"""
ex4_1_2 function of the GlobalLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==1)
a = [-500, 2.5, 1.666666666, 1.25, 1.0, 0.8333333, 0.714285714,
0.625, 0.555555555, 1.0, -43.6363636, 0.41666666, 0.384615384,
0.357142857, 0.3333333, 0.3125, 0.294117647, 0.277777777,
0.263157894, 0.25, 0.238095238, 0.227272727, 0.217391304,
0.208333333, 0.2, 0.192307692, 0.185185185, 0.178571428,
0.344827586, 0.6666666, -15.48387097, 0.15625, 0.1515151,
0.14705882, 0.14285712, 0.138888888, 0.135135135, 0.131578947,
0.128205128, 0.125, 0.121951219, 0.119047619, 0.116279069,
0.113636363, 0.1111111, 0.108695652, 0.106382978, 0.208333333,
0.408163265, 0.8]
value = math.fsum([a[i]*x[0]**(i+1) for i in range(50)])
return(value)
dimension = 1
var_lower = np.array([1])
var_upper = np.array([2])
optimum_point = np.array([1.09106])
optimum_value = -663.4993631230575
var_type = np.array(['R'] * 1)
# -- end class
class ex8_1_1:
"""
ex8_1_1 function of the GlobalLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = math.cos(x[0])*math.sin(x[1]) - x[0]/(x[1]**2+1)
return(value)
dimension = 2
var_lower = np.array([-1, -1])
var_upper = np.array([2, 1])
optimum_point = np.array([2.0, 0.105783])
optimum_value = -2.0218067833
var_type = np.array(['R'] * 2)
# -- end class
class ex8_1_4:
"""
ex8_1_4 function of the GlobalLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = 12*x[0]**2-6.3*x[0]**4+x[0]**6-6*x[0]*x[1]+6*x[1]**2
return(value)
dimension = 2
var_lower = np.array([-2, -5])
var_upper = np.array([4, 2])
optimum_point = np.array([0.0, 0.0])
optimum_value = 0.0
var_type = np.array(['R'] * 2)
# -- end class
class least:
"""
least function of the GlobalLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==3)
value = ((127 + (-x[1]*math.exp(-5*x[2])) - x[0])**2 +
(151 + (-x[1]*math.exp(-3*x[2])) - x[0])**2 +
(379 + (-x[1]*math.exp(-x[2])) - x[0])**2 +
(421 + (-x[1]*math.exp(5*x[2])) - x[0])**2 +
(460 + (-x[1]*math.exp(3*x[2])) - x[0])**2 +
(426 + (-x[1]*math.exp(x[2])) - x[0])**2)
return(value)
dimension = 3
var_lower = np.array([0, -200, -5])
var_upper = np.array([600, 200, 5] )
optimum_point = np.array([516.651174172, -149.351893696, -0.206642767973])
optimum_value = 14085.139848928
var_type = np.array(['R'] * 3)
# -- end class
class rbrock:
"""
rbrock function of the GlobalLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = (100*(x[1] - x[0]**2)**2 + (1 - x[0])**2)
return(value)
dimension = 2
var_lower = np.array([-10, -10])
var_upper = np.array([5, 10])
optimum_point = np.array([1.0, 1.0])
optimum_value = 0.0
var_type = np.array(['R'] * 2)
# -- end class
class perm_6:
"""
perm function of dimension 6 from Arnold Neumaier.
http://www.mat.univie.ac.at/~neum/glopt/my_problems.html
We use parameters (6, 60) here.
"""
@staticmethod
def evaluate(x):
assert(len(x)==6)
beta = 60
value = math.fsum([ (math.fsum([((i + 1)**k + beta) *
((x[i]/(i+1))**k - 1)
for i in range(6)]))**2
for k in range(6) ]) + 1000
return(value)
dimension = 6
var_lower = np.array([-6 for i in range(6)])
var_upper = np.array([6 for i in range(6)])
optimum_point = np.array([(i+1) for i in range(6)])
optimum_value = 1000.0
var_type = np.array(['R'] * 6)
# -- end class
class perm0_8:
"""
perm0 function of dimension 8 from Arnold Neumaier.
http://www.mat.univie.ac.at/~neum/glopt/my_problems.html
We use parameters (8, 100) here.
"""
@staticmethod
def evaluate(x):
assert(len(x)==8)
beta = 100
value = math.fsum([ (math.fsum([(i + 1 + beta) *
(x[i]**k - (1/(i+1))**k)
for i in range(8)]))**2
for k in range(8) ]) + 1000
return(value)
dimension = 8
var_lower = np.array([-1 for i in range(8)])
var_upper = np.array([1 for i in range(8)])
optimum_point = np.array([1.0/(i+1) for i in range(8)])
optimum_value = 1000.0
var_type = np.array(['R'] * 8)
# -- end class
class schoen_6_1:
"""
schoen function of dimension 6 with 50 stationary points.
"""
@staticmethod
def evaluate(x):
assert(len(x)==6)
z = [[0.298854, 0.181010, 0.984817, 0.125272, 0.548396, 0.894658],
[0.800371, 0.817380, 0.398577, 0.652349, 0.250843, 0.130235],
[0.268631, 0.929778, 0.640422, 0.462004, 0.492930, 0.434955],
[0.257863, 0.729198, 0.210810, 0.364378, 0.228216, 0.947432],
[0.767627, 0.592150, 0.103788, 0.696895, 0.472449, 0.244504],
[0.369630, 0.110889, 0.072344, 0.515753, 0.068087, 0.103057],
[0.425457, 0.807081, 0.491209, 0.449497, 0.065690, 0.592775],
[0.544229, 0.619841, 0.704609, 0.573098, 0.044844, 0.305800],
[0.164031, 0.722884, 0.670496, 0.517915, 0.176386, 0.921565],
[0.153788, 0.703577, 0.899129, 0.406134, 0.941356, 0.538215],
[0.984781, 0.510479, 0.573361, 0.884599, 0.399472, 0.712935],
[0.488416, 0.403997, 0.888823, 0.048434, 0.265197, 0.478025],
[0.047985, 0.280071, 0.709960, 0.278919, 0.035737, 0.037699],
[0.656172, 0.498412, 0.458622, 0.982970, 0.041234, 0.921127],
[0.590802, 0.359690, 0.396516, 0.338153, 0.320793, 0.847369],
[0.649160, 0.846974, 0.451818, 0.064864, 0.818545, 0.955844],
[0.583716, 0.669610, 0.463098, 0.492710, 0.989690, 0.002397],
[0.097300, 0.112389, 0.128759, 0.182995, 0.262808, 0.701887],
[0.487363, 0.892520, 0.269056, 0.116046, 0.905416, 0.808013],
[0.908316, 0.023997, 0.670399, 0.985859, 0.178548, 0.450410],
[0.230409, 0.381732, 0.613667, 0.697260, 0.016950, 0.736507],
[0.132544, 0.526349, 0.650042, 0.084086, 0.979257, 0.771499],
[0.872978, 0.008826, 0.587481, 0.624637, 0.623175, 0.939539],
[0.447828, 0.836386, 0.223285, 0.422756, 0.344488, 0.555953],
[0.546839, 0.153934, 0.953017, 0.640891, 0.666774, 0.647583],
[0.762237, 0.608920, 0.401447, 0.056202, 0.203535, 0.890609],
[0.655150, 0.444544, 0.495582, 0.247926, 0.155128, 0.188004],
[0.481813, 0.387178, 0.597276, 0.634671, 0.285404, 0.714793],
[0.976385, 0.018854, 0.262585, 0.640434, 0.086314, 0.669879],
[0.120164, 0.882300, 0.057626, 0.695111, 0.735135, 0.004711],
[0.414644, 0.715618, 0.642033, 0.770645, 0.407019, 0.502945],
[0.257475, 0.620029, 0.840603, 0.638546, 0.636521, 0.883558],
[0.788980, 0.374926, 0.448016, 0.081941, 0.225763, 0.944905],
[0.661591, 0.178832, 0.790349, 0.141653, 0.424235, 0.571960],
[0.546361, 0.624907, 0.190470, 0.412713, 0.124748, 0.662788],
[0.226384, 0.065829, 0.960836, 0.767766, 0.089695, 0.441792],
[0.303675, 0.370047, 0.973692, 0.830432, 0.424719, 0.173571],
[0.548375, 0.823234, 0.334253, 0.078398, 0.097269, 0.195120],
[0.646225, 0.100478, 0.723833, 0.891035, 0.386094, 0.360272],
[0.362757, 0.114700, 0.731020, 0.783785, 0.250399, 0.244399],
[0.904335, 0.869074, 0.479004, 0.525872, 0.359411, 0.338333],
[0.563175, 0.245903, 0.694417, 0.833524, 0.205055, 0.132535],
[0.401356, 0.920963, 0.401902, 0.120625, 0.765834, 0.381552],
[0.769562, 0.279591, 0.567598, 0.017192, 0.697366, 0.813451],
[0.738572, 0.984740, 0.007616, 0.005382, 0.592976, 0.771773],
[0.683721, 0.824097, 0.731623, 0.936945, 0.182420, 0.393537],
[0.375859, 0.541929, 0.974640, 0.377459, 0.754060, 0.019335],
[0.410275, 0.619158, 0.148428, 0.419225, 0.637412, 0.204038],
[0.552701, 0.472723, 0.491747, 0.017922, 0.198525, 0.074668],
[0.749510, 0.158720, 0.395476, 0.528285, 0.143614, 0.961610]]
f = [-1000, -1000, -1000, 672.2, 861.4, 520.9, 121.0, 11.5, 48.2,
702.4, 536.2, 457.7, 801.3, 787.7, 768.6, 292.4, 960.0, 573.1,
303.7, 283.3, 474.1, 216.9, 462.2, 853.6, 677.1, 464.6, 830.6,
831.8, 109.6, 967.6, 122.9, 896.2, 490.2, 710.4, 81.1, 802.9,
999.8, 945.5, 672.3, 712.9, 235.8, 266.5, 772.4, 326.6, 585.5,
16.9, 135.9, 224.2, 382.1, 614.6]
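        # Schoen's test function is a rational interpolant:
        #   S(x) = sum_i f[i] * prod_{j != i} |x - z[j]|^2
        #          / sum_i prod_{j != i} |x - z[j]|^2
        # so S(z[i]) = f[i] at every listed point; the entries with f = -1000
        # are the global minima. The same construction is used by the other
        # schoen_* classes below.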
numerator = 0.0
denominator = 0.0
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= math.fsum([ (x[k] - z[j][k])**2
for k in range(6) ])
numerator += f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
dimension = 6
var_lower = np.array([0 for i in range(6)])
var_upper = np.array([1 for i in range(6)])
optimum_point = np.array([0.298854, 0.181010, 0.984817,
0.125272, 0.548396, 0.894658])
optimum_value = -1000
var_type = np.array(['R'] * 6)
# -- end class
class schoen_6_2:
"""
schoen function of dimension 6 with 50 stationary points.
"""
@staticmethod
def evaluate(x):
assert(len(x)==6)
z = [[0.669711, 0.815540, 0.646120, 0.377447, 0.111538, 0.040529],
[0.000632, 0.706804, 0.857031, 0.473778, 0.993569, 0.616184],
[0.625617, 0.880221, 0.534547, 0.760235, 0.276998, 0.735438],
[0.774577, 0.922914, 0.947791, 0.315328, 0.414841, 0.785803],
[0.079768, 0.131498, 0.225123, 0.464621, 0.638041, 0.992795],
[0.471038, 0.244503, 0.565776, 0.898397, 0.604639, 0.306230],
[0.642233, 0.482219, 0.034943, 0.934805, 0.972714, 0.153664],
[0.550151, 0.310507, 0.042126, 0.230722, 0.444375, 0.117355],
[0.789984, 0.488482, 0.065237, 0.842940, 0.793454, 0.799489],
[0.850183, 0.754551, 0.516033, 0.166362, 0.201966, 0.044234],
[0.000601, 0.896758, 0.304433, 0.149125, 0.178398, 0.871836],
[0.056787, 0.932745, 0.218009, 0.778061, 0.131847, 0.356237],
[0.210266, 0.221479, 0.014831, 0.200901, 0.656693, 0.891819],
[0.528515, 0.178025, 0.188138, 0.411485, 0.217833, 0.907579],
[0.195801, 0.663099, 0.477312, 0.395250, 0.655791, 0.820570],
[0.933208, 0.789323, 0.350520, 0.855434, 0.491082, 0.874993],
[0.251047, 0.543513, 0.529644, 0.218495, 0.351637, 0.608904],
[0.963286, 0.793004, 0.650148, 0.881362, 0.904832, 0.005397],
[0.431744, 0.438965, 0.044544, 0.834968, 0.330614, 0.451282],
[0.234845, 0.328576, 0.388284, 0.339183, 0.206086, 0.600034],
[0.512783, 0.961787, 0.959109, 0.632098, 0.910614, 0.912025],
[0.454168, 0.743189, 0.834284, 0.955817, 0.072172, 0.523068],
[0.696968, 0.720236, 0.341060, 0.054580, 0.045599, 0.549192],
[0.272955, 0.318845, 0.700767, 0.426325, 0.895755, 0.843128],
[0.992189, 0.332899, 0.272784, 0.019284, 0.073711, 0.434800],
[0.154276, 0.639611, 0.924641, 0.587242, 0.358453, 0.548022],
[0.021506, 0.450392, 0.515150, 0.032232, 0.650223, 0.849384],
[0.316499, 0.513234, 0.958219, 0.843587, 0.125408, 0.836643],
[0.538587, 0.261750, 0.732136, 0.030271, 0.893345, 0.270532],
[0.987469, 0.708780, 0.446487, 0.968784, 0.734448, 0.788229],
[0.353358, 0.135036, 0.249018, 0.565029, 0.740519, 0.250807],
[0.810372, 0.656510, 0.472093, 0.225741, 0.420513, 0.202519],
[0.848128, 0.551586, 0.513140, 0.956164, 0.483389, 0.404478],
[0.292239, 0.297077, 0.934202, 0.468329, 0.872274, 0.992632],
[0.828869, 0.534749, 0.716451, 0.405855, 0.164485, 0.531068],
[0.130616, 0.757677, 0.284500, 0.438300, 0.957643, 0.725899],
[0.503542, 0.640368, 0.381914, 0.847206, 0.134660, 0.762294],
[0.653851, 0.646544, 0.436036, 0.944225, 0.310369, 0.392362],
[0.539397, 0.027168, 0.697972, 0.209293, 0.992890, 0.008113],
[0.902045, 0.171034, 0.194924, 0.620057, 0.002203, 0.557433],
[0.802612, 0.085835, 0.380626, 0.492568, 0.238166, 0.961837],
[0.466993, 0.647847, 0.113397, 0.015357, 0.928904, 0.166425],
[0.892021, 0.869756, 0.681364, 0.129555, 0.394682, 0.745036],
[0.060675, 0.869904, 0.757236, 0.220765, 0.615988, 0.754288],
[0.031815, 0.340961, 0.455958, 0.529616, 0.840036, 0.365200],
[0.834595, 0.603639, 0.745330, 0.085080, 0.184636, 0.238718],
[0.575681, 0.250761, 0.874497, 0.870401, 0.854591, 0.968971],
[0.359629, 0.724830, 0.455053, 0.120311, 0.258563, 0.932004],
[0.209891, 0.990298, 0.767661, 0.284193, 0.375076, 0.154363],
[0.410402, 0.437385, 0.639614, 0.946647, 0.579466, 0.524775]]
f = [-1000, -1000, -1000, 109.6, 132.4, 558.2, 158.0, 6.2, 205.4,
593.9, 2.4, 399.8, 395.9, 212.6, 976.1, 104.4, 552.1, 436.3,
837.1, 283.7, 779.7, 392.1, 85.8, 885.1, 401.5, 367.5, 694.4,
691.6, 933.1, 590.7, 246.2, 370.0, 54.3, 719.4, 95.2, 276.0,
829.1, 613.6, 242.8, 424.6, 320.6, 666.1, 479.2, 420.0, 956.6,
241.0, 21.1, 169.8, 178.1, 394.4]
numerator = 0.0
denominator = 0.0
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= math.fsum([ (x[k] - z[j][k])**2
for k in range(6) ])
numerator += f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
dimension = 6
var_lower = np.array([0 for i in range(6)])
var_upper = np.array([1 for i in range(6)])
optimum_point = np.array([0.669711, 0.815540, 0.646120,
0.377447, 0.111538, 0.040529])
optimum_value = -1000
var_type = np.array(['R'] * 6)
# -- end class
class schoen_10_1:
"""
schoen function of dimension 10 with 50 stationary points.
"""
@staticmethod
def evaluate(x):
assert(len(x)==10)
z = [[0.914871, 0.765230, 0.139426, 0.617466, 0.823635,
0.794003, 0.801171, 0.568811, 0.279434, 0.540422],
[0.976983, 0.593277, 0.701115, 0.585262, 0.669106,
0.272906, 0.177127, 0.143389, 0.561181, 0.018744],
[0.385208, 0.984106, 0.390066, 0.905970, 0.169600,
0.191291, 0.564157, 0.689910, 0.857031, 0.715390],
[0.975998, 0.536904, 0.819333, 0.801793, 0.564454,
0.336124, 0.654190, 0.044197, 0.717416, 0.465807],
[0.750519, 0.415284, 0.258927, 0.736115, 0.597744,
0.763716, 0.747691, 0.969633, 0.188117, 0.964954],
[0.412888, 0.671756, 0.380214, 0.558595, 0.768370,
0.998320, 0.212183, 0.606757, 0.531315, 0.303569],
[0.196682, 0.139879, 0.108608, 0.736975, 0.755971,
0.021390, 0.852398, 0.188596, 0.920133, 0.045012],
[0.956270, 0.729258, 0.397664, 0.013146, 0.519861,
0.300011, 0.008396, 0.820346, 0.176841, 0.402298],
[0.126432, 0.872346, 0.923581, 0.297492, 0.992744,
0.486525, 0.915493, 0.589980, 0.498242, 0.989945],
[0.697409, 0.026641, 0.875467, 0.503039, 0.563285,
0.096769, 0.933643, 0.884419, 0.585825, 0.395465],
[0.494783, 0.824300, 0.153326, 0.202651, 0.579815,
0.416954, 0.707624, 0.497959, 0.568876, 0.812841],
[0.126963, 0.757337, 0.648583, 0.787445, 0.822586,
0.401155, 0.301350, 0.562707, 0.744074, 0.088372],
[0.293611, 0.835864, 0.925111, 0.760322, 0.729456,
0.096840, 0.651466, 0.975836, 0.691353, 0.038384],
[0.999250, 0.916829, 0.205699, 0.027241, 0.156956,
0.206598, 0.175242, 0.811219, 0.660192, 0.119865],
[0.387978, 0.665180, 0.774376, 0.135223, 0.766238,
0.380668, 0.058279, 0.727506, 0.991527, 0.345759],
[0.299341, 0.066231, 0.680305, 0.392230, 0.319985,
0.698292, 0.100236, 0.394973, 0.096232, 0.362943],
[0.281548, 0.860858, 0.647870, 0.981650, 0.110777,
0.836484, 0.697387, 0.659942, 0.694425, 0.434991],
[0.606706, 0.052287, 0.858208, 0.738885, 0.158495,
0.002367, 0.933796, 0.112986, 0.647308, 0.421573],
[0.776505, 0.101364, 0.610406, 0.275033, 0.548409,
0.998967, 0.536743, 0.943903, 0.960993, 0.251672],
[0.371347, 0.491122, 0.772374, 0.860206, 0.752131,
0.338591, 0.826739, 0.312111, 0.768881, 0.862719],
[0.866886, 0.358220, 0.131205, 0.276334, 0.334111,
0.429525, 0.752197, 0.167524, 0.437764, 0.162916],
[0.584246, 0.511215, 0.659647, 0.349220, 0.954428,
0.477982, 0.386041, 0.813944, 0.753530, 0.983276],
[0.697327, 0.499835, 0.530487, 0.599958, 0.497257,
0.998852, 0.106262, 0.186978, 0.887481, 0.749174],
[0.041611, 0.278918, 0.999095, 0.825221, 0.218320,
0.383711, 0.077041, 0.642061, 0.668906, 0.758298],
[0.072437, 0.592862, 0.040655, 0.446330, 0.651659,
0.055738, 0.631924, 0.890039, 0.192989, 0.741054],
[0.533886, 0.135079, 0.787647, 0.593408, 0.749228,
0.749045, 0.190386, 0.755508, 0.465321, 0.465156],
[0.748843, 0.696419, 0.882124, 0.843895, 0.858057,
0.220107, 0.350310, 0.102947, 0.453576, 0.875940],
[0.560231, 0.580247, 0.381834, 0.807535, 0.184636,
0.615702, 0.628408, 0.081783, 0.793384, 0.233639],
[0.384827, 0.589138, 0.630013, 0.634506, 0.630712,
0.521293, 0.494486, 0.681700, 0.288512, 0.319808],
[0.721978, 0.452289, 0.426726, 0.323106, 0.781584,
0.999325, 0.043670, 0.884560, 0.520936, 0.430684],
[0.810388, 0.624041, 0.811624, 0.105973, 0.199807,
0.440644, 0.864152, 0.282280, 0.397116, 0.499932],
[0.973889, 0.677797, 0.080137, 0.549098, 0.625445,
0.577342, 0.538642, 0.388039, 0.552273, 0.793807],
[0.365176, 0.228017, 0.623500, 0.084450, 0.177343,
0.910108, 0.632719, 0.521458, 0.894843, 0.707893],
[0.502069, 0.622312, 0.958019, 0.744999, 0.515695,
0.407885, 0.590739, 0.736542, 0.297555, 0.237955],
[0.313835, 0.090014, 0.336274, 0.433171, 0.330864,
0.105751, 0.160367, 0.651934, 0.207260, 0.293577],
[0.886072, 0.592935, 0.498116, 0.321835, 0.011216,
0.543911, 0.506579, 0.216779, 0.406812, 0.261349],
[0.789947, 0.881332, 0.696597, 0.742955, 0.252224,
0.718157, 0.188217, 0.371208, 0.178640, 0.347720],
[0.482759, 0.663618, 0.622706, 0.036170, 0.278854,
0.088147, 0.482808, 0.134824, 0.028828, 0.944537],
[0.184705, 0.662346, 0.917194, 0.186490, 0.918392,
0.955111, 0.636015, 0.447595, 0.813716, 0.372839],
[0.231741, 0.637199, 0.745257, 0.201568, 0.697485,
0.897022, 0.239791, 0.495219, 0.153831, 0.387172],
[0.198061, 0.194102, 0.550259, 0.751804, 0.503973,
0.034252, 0.788267, 0.731760, 0.118338, 0.057247],
[0.068470, 0.545180, 0.668845, 0.714932, 0.688014,
0.203845, 0.146138, 0.109039, 0.470214, 0.441797],
[0.085180, 0.142394, 0.938665, 0.071422, 0.946796,
0.697832, 0.472400, 0.161384, 0.325715, 0.122550],
[0.637672, 0.986961, 0.969438, 0.989508, 0.381318,
0.800871, 0.012035, 0.326007, 0.459124, 0.645374],
[0.147210, 0.954608, 0.361146, 0.094699, 0.092327,
0.301664, 0.478447, 0.008274, 0.680576, 0.004184],
[0.768792, 0.812618, 0.915766, 0.029070, 0.506944,
0.457816, 0.839167, 0.024706, 0.990756, 0.088779],
[0.872678, 0.601536, 0.948347, 0.621023, 0.415621,
0.289340, 0.291338, 0.190461, 0.664007, 0.583513],
[0.641216, 0.700152, 0.080576, 0.355500, 0.294700,
0.338614, 0.563964, 0.528079, 0.759223, 0.508432],
[0.738489, 0.077376, 0.429485, 0.300586, 0.576927,
0.185931, 0.231659, 0.954833, 0.614178, 0.092903],
[0.729321, 0.318607, 0.768657, 0.899419, 0.749499,
0.623403, 0.671793, 0.052835, 0.973726, 0.168336]]
f = [-1000, -1000, -1000, 799.1, 396.8, 370.3, 400.2, 239.7,
678.8, 868.9, 564.4, 681.6, 153.0, 760.7, 562.9, 434.9,
579.2, 260.6, 88.5, 601.3, 754.8, 894.8, 672.8, 633.7, 921.8,
43.2, 286.2, 945.5, 716.0, 72.7, 631.2, 640.3, 425.1, 825.8,
555.8, 136.9, 805.7, 786.5, 400.0, 856.4, 548.0, 510.8, 52.3,
111.6, 686.6, 888.2, 315.4, 333.9, 61.5, 755.2]
numerator = 0.0
denominator = 0.0
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= math.fsum([ (x[k] - z[j][k])**2
for k in range(10) ])
numerator += f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
dimension = 10
var_lower = np.array([0 for i in range(10)])
var_upper = np.array([1 for i in range(10)])
optimum_point = np.array([0.914871, 0.765230, 0.139426, 0.617466,
0.823635, 0.794003, 0.801171, 0.568811,
0.279434, 0.540422])
optimum_value = -1000
var_type = np.array(['R'] * 10)
# -- end class
class schoen_10_2:
"""
schoen function of dimension 10 with 50 stationary points.
"""
@staticmethod
def evaluate(x):
assert(len(x)==10)
z = [[0.131461, 0.965235, 0.046134, 0.983011, 0.719813,
0.827542, 0.662422, 0.570546, 0.578707, 0.013264],
[0.068454, 0.682785, 0.582736, 0.434517, 0.310613,
0.869876, 0.993949, 0.629156, 0.590599, 0.356378],
[0.632837, 0.961665, 0.015079, 0.378878, 0.805608,
0.685239, 0.528658, 0.752934, 0.717790, 0.374865],
[0.286191, 0.912944, 0.400358, 0.902532, 0.324887,
0.850063, 0.483503, 0.764147, 0.147726, 0.159851],
[0.303483, 0.754790, 0.090527, 0.653764, 0.164323,
0.402931, 0.593477, 0.448444, 0.711483, 0.113869],
[0.057398, 0.302029, 0.596351, 0.565466, 0.694204,
0.974864, 0.323989, 0.298493, 0.859391, 0.238714],
[0.139267, 0.214902, 0.608462, 0.297987, 0.499810,
0.578553, 0.548077, 0.208442, 0.046162, 0.246848],
[0.680420, 0.783181, 0.828103, 0.475810, 0.680401,
0.188455, 0.015200, 0.650103, 0.762389, 0.063985],
[0.409243, 0.600740, 0.302354, 0.588411, 0.436291,
0.294790, 0.701477, 0.994162, 0.433749, 0.535320],
[0.077949, 0.530126, 0.869737, 0.387811, 0.705317,
0.632911, 0.442087, 0.082918, 0.441383, 0.591975],
[0.622628, 0.054964, 0.020475, 0.145616, 0.163873,
0.321546, 0.282867, 0.743494, 0.750568, 0.732386],
[0.538574, 0.066932, 0.225204, 0.290045, 0.613242,
0.529365, 0.384018, 0.946557, 0.974384, 0.425297],
[0.108817, 0.850094, 0.886417, 0.161581, 0.082973,
0.506354, 0.589650, 0.638991, 0.045151, 0.688464],
[0.917742, 0.365119, 0.484176, 0.173231, 0.210253,
0.303688, 0.992141, 0.023109, 0.977178, 0.535146],
[0.183469, 0.198085, 0.511596, 0.275610, 0.753700,
0.437328, 0.986237, 0.028654, 0.767921, 0.997910],
[0.484908, 0.759122, 0.577318, 0.359934, 0.935730,
0.617833, 0.770173, 0.311175, 0.004831, 0.157457],
[0.634077, 0.236972, 0.016427, 0.261753, 0.349712,
0.245870, 0.412238, 0.523557, 0.985327, 0.094060],
[0.477875, 0.803438, 0.496728, 0.848920, 0.497386,
0.938203, 0.279797, 0.287076, 0.395184, 0.980546],
[0.450215, 0.193712, 0.975838, 0.103925, 0.077410,
0.709573, 0.253072, 0.311723, 0.885664, 0.204528],
[0.557312, 0.815198, 0.097914, 0.539142, 0.826048,
0.130070, 0.049858, 0.223634, 0.076387, 0.831224],
[0.927559, 0.324916, 0.563393, 0.209281, 0.344394,
0.953384, 0.298679, 0.890637, 0.966615, 0.380006],
[0.026403, 0.997573, 0.479163, 0.379686, 0.687928,
0.832002, 0.214326, 0.348248, 0.073151, 0.062646],
[0.726869, 0.911171, 0.961920, 0.874884, 0.216867,
0.076966, 0.776240, 0.495777, 0.963492, 0.425246],
[0.357483, 0.486330, 0.759177, 0.748362, 0.889904,
0.350438, 0.232983, 0.823613, 0.792656, 0.441264],
[0.875826, 0.359459, 0.214808, 0.425850, 0.493328,
0.456048, 0.523145, 0.504154, 0.090128, 0.472437],
[0.813400, 0.808407, 0.427211, 0.902524, 0.210376,
0.490662, 0.915939, 0.169439, 0.078865, 0.485371],
[0.877334, 0.982207, 0.679085, 0.486335, 0.940715,
0.585964, 0.289279, 0.694886, 0.172625, 0.201457],
[0.141599, 0.476124, 0.762246, 0.067045, 0.411332,
0.813196, 0.134138, 0.302390, 0.856145, 0.349243],
[0.346912, 0.082142, 0.787442, 0.857465, 0.371129,
0.448550, 0.967943, 0.775340, 0.943681, 0.656127],
[0.619267, 0.547196, 0.470422, 0.141566, 0.584198,
0.952226, 0.196462, 0.629549, 0.685469, 0.824365],
[0.014209, 0.789812, 0.836373, 0.186139, 0.493840,
0.710697, 0.910033, 0.368287, 0.865953, 0.140892],
[0.482763, 0.072574, 0.026730, 0.143687, 0.739505,
0.419649, 0.013683, 0.662644, 0.785254, 0.234561],
[0.821421, 0.844100, 0.153937, 0.671762, 0.290469,
0.631347, 0.591435, 0.498966, 0.043395, 0.176771],
[0.404994, 0.496656, 0.951774, 0.497357, 0.715401,
0.023378, 0.493045, 0.342766, 0.117055, 0.698590],
[0.985857, 0.831692, 0.423498, 0.215757, 0.341260,
0.790760, 0.941186, 0.716883, 0.062641, 0.582012],
[0.676905, 0.280897, 0.800638, 0.898913, 0.735995,
0.592412, 0.433021, 0.432772, 0.874477, 0.112375],
[0.377382, 0.118941, 0.529204, 0.419434, 0.673891,
0.074904, 0.129868, 0.819585, 0.220536, 0.353223],
[0.233415, 0.136703, 0.487256, 0.777498, 0.901915,
0.612402, 0.778635, 0.436718, 0.484520, 0.641969],
[0.273297, 0.670196, 0.344525, 0.669751, 0.180230,
0.530085, 0.393284, 0.326043, 0.260840, 0.364690],
[0.931213, 0.676123, 0.912481, 0.898258, 0.001887,
0.408306, 0.917215, 0.496959, 0.287951, 0.562511],
[0.047196, 0.780338, 0.895994, 0.088169, 0.552425,
0.130790, 0.308504, 0.232476, 0.187952, 0.105936],
[0.343517, 0.356222, 0.416018, 0.450278, 0.487765,
0.040510, 0.592363, 0.771635, 0.577849, 0.315843],
[0.527759, 0.529503, 0.210423, 0.756794, 0.892670,
0.339374, 0.445837, 0.363265, 0.432114, 0.942045],
[0.560107, 0.110906, 0.115725, 0.761393, 0.969105,
0.921166, 0.455014, 0.593512, 0.111887, 0.217300],
[0.463382, 0.635591, 0.329484, 0.573602, 0.492558,
0.474174, 0.371906, 0.850465, 0.467637, 0.261373],
[0.033051, 0.422543, 0.294155, 0.699026, 0.846231,
0.047967, 0.686826, 0.480273, 0.463181, 0.345601],
[0.285473, 0.723925, 0.202386, 0.671909, 0.685277,
0.993969, 0.415329, 0.155218, 0.233826, 0.088752],
[0.029705, 0.651519, 0.813239, 0.677718, 0.961189,
0.285385, 0.824635, 0.837670, 0.524970, 0.815489],
[0.519627, 0.508274, 0.141067, 0.156163, 0.274566,
0.536322, 0.834749, 0.852042, 0.656166, 0.964211],
[0.119675, 0.971352, 0.052983, 0.178217, 0.408438,
0.215091, 0.102098, 0.256312, 0.051758, 0.906712]]
f = [-1000, -1000, -1000, 90.4, 830.9, 52.7, 375.2, 289.7, 244.1,
470.2, 111.7, 968.9, 903.4, 918.5, 820.3, 441.2, 687.5, 836.9,
11.0, 454.5, 929.3, 952.6, 937.2, 870.5, 211.7, 378.4, 320.3,
729.6, 420.8, 213.8, 717.7, 285.4, 522.8, 748.3, 371.0, 501.2,
568.6, 111.9, 645.2, 486.2, 157.0, 968.5, 137.6, 127.2, 943.4,
437.2, 199.7, 415.4, 966.0, 362.3]
numerator = 0.0
denominator = 0.0
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= math.fsum([ (x[k] - z[j][k])**2
for k in range(10) ])
numerator += f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
dimension = 10
var_lower = np.array([0 for i in range(10)])
var_upper = np.array([1 for i in range(10)])
optimum_point = np.array([0.131461, 0.965235, 0.046134, 0.983011,
0.719813, 0.827542, 0.662422, 0.570546,
0.578707, 0.013264])
optimum_value = -1000
var_type = np.array(['R'] * 10)
# -- end class
class schaeffer_f7_12_1:
"""
Schaeffer F7 function.
"""
@staticmethod
def evaluate(x):
assert(len(x)==12)
opt = np.array([-34.32567, -34.98896, 07.69262, 30.3388, -48.24371,
23.18355, 24.93374, 32.07436, 46.86153, 04.64872,
25.64591, -16.69128])
value = 0
normalizer = 1.0/float(len(x)-1)
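        # Sum Schaffer-F7-style ridge terms over consecutive coordinate pairs,
        # shifted so that x = opt makes every si = 0; the trailing "- 10"
        # places the global optimum value at -10.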
for i in range(len(x)-1):
si = 2**i*math.sqrt((x[i]-opt[i])**2 + (x[i+1]-opt[i+1])**2)
value += (normalizer * math.sqrt(si) *
(math.sin(50*si**0.20) + 1))**2
return value - 10
dimension = 12
var_lower = np.array([-50 for i in range(12)])
var_upper = np.array([50 for i in range(12)])
optimum_point = np.array([-34.32567, -34.98896, 07.69262, 30.3388,
-48.24371, 23.18355, 24.93374, 32.07436,
46.86153, 04.64872, 25.64591, -16.69128])
optimum_value = -10
var_type = np.array(['R'] * 12)
# -- end class
class schaeffer_f7_12_2:
"""
Schaeffer F7 function.
"""
@staticmethod
def evaluate(x):
assert(len(x)==12)
opt = np.array([-08.214, 30.69133, 48.26095, -04.94219,
15.15357, 00.4841, -13.54025, -40.78766,
-16.02916, 16.42138, 39.30248, -49.56986])
value = 0
normalizer = 1.0/float(len(x)-1)
for i in range(len(x)-1):
si = 3**i*math.sqrt((x[i]-opt[i])**2 + (x[i+1]-opt[i+1])**2)
value += (normalizer * math.sqrt(si) *
(math.sin(50*si**0.20) + 1))**2
return value + 10
dimension = 12
var_lower = np.array([-50 for i in range(12)])
var_upper = np.array([50 for i in range(12)])
optimum_point = np.array([-08.214, 30.69133, 48.26095, -04.94219,
15.15357, 00.4841, -13.54025, -40.78766,
-16.02916, 16.42138, 39.30248, -49.56986])
optimum_value = 10
var_type = np.array(['R'] * 12)
# -- end class
# After this point, all functions are MINLP
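# Constraints in the MINLP problems below are folded into the objective as
# max(0, violation) penalty terms, so infeasible points are penalized rather
# than excluded outright.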
class gear:
"""
gear function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==4)
value = ((0.14427932477276 - x[0]*x[1]/(x[2]*x[3]))**2)
return(value)
dimension = 4
var_lower = np.array([12, 12, 12, 12])
var_upper = np.array([60, 60, 60, 60])
optimum_point = np.array([12.0, 23.0, 58.0, 33.0])
optimum_value = 0.0
var_type = np.array(['I'] * 4)
# -- end class
class gear4:
"""
gear4 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==5)
value = -1000000*x[0]*x[1]/(x[2]*x[3]) + 2*x[4] + 144279.32477276
# There is a constraint:
# -1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] + 144279.32477276 >= 0
penalty = 10*max(0,-(-1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] +
144279.32477276))
return(value + penalty)
dimension = 5
var_lower = np.array([12, 12, 12, 12, 0])
var_upper = np.array([60, 60, 60, 60, 100])
optimum_point = np.array([19.0, 16.0, 43.0, 49.0, 1.64342847396619])
optimum_value = 1.6434284739
var_type = np.array(['I'] * 4 + ['R'])
# -- end class
class nvs02:
"""
nvs02 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==5)
value = (0.0001*(5.3578547*math.sqrt(x[2]) + 0.8356891*x[0]*x[4] +
37.293239*x[0]) + 5.9207859)
# There are three constraints:
# 0 <= (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
# 0.0022053*x[2]*x[4] + 85.334407) <= 92
# 90 <= (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
# 0.0021813*math.sqrt(x[2]) + 80.51249) <= 110
# 20 <= (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
# 0.0019085*x[2]*x[3] + 9.300961) <= 25
penalty = 0.0
penalty += 10*max(0, -(0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
0.0022053*x[2]*x[4] + 85.334407))
penalty += 10*max(0, (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
0.0022053*x[2]*x[4] + 85.334407) - 92)
penalty += 10*max(0, -(0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
0.0021813*math.sqrt(x[2]) + 80.51249) + 90)
penalty += 10*max(0, (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
0.0021813*math.sqrt(x[2]) + 80.51249) - 110)
penalty += 10*max(0, -(0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
0.0019085*x[2]*x[3] + 9.300961) + 20)
penalty += 10*max(0, (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
0.0019085*x[2]*x[3] + 9.300961) - 25)
return(value + penalty)
dimension = 5
var_lower = np.array([0, 0, 0, 0, 0])
var_upper = np.array([200, 200, 200, 200, 200])
optimum_point = np.array([0.0, 9.0, 9.0, 200.0, 197.0])
optimum_value = 5.9223932564100004
var_type = np.array(['I'] * 5)
# -- end class
class nvs03:
"""
nvs03 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = (-8 + x[0])**2 + (-2 + x[1])**2
# There are two constraints:
# -0.1*x[0]**2 + x[1] >= 0
# -0.333333333333333*x[0] - x[1] + 4.5 >= 0.0
penalty = 0.0
penalty += 100*max(0, -(-0.1*x[0]**2 + x[1]))
penalty += 100*max(0, -(-0.333333333333333*x[0] - x[1] + 4.5))
return(value + penalty)
dimension = 2
var_lower = np.array([0, 0])
var_upper = np.array([200, 200])
optimum_point = np.array([4.0, 2.0])
optimum_value = 16.0
var_type = np.array(['I'] * 2)
# -- end class
class nvs04:
"""
nvs04 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = 100*(0.5 + x[1] - (0.6 + x[0])**2)**2 + (0.4 - x[0])**2
return(value)
dimension = 2
var_lower = np.array([0, 0])
var_upper = np.array([200, 200])
optimum_point = np.array([1.0, 2.0])
optimum_value = 0.72
var_type = np.array(['I'] * 2)
# -- end class
class nvs06:
"""
nvs06 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = (0.1*((x[0])**2 + (1 + (x[1])**2)/(x[0])**2 +
(100 + ((x[0])**2)*(x[1])**2)/(x[0]*x[1])**4) + 1.2)
return(value)
dimension = 2
var_lower = np.array([1, 1])
var_upper = np.array([200, 200])
optimum_point = np.array([2.0, 2.0])
optimum_value = 1.7703125
var_type = np.array(['I'] * 2)
# -- end class
class nvs07:
"""
nvs07 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==3)
value = 2*x[1]**2 + x[0] + 5*x[2]
# There are two constraints:
# x[2]**2 * x[1] + 5*x[2] + 3*x[0] - 10 >= 0
# x[0] - x[2] - 2.66 >= 0
penalty = 0.0
penalty += 10*max(0, -(x[2]**2 * x[1] + 5*x[2] + 3*x[0] - 10))
penalty += 10*max(0, -(x[0] - x[2] - 2.66))
return(value + penalty)
dimension = 3
var_lower = np.array([0, 0, 0])
var_upper = np.array([200, 200, 200])
optimum_point = np.array([4.0, 0.0, 0.0])
optimum_value = 4.0
var_type = np.array(['I'] * 3)
# -- end class
class nvs09:
"""
nvs09 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==10)
value = ((math.log(x[0] - 2))**2 + (math.log(10 - x[0]))**2 +
(math.log(x[1] - 2))**2 + (math.log(10 - x[1]))**2 +
(math.log(x[2] - 2))**2 + (math.log(10 - x[2]))**2 +
(math.log(x[3] - 2))**2 + (math.log(10 - x[3]))**2 +
(math.log(x[4] - 2))**2 + (math.log(10 - x[4]))**2 +
(math.log(x[5] - 2))**2 + (math.log(10 - x[5]))**2 +
(math.log(x[6] - 2))**2 + (math.log(10 - x[6]))**2 +
(math.log(x[7] - 2))**2 + (math.log(10 - x[7]))**2 +
(math.log(x[8] - 2))**2 + (math.log(10 - x[8]))**2 +
(math.log(x[9] - 2))**2 + (math.log(10 - x[9]))**2 -
(x[0]*x[1]*x[2]*x[3]*x[4]*x[5]*x[6]*x[7]*x[8]*x[9])**0.2)
return(value)
dimension = 10
var_lower = np.array([3 for i in range(10)])
var_upper = np.array([9 for i in range(10)])
optimum_point = np.array([9, 9, 9, 9, 9, 9, 9, 9, 9, 9])
optimum_value = -43.134336918035
var_type = np.array(['I'] * 10)
# -- end class
class nvs14:
"""
nvs14 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==5)
value = (5.3578547*x[2]**2 + 0.8356891*x[0]*x[4] + 37.293239*x[0] -
40792.141)
# There are three constraints:
# 0 <= (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
# 0.0022053*x[2]*x[4] + 85.334407) <= 92
# 90 <= (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
# 0.0021813*x[2]**2 + 80.51249) <= 110
# 20 <= (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
# 0.0019085*x[2]*x[3] + 9.300961) <= 25
penalty = 0.0
penalty += 1000*max(0, -(0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
0.0022053*x[2]*x[4] + 85.334407))
penalty += 1000*max(0, (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
0.0022053*x[2]*x[4] + 85.334407) - 92)
penalty += 1000*max(0, -(0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
0.0021813*x[2]**2 + 80.51249) + 90)
penalty += 1000*max(0, (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
0.0021813*x[2]**2 + 80.51249) - 110)
penalty += 1000*max(0, -(0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
0.0019085*x[2]*x[3] + 9.300961) + 20)
penalty += 1000*max(0, (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
0.0019085*x[2]*x[3] + 9.300961) - 25)
return(value + penalty)
dimension = 5
var_lower = np.array([0, 0, 0, 0, 0])
var_upper = np.array([200, 200, 200, 200, 200])
optimum_point = np.array([0.0, 7.0, 9.0, 175.0, 200.0])
optimum_value = -40358.1547693
var_type = np.array(['I'] * 5)
# -- end class
class nvs15:
"""
nvs15 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==3)
value = (2*x[0]**2 - 8*x[0] + 2*x[1]**2 - 6*x[1] + x[2]**2 - 4*x[2] +
2*x[0]*x[1] + 2*x[0]*x[2] + 9)
# There is one constraint:
# - x[0] - x[1] - 2*x[2] + 3 >= 0
penalty = 0.0
penalty += 10*max(0, -(-x[0] - x[1] - 2*x[2] + 3))
return(value + penalty)
dimension = 3
var_lower = np.array([0, 0, 0])
var_upper = np.array([200, 200, 200])
optimum_point = np.array([2.0, 0.0, 0.0])
optimum_value = 1.0
var_type = np.array(['I'] * 3)
# -- end class
class nvs16:
"""
nvs16 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = ((1.5 - x[0]*(1 - x[1]))**2 +
(2.25 - x[0]*(1 - x[1]**2))**2 +
(2.625 - x[0]*(1 - x[1]**3))**2)
return(value)
dimension = 2
var_lower = np.array([0, 0])
var_upper = np.array([200, 200])
optimum_point = np.array([2.0, 0.0])
optimum_value = 0.703125
var_type = np.array(['I'] * 2)
# -- end class
class prob03:
"""
prob03 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = 3*x[0] + 2*x[1]
# There is one constraint:
# x[0]*x[1] - 3.5 >= 0
penalty = 10*max(0, -(x[0]*x[1] - 3.5))
return(value + penalty)
dimension = 2
var_lower = np.array([1, 1])
var_upper = np.array([5, 5])
optimum_point = np.array([2.0, 2.0])
optimum_value = 10.0
var_type = np.array(['I'] * 2)
# -- end class
class sporttournament06:
"""
sporttournament06 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==15)
value = (2*x[0]*x[2] - 2*x[0] + 2*x[2] + 2*x[0]*x[6] - 2*x[6]
+ 2*x[1]*x[4] - 2*x[1] - 2*x[4] + 2*x[1]*x[9] -
4*x[9] - 2*x[2]*x[3] + 2*x[3] - 2*x[2]*x[11] -
2*x[2]*x[13] - 2*x[3]*x[4] + 2*x[3]*x[8] - 2*x[8] -
2*x[3]*x[14] + 2*x[4]*x[5] - 2*x[5] + 2*x[4]*x[7] -
2*x[7] + 2*x[5]*x[8] - 2*x[6]*x[7] + 2*x[6]* x[11] +
2*x[6]*x[12] + 2*x[7]*x[9] + 2*x[7]*x[14] +
2*x[8]*x[10] - 2*x[10] - 2*x[8]*x[11] + 2*x[9]* x[10]
+ 2*x[9]*x[11] - 2*x[12]*x[14] + 2*x[13]*x[14])
return(value)
dimension = 15
var_lower = np.array([0] * 15)
var_upper = np.array([1] * 15)
optimum_point = np.array([0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0,
0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0])
optimum_value = -12.0
var_type = np.array(['I'] * 15)
# -- end class
class st_miqp1:
"""
st_miqp1 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==5)
value = (50*x[0]*x[0] + 42*x[0] + 50*x[1]*x[1] + 44*x[1] +
50*x[2]*x[2] + 45*x[2] + 50*x[3]*x[3]
+ 47*x[3] + 50*x[4]*x[4] + 47.5*x[4])
# There is one constraint:
# 20*x[0] + 12*x[1] + 11*x[2] + 7*x[3] + 4*x[4] - 40 >= 0
penalty = 100*max(0, -(20*x[0] + 12*x[1] + 11*x[2] + 7*x[3] +
4*x[4] - 40))
return(value + penalty)
dimension = 5
var_lower = np.array([0, 0, 0, 0, 0])
var_upper = np.array([1, 1, 1, 1, 1])
optimum_point = np.array([1.0, 1.0, 1.0, 0.0, 0.0])
optimum_value = 281.0
var_type = np.array(['I'] * 5)
# -- end class
class st_miqp3:
"""
st_miqp3 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==2)
value = (6*x[0]*x[0] - 3*x[1])
# There is one constraint:
# 4*x[0] - x[1] >= 0
penalty = 10*max(0, -(4*x[0] - x[1]))
return(value + penalty)
dimension = 2
var_lower = np.array([0, 0])
var_upper = np.array([3, 50])
optimum_point = np.array([1.0, 4.0])
optimum_value = -6.0
var_type = np.array(['I'] * 2)
# -- end class
class st_test1:
"""
st_test1 function of the MINLPLib test set.
"""
@staticmethod
def evaluate(x):
assert(len(x)==5)
value = (50*x[0]*x[0] + 42*x[0] + 50*x[1]*x[1] - 44*x[1] +
50*x[3]*x[3] - 47*x[3] + 50*x[4]*x[4] - 47.5*x[4] + 45*x[2])
# There is one constraint:
# -20*x[0] - 12*x[1] - 11*x[2] - 7*x[3] - 4*x[4] + 40 >= 0
penalty = 10*max(0, -(-20*x[0] - 12*x[1] - 11*x[2] - 7*x[3] -
4*x[4] + 40))
return(value + penalty)
dimension = 5
var_lower = np.array([0, 0, 0, 0, 0])
var_upper = np.array([1, 1, 1, 1, 1])
optimum_point = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
optimum_value = 0.0
var_type = np.array(['I'] * 5)
# -- end class
class schoen_6_1_int:
"""
schoen function of dimension 6 with 50 stationary points.
Mixed integer version.
"""
@staticmethod
def evaluate(x):
assert(len(x)==6)
z = [[0.298854, 0.181010, 0.984817, 0.125272, 0.548396, 0.894658],
[0.800371, 0.817380, 0.398577, 0.652349, 0.250843, 0.130235],
[0.268631, 0.929778, 0.640422, 0.462004, 0.492930, 0.434955],
[0.257863, 0.729198, 0.210810, 0.364378, 0.228216, 0.947432],
[0.767627, 0.592150, 0.103788, 0.696895, 0.472449, 0.244504],
[0.369630, 0.110889, 0.072344, 0.515753, 0.068087, 0.103057],
[0.425457, 0.807081, 0.491209, 0.449497, 0.065690, 0.592775],
[0.544229, 0.619841, 0.704609, 0.573098, 0.044844, 0.305800],
[0.164031, 0.722884, 0.670496, 0.517915, 0.176386, 0.921565],
[0.153788, 0.703577, 0.899129, 0.406134, 0.941356, 0.538215],
[0.984781, 0.510479, 0.573361, 0.884599, 0.399472, 0.712935],
[0.488416, 0.403997, 0.888823, 0.048434, 0.265197, 0.478025],
[0.047985, 0.280071, 0.709960, 0.278919, 0.035737, 0.037699],
[0.656172, 0.498412, 0.458622, 0.982970, 0.041234, 0.921127],
[0.590802, 0.359690, 0.396516, 0.338153, 0.320793, 0.847369],
[0.649160, 0.846974, 0.451818, 0.064864, 0.818545, 0.955844],
[0.583716, 0.669610, 0.463098, 0.492710, 0.989690, 0.002397],
[0.097300, 0.112389, 0.128759, 0.182995, 0.262808, 0.701887],
[0.487363, 0.892520, 0.269056, 0.116046, 0.905416, 0.808013],
[0.908316, 0.023997, 0.670399, 0.985859, 0.178548, 0.450410],
[0.230409, 0.381732, 0.613667, 0.697260, 0.016950, 0.736507],
[0.132544, 0.526349, 0.650042, 0.084086, 0.979257, 0.771499],
[0.872978, 0.008826, 0.587481, 0.624637, 0.623175, 0.939539],
[0.447828, 0.836386, 0.223285, 0.422756, 0.344488, 0.555953],
[0.546839, 0.153934, 0.953017, 0.640891, 0.666774, 0.647583],
[0.762237, 0.608920, 0.401447, 0.056202, 0.203535, 0.890609],
[0.655150, 0.444544, 0.495582, 0.247926, 0.155128, 0.188004],
[0.481813, 0.387178, 0.597276, 0.634671, 0.285404, 0.714793],
[0.976385, 0.018854, 0.262585, 0.640434, 0.086314, 0.669879],
[0.120164, 0.882300, 0.057626, 0.695111, 0.735135, 0.004711],
[0.414644, 0.715618, 0.642033, 0.770645, 0.407019, 0.502945],
[0.257475, 0.620029, 0.840603, 0.638546, 0.636521, 0.883558],
[0.788980, 0.374926, 0.448016, 0.081941, 0.225763, 0.944905],
[0.661591, 0.178832, 0.790349, 0.141653, 0.424235, 0.571960],
[0.546361, 0.624907, 0.190470, 0.412713, 0.124748, 0.662788],
[0.226384, 0.065829, 0.960836, 0.767766, 0.089695, 0.441792],
[0.303675, 0.370047, 0.973692, 0.830432, 0.424719, 0.173571],
[0.548375, 0.823234, 0.334253, 0.078398, 0.097269, 0.195120],
[0.646225, 0.100478, 0.723833, 0.891035, 0.386094, 0.360272],
[0.362757, 0.114700, 0.731020, 0.783785, 0.250399, 0.244399],
[0.904335, 0.869074, 0.479004, 0.525872, 0.359411, 0.338333],
[0.563175, 0.245903, 0.694417, 0.833524, 0.205055, 0.132535],
[0.401356, 0.920963, 0.401902, 0.120625, 0.765834, 0.381552],
[0.769562, 0.279591, 0.567598, 0.017192, 0.697366, 0.813451],
[0.738572, 0.984740, 0.007616, 0.005382, 0.592976, 0.771773],
[0.683721, 0.824097, 0.731623, 0.936945, 0.182420, 0.393537],
[0.375859, 0.541929, 0.974640, 0.377459, 0.754060, 0.019335],
[0.4, 0.6, 0.1, 0.4, 0.637412, 0.204038],
[0.5, 0.4, 0.4, 0.0, 0.198525, 0.074668],
[0.7, 0.1, 0.3, 0.5, 0.143614, 0.961610]]
f = [672.2, 861.4, 520.9, 121.0, 11.5, 48.2, 702.4, 536.2,
457.7, 801.3, 787.7, 768.6, 292.4, 960.0, 573.1, 303.7,
283.3, 474.1, 216.9, 462.2, 853.6, 677.1, 464.6, 830.6,
831.8, 109.6, 967.6, 122.9, 896.2, 490.2, 710.4, 81.1,
802.9, 999.8, 945.5, 672.3, 712.9, 235.8, 266.5, 772.4,
326.6, 585.5, 16.9, 135.9, 224.2, 382.1, 614.6, -1000,
-1000, -1000]
numerator = 0.0
denominator = 0.0
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= math.fsum([ (x[k]/10 - z[j][k])**2
for k in range(6) ])
numerator += f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
dimension = 6
var_lower = np.array([0 for i in range(6)])
var_upper = np.array([10 for i in range(6)])
optimum_point = np.array([04., 06., 01., 04., 06.37412, 02.04038])
optimum_value = -1000
var_type = np.array(['I'] * 4 + ['R'] * 2)
# -- end class
class schoen_6_2_int:
"""
schoen function of dimension 6 with 50 stationary points.
Mixed integer version.
"""
@staticmethod
def evaluate(x):
assert(len(x)==6)
z = [[0.669711, 0.815540, 0.646120, 0.377447, 0.111538, 0.040529],
[0.000632, 0.706804, 0.857031, 0.473778, 0.993569, 0.616184],
[0.625617, 0.880221, 0.534547, 0.760235, 0.276998, 0.735438],
[0.774577, 0.922914, 0.947791, 0.315328, 0.414841, 0.785803],
[0.079768, 0.131498, 0.225123, 0.464621, 0.638041, 0.992795],
[0.471038, 0.244503, 0.565776, 0.898397, 0.604639, 0.306230],
[0.642233, 0.482219, 0.034943, 0.934805, 0.972714, 0.153664],
[0.550151, 0.310507, 0.042126, 0.230722, 0.444375, 0.117355],
[0.789984, 0.488482, 0.065237, 0.842940, 0.793454, 0.799489],
[0.850183, 0.754551, 0.516033, 0.166362, 0.201966, 0.044234],
[0.000601, 0.896758, 0.304433, 0.149125, 0.178398, 0.871836],
[0.056787, 0.932745, 0.218009, 0.778061, 0.131847, 0.356237],
[0.210266, 0.221479, 0.014831, 0.200901, 0.656693, 0.891819],
[0.528515, 0.178025, 0.188138, 0.411485, 0.217833, 0.907579],
[0.195801, 0.663099, 0.477312, 0.395250, 0.655791, 0.820570],
[0.933208, 0.789323, 0.350520, 0.855434, 0.491082, 0.874993],
[0.251047, 0.543513, 0.529644, 0.218495, 0.351637, 0.608904],
[0.963286, 0.793004, 0.650148, 0.881362, 0.904832, 0.005397],
[0.431744, 0.438965, 0.044544, 0.834968, 0.330614, 0.451282],
[0.234845, 0.328576, 0.388284, 0.339183, 0.206086, 0.600034],
[0.512783, 0.961787, 0.959109, 0.632098, 0.910614, 0.912025],
[0.454168, 0.743189, 0.834284, 0.955817, 0.072172, 0.523068],
[0.696968, 0.720236, 0.341060, 0.054580, 0.045599, 0.549192],
[0.272955, 0.318845, 0.700767, 0.426325, 0.895755, 0.843128],
[0.992189, 0.332899, 0.272784, 0.019284, 0.073711, 0.434800],
[0.154276, 0.639611, 0.924641, 0.587242, 0.358453, 0.548022],
[0.021506, 0.450392, 0.515150, 0.032232, 0.650223, 0.849384],
[0.316499, 0.513234, 0.958219, 0.843587, 0.125408, 0.836643],
[0.538587, 0.261750, 0.732136, 0.030271, 0.893345, 0.270532],
[0.987469, 0.708780, 0.446487, 0.968784, 0.734448, 0.788229],
[0.353358, 0.135036, 0.249018, 0.565029, 0.740519, 0.250807],
[0.810372, 0.656510, 0.472093, 0.225741, 0.420513, 0.202519],
[0.848128, 0.551586, 0.513140, 0.956164, 0.483389, 0.404478],
[0.292239, 0.297077, 0.934202, 0.468329, 0.872274, 0.992632],
[0.828869, 0.534749, 0.716451, 0.405855, 0.164485, 0.531068],
[0.130616, 0.757677, 0.284500, 0.438300, 0.957643, 0.725899],
[0.503542, 0.640368, 0.381914, 0.847206, 0.134660, 0.762294],
[0.653851, 0.646544, 0.436036, 0.944225, 0.310369, 0.392362],
[0.539397, 0.027168, 0.697972, 0.209293, 0.992890, 0.008113],
[0.902045, 0.171034, 0.194924, 0.620057, 0.002203, 0.557433],
[0.802612, 0.085835, 0.380626, 0.492568, 0.238166, 0.961837],
[0.466993, 0.647847, 0.113397, 0.015357, 0.928904, 0.166425],
[0.892021, 0.869756, 0.681364, 0.129555, 0.394682, 0.745036],
[0.060675, 0.869904, 0.757236, 0.220765, 0.615988, 0.754288],
[0.031815, 0.340961, 0.455958, 0.529616, 0.840036, 0.365200],
[0.834595, 0.603639, 0.745330, 0.085080, 0.184636, 0.238718],
[0.575681, 0.250761, 0.874497, 0.870401, 0.854591, 0.968971],
[0.3, 0.7, 0.4, 0.1, 0.258563, 0.932004],
[0.2, 0.9, 0.7, 0.2, 0.375076, 0.154363],
[0.4, 0.4, 0.6, 0.9, 0.579466, 0.524775]]
f = [109.6, 132.4, 558.2, 158.0, 6.2, 205.4, 593.9, 2.4,
399.8, 395.9, 212.6, 976.1, 104.4, 552.1, 436.3, 837.1,
283.7, 779.7, 392.1, 85.8, 885.1, 401.5, 367.5, 694.4,
691.6, 933.1, 590.7, 246.2, 370.0, 54.3, 719.4, 95.2,
276.0, 829.1, 613.6, 242.8, 424.6, 320.6, 666.1, 479.2,
420.0, 956.6, 241.0, 21.1, 169.8, 178.1, 394.4, -1000,
-1000, -1000, ]
numerator = 0.0
denominator = 0.0
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= math.fsum([ (x[k]/10 - z[j][k])**2
for k in range(6) ])
numerator += f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
dimension = 6
var_lower = np.array([0 for i in range(6)])
var_upper = np.array([10 for i in range(6)])
optimum_point = np.array([03., 07., 04., 01., 02.58563, 09.32004])
optimum_value = -1000
var_type = np.array(['I'] * 4 + ['R'] * 2)
# -- end class
class schoen_10_1_int:
"""
schoen function of dimension 10 with 50 stationary points.
Mixed integer version.
"""
@staticmethod
def evaluate(x):
assert(len(x)==10)
z = [[0.914871, 0.765230, 0.139426, 0.617466, 0.823635,
0.794003, 0.801171, 0.568811, 0.279434, 0.540422],
[0.976983, 0.593277, 0.701115, 0.585262, 0.669106,
0.272906, 0.177127, 0.143389, 0.561181, 0.018744],
[0.385208, 0.984106, 0.390066, 0.905970, 0.169600,
0.191291, 0.564157, 0.689910, 0.857031, 0.715390],
[0.975998, 0.536904, 0.819333, 0.801793, 0.564454,
0.336124, 0.654190, 0.044197, 0.717416, 0.465807],
[0.750519, 0.415284, 0.258927, 0.736115, 0.597744,
0.763716, 0.747691, 0.969633, 0.188117, 0.964954],
[0.412888, 0.671756, 0.380214, 0.558595, 0.768370,
0.998320, 0.212183, 0.606757, 0.531315, 0.303569],
[0.196682, 0.139879, 0.108608, 0.736975, 0.755971,
0.021390, 0.852398, 0.188596, 0.920133, 0.045012],
[0.956270, 0.729258, 0.397664, 0.013146, 0.519861,
0.300011, 0.008396, 0.820346, 0.176841, 0.402298],
[0.126432, 0.872346, 0.923581, 0.297492, 0.992744,
0.486525, 0.915493, 0.589980, 0.498242, 0.989945],
[0.697409, 0.026641, 0.875467, 0.503039, 0.563285,
0.096769, 0.933643, 0.884419, 0.585825, 0.395465],
[0.494783, 0.824300, 0.153326, 0.202651, 0.579815,
0.416954, 0.707624, 0.497959, 0.568876, 0.812841],
[0.126963, 0.757337, 0.648583, 0.787445, 0.822586,
0.401155, 0.301350, 0.562707, 0.744074, 0.088372],
[0.293611, 0.835864, 0.925111, 0.760322, 0.729456,
0.096840, 0.651466, 0.975836, 0.691353, 0.038384],
[0.999250, 0.916829, 0.205699, 0.027241, 0.156956,
0.206598, 0.175242, 0.811219, 0.660192, 0.119865],
[0.387978, 0.665180, 0.774376, 0.135223, 0.766238,
0.380668, 0.058279, 0.727506, 0.991527, 0.345759],
[0.299341, 0.066231, 0.680305, 0.392230, 0.319985,
0.698292, 0.100236, 0.394973, 0.096232, 0.362943],
[0.281548, 0.860858, 0.647870, 0.981650, 0.110777,
0.836484, 0.697387, 0.659942, 0.694425, 0.434991],
[0.606706, 0.052287, 0.858208, 0.738885, 0.158495,
0.002367, 0.933796, 0.112986, 0.647308, 0.421573],
[0.776505, 0.101364, 0.610406, 0.275033, 0.548409,
0.998967, 0.536743, 0.943903, 0.960993, 0.251672],
[0.371347, 0.491122, 0.772374, 0.860206, 0.752131,
0.338591, 0.826739, 0.312111, 0.768881, 0.862719],
[0.866886, 0.358220, 0.131205, 0.276334, 0.334111,
0.429525, 0.752197, 0.167524, 0.437764, 0.162916],
[0.584246, 0.511215, 0.659647, 0.349220, 0.954428,
0.477982, 0.386041, 0.813944, 0.753530, 0.983276],
[0.697327, 0.499835, 0.530487, 0.599958, 0.497257,
0.998852, 0.106262, 0.186978, 0.887481, 0.749174],
[0.041611, 0.278918, 0.999095, 0.825221, 0.218320,
0.383711, 0.077041, 0.642061, 0.668906, 0.758298],
[0.072437, 0.592862, 0.040655, 0.446330, 0.651659,
0.055738, 0.631924, 0.890039, 0.192989, 0.741054],
[0.533886, 0.135079, 0.787647, 0.593408, 0.749228,
0.749045, 0.190386, 0.755508, 0.465321, 0.465156],
[0.748843, 0.696419, 0.882124, 0.843895, 0.858057,
0.220107, 0.350310, 0.102947, 0.453576, 0.875940],
[0.560231, 0.580247, 0.381834, 0.807535, 0.184636,
0.615702, 0.628408, 0.081783, 0.793384, 0.233639],
[0.384827, 0.589138, 0.630013, 0.634506, 0.630712,
0.521293, 0.494486, 0.681700, 0.288512, 0.319808],
[0.721978, 0.452289, 0.426726, 0.323106, 0.781584,
0.999325, 0.043670, 0.884560, 0.520936, 0.430684],
[0.810388, 0.624041, 0.811624, 0.105973, 0.199807,
0.440644, 0.864152, 0.282280, 0.397116, 0.499932],
[0.973889, 0.677797, 0.080137, 0.549098, 0.625445,
0.577342, 0.538642, 0.388039, 0.552273, 0.793807],
[0.365176, 0.228017, 0.623500, 0.084450, 0.177343,
0.910108, 0.632719, 0.521458, 0.894843, 0.707893],
[0.502069, 0.622312, 0.958019, 0.744999, 0.515695,
0.407885, 0.590739, 0.736542, 0.297555, 0.237955],
[0.313835, 0.090014, 0.336274, 0.433171, 0.330864,
0.105751, 0.160367, 0.651934, 0.207260, 0.293577],
[0.886072, 0.592935, 0.498116, 0.321835, 0.011216,
0.543911, 0.506579, 0.216779, 0.406812, 0.261349],
[0.789947, 0.881332, 0.696597, 0.742955, 0.252224,
0.718157, 0.188217, 0.371208, 0.178640, 0.347720],
[0.482759, 0.663618, 0.622706, 0.036170, 0.278854,
0.088147, 0.482808, 0.134824, 0.028828, 0.944537],
[0.184705, 0.662346, 0.917194, 0.186490, 0.918392,
0.955111, 0.636015, 0.447595, 0.813716, 0.372839],
[0.231741, 0.637199, 0.745257, 0.201568, 0.697485,
0.897022, 0.239791, 0.495219, 0.153831, 0.387172],
[0.198061, 0.194102, 0.550259, 0.751804, 0.503973,
0.034252, 0.788267, 0.731760, 0.118338, 0.057247],
[0.068470, 0.545180, 0.668845, 0.714932, 0.688014,
0.203845, 0.146138, 0.109039, 0.470214, 0.441797],
[0.085180, 0.142394, 0.938665, 0.071422, 0.946796,
0.697832, 0.472400, 0.161384, 0.325715, 0.122550],
[0.637672, 0.986961, 0.969438, 0.989508, 0.381318,
0.800871, 0.012035, 0.326007, 0.459124, 0.645374],
[0.147210, 0.954608, 0.361146, 0.094699, 0.092327,
0.301664, 0.478447, 0.008274, 0.680576, 0.004184],
[0.768792, 0.812618, 0.915766, 0.029070, 0.506944,
0.457816, 0.839167, 0.024706, 0.990756, 0.088779],
[0.872678, 0.601536, 0.948347, 0.621023, 0.415621,
0.289340, 0.291338, 0.190461, 0.664007, 0.583513],
[0.6, 0.7, 0.0, 0.355500, 0.294700,
0.3, 0.5, 0.5, 0.759223, 0.508432],
[0.7, 0.0, 0.4, 0.300586, 0.576927,
0.1, 0.2, 0.9, 0.614178, 0.092903],
[0.7, 0.3, 0.7, 0.899419, 0.749499,
0.6, 0.6, 0.0, 0.973726, 0.168336]]
f = [799.1, 396.8, 370.3, 400.2, 239.7, 678.8, 868.9, 564.4,
681.6, 153.0, 760.7, 562.9, 434.9, 579.2, 260.6, 88.5,
601.3, 754.8, 894.8, 672.8, 633.7, 921.8, 43.2, 286.2,
945.5, 716.0, 72.7, 631.2, 640.3, 425.1, 825.8, 555.8,
136.9, 805.7, 786.5, 400.0, 856.4, 548.0, 510.8, 52.3,
111.6, 686.6, 888.2, 315.4, 333.9, 61.5, 755.2, -1000,
-1000, -1000]
numerator = 0.0
denominator = 0.0
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= math.fsum([ (x[k]/10 - z[j][k])**2
for k in range(10) ])
numerator += f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
dimension = 10
var_lower = np.array([0 for i in range(10)])
var_upper = np.array([10 for i in range(10)])
optimum_point = np.array([06., 07., 00., 03.55500, 02.94700,
03., 05., 05., 07.59223, 05.08432])
optimum_value = -1000
var_type = np.array(['I'] * 3 + ['R'] * 2 + ['I'] * 3 + ['R'] * 2)
# -- end class
class schoen_10_2_int:
"""
schoen function of dimension 10 with 50 stationary points.
Mixed integer version.
"""
@staticmethod
def evaluate(x):
assert(len(x)==10)
z = [[0.131461, 0.965235, 0.046134, 0.983011, 0.719813,
0.827542, 0.662422, 0.570546, 0.578707, 0.013264],
[0.068454, 0.682785, 0.582736, 0.434517, 0.310613,
0.869876, 0.993949, 0.629156, 0.590599, 0.356378],
[0.632837, 0.961665, 0.015079, 0.378878, 0.805608,
0.685239, 0.528658, 0.752934, 0.717790, 0.374865],
[0.286191, 0.912944, 0.400358, 0.902532, 0.324887,
0.850063, 0.483503, 0.764147, 0.147726, 0.159851],
[0.303483, 0.754790, 0.090527, 0.653764, 0.164323,
0.402931, 0.593477, 0.448444, 0.711483, 0.113869],
[0.057398, 0.302029, 0.596351, 0.565466, 0.694204,
0.974864, 0.323989, 0.298493, 0.859391, 0.238714],
[0.139267, 0.214902, 0.608462, 0.297987, 0.499810,
0.578553, 0.548077, 0.208442, 0.046162, 0.246848],
[0.680420, 0.783181, 0.828103, 0.475810, 0.680401,
0.188455, 0.015200, 0.650103, 0.762389, 0.063985],
[0.409243, 0.600740, 0.302354, 0.588411, 0.436291,
0.294790, 0.701477, 0.994162, 0.433749, 0.535320],
[0.077949, 0.530126, 0.869737, 0.387811, 0.705317,
0.632911, 0.442087, 0.082918, 0.441383, 0.591975],
[0.622628, 0.054964, 0.020475, 0.145616, 0.163873,
0.321546, 0.282867, 0.743494, 0.750568, 0.732386],
[0.538574, 0.066932, 0.225204, 0.290045, 0.613242,
0.529365, 0.384018, 0.946557, 0.974384, 0.425297],
[0.108817, 0.850094, 0.886417, 0.161581, 0.082973,
0.506354, 0.589650, 0.638991, 0.045151, 0.688464],
[0.917742, 0.365119, 0.484176, 0.173231, 0.210253,
0.303688, 0.992141, 0.023109, 0.977178, 0.535146],
[0.183469, 0.198085, 0.511596, 0.275610, 0.753700,
0.437328, 0.986237, 0.028654, 0.767921, 0.997910],
[0.484908, 0.759122, 0.577318, 0.359934, 0.935730,
0.617833, 0.770173, 0.311175, 0.004831, 0.157457],
[0.634077, 0.236972, 0.016427, 0.261753, 0.349712,
0.245870, 0.412238, 0.523557, 0.985327, 0.094060],
[0.477875, 0.803438, 0.496728, 0.848920, 0.497386,
0.938203, 0.279797, 0.287076, 0.395184, 0.980546],
[0.450215, 0.193712, 0.975838, 0.103925, 0.077410,
0.709573, 0.253072, 0.311723, 0.885664, 0.204528],
[0.557312, 0.815198, 0.097914, 0.539142, 0.826048,
0.130070, 0.049858, 0.223634, 0.076387, 0.831224],
[0.927559, 0.324916, 0.563393, 0.209281, 0.344394,
0.953384, 0.298679, 0.890637, 0.966615, 0.380006],
[0.026403, 0.997573, 0.479163, 0.379686, 0.687928,
0.832002, 0.214326, 0.348248, 0.073151, 0.062646],
[0.726869, 0.911171, 0.961920, 0.874884, 0.216867,
0.076966, 0.776240, 0.495777, 0.963492, 0.425246],
[0.357483, 0.486330, 0.759177, 0.748362, 0.889904,
0.350438, 0.232983, 0.823613, 0.792656, 0.441264],
[0.875826, 0.359459, 0.214808, 0.425850, 0.493328,
0.456048, 0.523145, 0.504154, 0.090128, 0.472437],
[0.813400, 0.808407, 0.427211, 0.902524, 0.210376,
0.490662, 0.915939, 0.169439, 0.078865, 0.485371],
[0.877334, 0.982207, 0.679085, 0.486335, 0.940715,
0.585964, 0.289279, 0.694886, 0.172625, 0.201457],
[0.141599, 0.476124, 0.762246, 0.067045, 0.411332,
0.813196, 0.134138, 0.302390, 0.856145, 0.349243],
[0.346912, 0.082142, 0.787442, 0.857465, 0.371129,
0.448550, 0.967943, 0.775340, 0.943681, 0.656127],
[0.619267, 0.547196, 0.470422, 0.141566, 0.584198,
0.952226, 0.196462, 0.629549, 0.685469, 0.824365],
[0.014209, 0.789812, 0.836373, 0.186139, 0.493840,
0.710697, 0.910033, 0.368287, 0.865953, 0.140892],
[0.482763, 0.072574, 0.026730, 0.143687, 0.739505,
0.419649, 0.013683, 0.662644, 0.785254, 0.234561],
[0.821421, 0.844100, 0.153937, 0.671762, 0.290469,
0.631347, 0.591435, 0.498966, 0.043395, 0.176771],
[0.404994, 0.496656, 0.951774, 0.497357, 0.715401,
0.023378, 0.493045, 0.342766, 0.117055, 0.698590],
[0.985857, 0.831692, 0.423498, 0.215757, 0.341260,
0.790760, 0.941186, 0.716883, 0.062641, 0.582012],
[0.676905, 0.280897, 0.800638, 0.898913, 0.735995,
0.592412, 0.433021, 0.432772, 0.874477, 0.112375],
[0.377382, 0.118941, 0.529204, 0.419434, 0.673891,
0.074904, 0.129868, 0.819585, 0.220536, 0.353223],
[0.233415, 0.136703, 0.487256, 0.777498, 0.901915,
0.612402, 0.778635, 0.436718, 0.484520, 0.641969],
[0.273297, 0.670196, 0.344525, 0.669751, 0.180230,
0.530085, 0.393284, 0.326043, 0.260840, 0.364690],
[0.931213, 0.676123, 0.912481, 0.898258, 0.001887,
0.408306, 0.917215, 0.496959, 0.287951, 0.562511],
[0.047196, 0.780338, 0.895994, 0.088169, 0.552425,
0.130790, 0.308504, 0.232476, 0.187952, 0.105936],
[0.343517, 0.356222, 0.416018, 0.450278, 0.487765,
0.040510, 0.592363, 0.771635, 0.577849, 0.315843],
[0.527759, 0.529503, 0.210423, 0.756794, 0.892670,
0.339374, 0.445837, 0.363265, 0.432114, 0.942045],
[0.560107, 0.110906, 0.115725, 0.761393, 0.969105,
0.921166, 0.455014, 0.593512, 0.111887, 0.217300],
[0.463382, 0.635591, 0.329484, 0.573602, 0.492558,
0.474174, 0.371906, 0.850465, 0.467637, 0.261373],
[0.033051, 0.422543, 0.294155, 0.699026, 0.846231,
0.047967, 0.686826, 0.480273, 0.463181, 0.345601],
[0.285473, 0.723925, 0.202386, 0.671909, 0.685277,
0.993969, 0.415329, 0.155218, 0.233826, 0.088752],
[0.0, 0.6, 0.8, 0.677718, 0.961189,
0.2, 0.8, 0.8, 0.524970, 0.815489],
[0.5, 0.5, 0.1, 0.156163, 0.274566,
0.5, 0.8, 0.8, 0.656166, 0.964211],
[0.1, 0.9, 0.0, 0.178217, 0.408438,
0.2, 0.1, 0.2, 0.051758, 0.906712]]
f = [90.4, 830.9, 52.7, 375.2, 289.7, 244.1, 470.2, 111.7,
968.9, 903.4, 918.5, 820.3, 441.2, 687.5, 836.9, 11.0,
454.5, 929.3, 952.6, 937.2, 870.5, 211.7, 378.4, 320.3,
729.6, 420.8, 213.8, 717.7, 285.4, 522.8, 748.3, 371.0,
501.2, 568.6, 111.9, 645.2, 486.2, 157.0, 968.5, 137.6,
127.2, 943.4, 437.2, 199.7, 415.4, 966.0, 362.3, -1000,
-1000, -1000]
numerator = 0.0
denominator = 0.0
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= math.fsum([ (x[k]/10 - z[j][k])**2
for k in range(10) ])
numerator += f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
dimension = 10
var_lower = np.array([0 for i in range(10)])
var_upper = np.array([10 for i in range(10)])
optimum_point = np.array([00., 06., 08., 06.77718, 09.61189,
02., 08., 08., 05.24970, 08.15489])
optimum_value = -1000
var_type = np.array(['I'] * 3 + ['R'] * 2 + ['I'] * 3 + ['R'] * 2)
# -- end class
class TestBlackBox(RbfoptBlackBox):
"""A black-box constructed from a known test function.
Parameters
----------
name : string
The name of the function to be implemented.
"""
def __init__(self, name):
"""Constructor.
"""
try:
thismodule = sys.modules[__name__]
self._function = getattr(thismodule, name.lower())
except AttributeError:
raise ValueError('Function ' + name + ' not implemented')
def get_dimension(self):
return self._function.dimension
def get_var_lower(self):
return self._function.var_lower
def get_var_upper(self):
return self._function.var_upper
def get_var_type(self):
return self._function.var_type
def evaluate(self, point):
return self._function.evaluate(point)
def evaluate_noisy(self, point):
raise NotImplementedError('evaluate_noisy() not implemented')
def has_evaluate_noisy(self):
return False
# -- end class
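# Hedged usage sketch (editor addition, illustrative only): TestBlackBox
# resolves a lower-cased name to one of the classes defined above and exposes
# it through the RbfoptBlackBox interface. The helper name is hypothetical.
def _example_test_black_box():
    bb = TestBlackBox('st_miqp3')
    point = bb.get_var_lower()  # integer lower bounds, here [0, 0]
    return bb.get_dimension(), bb.evaluate(point)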
class TestNoisyBlackBox(RbfoptBlackBox):
"""A noisy black-box constructed from a given black-box function.
Parameters
----------
blackbox : `RbfoptBlackBox`
The black box function to which noise is added.
max_rel_error: float
Maximum relative error.
max_abs_error: float
Maximum absolute error.
"""
def __init__(self, blackbox, max_rel_error = 0.1, max_abs_error = 0.1):
"""Constructor.
"""
assert(max_rel_error >= 0.0)
assert(max_abs_error >= 0.0)
try:
# Get the original function if it is one from this module
self._function = getattr(blackbox, '_function')
except AttributeError:
pass
self._bb = blackbox
self._max_rel_error = max_rel_error
self._max_abs_error = max_abs_error
def get_dimension(self):
return self._bb.get_dimension()
def get_var_lower(self):
return self._bb.get_var_lower()
def get_var_upper(self):
return self._bb.get_var_upper()
def get_var_type(self):
return self._bb.get_var_type()
def evaluate(self, point):
return self._bb.evaluate(point)
def evaluate_noisy(self, point):
value = self._bb.evaluate(point)
rel_noise = np.random.uniform(-self._max_rel_error,
self._max_rel_error)
abs_noise = np.random.uniform(-self._max_abs_error,
self._max_abs_error)
return np.array([value + rel_noise*abs(value) + abs_noise,
- abs(rel_noise*abs(value) + abs_noise),
+ abs(rel_noise*abs(value) + abs_noise)])
def has_evaluate_noisy(self):
return True
# -- end class
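# Hedged sketch (editor addition, illustrative only): wrapping a deterministic
# test function with bounded noise; evaluate_noisy() returns the noisy value
# together with symmetric lower and upper error estimates.
def _example_noisy_black_box():
    noisy = TestNoisyBlackBox(TestBlackBox('prob03'),
                              max_rel_error=0.05, max_abs_error=0.01)
    return noisy.evaluate_noisy(noisy.get_var_lower())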
class TestEnlargedBlackBox(RbfoptBlackBox):
"""A black-box constructed increasing the size of a test function.
Construct a black box function from a given function, increasing
its dimension by a given factor. The new function is put together
from several independent copies of the original function, plus a
coupling term. If the dimension multiplier is `d` and the original
function has dimension `n`, the new function has dimension `n*d`
and is computed as:
.. math::
\sum_{j=1}^{d} a_j f(x_{(j-1)n+1},\dots,x_{jn}) + 0.4
f(g_1(x),\dots,g_n(x)),
where `a_j` are random weights that add up to 0.6, and `g_1`
through `g_n` are linear functions of a random subset of the
variables. These linear functions are appropriately scaled and
clipped so that we do not exceed the original function bounds. The
optimum of the new function stays the same. Finally, all variables
are randomly permuted.
Parameters
----------
name : string
The name of the function to be implemented.
dimension_multiplier : int
Dimension multiplier
"""
def __init__(self, name, dimension_multiplier=1):
"""Constructor.
"""
assert(dimension_multiplier>=1)
try:
thismodule = sys.modules[__name__]
self._function = getattr(thismodule, name.lower())
except AttributeError:
raise ValueError('Function ' + name + ' not implemented')
dim = self._function.dimension
perm = np.random.permutation(dim * dimension_multiplier)
bounds = []
mult_factor = []
shift = []
# For the copy of the function coupling all variables,
# pick dimension_multiplier random variables to add together
coupling = np.reshape(np.random.permutation(dim*dimension_multiplier),
(dim, dimension_multiplier))
for i in range(dim):
# The bounds of the sum are just the sum of the lower
# and upper bounds of the component variables
lb = sum(self._function.var_lower[perm[val] % dim]
for val in coupling[i])
ub = sum(self._function.var_upper[perm[val] % dim]
for val in coupling[i])
bounds.append([lb, ub])
# The coefficients are computed so that the optimum
# stays the same
shifted_opt = sum(self._function.optimum_point[perm[val] % dim]
for val in coupling[i])
# Check the position of the optimum in the interval
ratio = (shifted_opt - lb)/(ub - lb)
orig_ratio = ((self._function.optimum_point[i] -
self._function.var_lower[i]) /
(self._function.var_upper[i] -
self._function.var_lower[i]))
# The multiplication factor should bring the
# transformed optimum to the original optimum
if (ratio != 0.0 and orig_ratio != 0.0):
mult_factor.append(orig_ratio / ratio)
shift.append(0)
elif (orig_ratio == 0.0):
# The true optimum is at the lower bound. We have to
# ensure the transformed point is mapped to it. The
# correct ratio would be zero, but to let the point
# vary, we change the transformed bound instead. The
# "max" in the bound is to prevent errors in case the
# shifted optimum is at the upper bound.
bounds[-1] = [shifted_opt, max(ub, shifted_opt+1)]
mult_factor.append(1.0)
shift.append(0)
else:
# The transformed point is at the lower bound. Ensure
# it can reach the true optimum.
mult_factor.append(1.0)
shift.append(self._function.optimum_point[i] -
self._function.var_lower[i])
# Compute weight of each copy of the function
int_weights = np.random.randint(1, 10, dimension_multiplier)
weight = np.array([0.6*val/sum(int_weights)
for val in int_weights] + [0.4])
# Store data necessary for function evaluation
self.coupling = coupling
self.extra_bounds = np.array(bounds)
self.mult_factor = np.array(mult_factor)
self.shift = np.array(shift)
self.weight = weight
self.permutation = perm
self.dimension = self._function.dimension
self.dimension_multiplier = dimension_multiplier
# Compute bounds and variable types
self.var_lower = np.array(
[self._function.var_lower[perm[i] % dim]
for i in range(dim*dimension_multiplier)])
self.var_upper = np.array(
[self._function.var_upper[perm[i] % dim]
for i in range(dim*dimension_multiplier)])
self.var_type = np.array(
[self._function.var_type[perm[i] % dim]
for i in range(dim*dimension_multiplier)])
self.optimum_point = np.array(
[self._function.optimum_point[perm[i] % dim]
for i in range(dim*dimension_multiplier)])
self.optimum_value = self._function.optimum_value
def get_dimension(self):
return self._function.dimension * self.dimension_multiplier
def get_var_lower(self):
return self.var_lower
def get_var_upper(self):
return self.var_upper
def get_var_type(self):
return self.var_type
def evaluate(self, point):
assert(len(point)==self.dimension*self.dimension_multiplier)
# First evaluate each copy of the function on individual variables
value = 0.0
for i in range(self.dimension_multiplier):
subpoint = np.array([point[np.where(self.permutation == j)[0][0]]
for j in range(i*self.dimension,
(i+1)*self.dimension)])
value += self.weight[i]*self._function.evaluate(subpoint)
# Add the coupling term
subpoint = np.zeros(self.dimension)
for i in range(self.dimension):
subpoint[i] = np.sum(point[self.coupling[i]])
subpoint = (self._function.var_lower + self.shift +
self.mult_factor * (subpoint - self.extra_bounds[:, 0]) /
(self.extra_bounds[:, 1] - self.extra_bounds[:, 0]) *
(self._function.var_upper - self._function.var_lower))
subpoint = np.clip(subpoint, a_min=self._function.var_lower,
a_max=self._function.var_upper)
value += self.weight[-1] * self._function.evaluate(subpoint)
return value
def evaluate_noisy(self, point):
raise NotImplementedError('evaluate_noisy() not implemented')
def has_evaluate_noisy(self):
return False
# -- end class
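# Hedged sketch (editor addition, illustrative only): enlarging a test
# function by a factor of 3; the dimension triples while the optimum value is
# preserved, so evaluating at optimum_point should recover (approximately)
# optimum_value.
def _example_enlarged_black_box():
    bb = TestEnlargedBlackBox('st_miqp3', dimension_multiplier=3)
    return bb.get_dimension(), bb.evaluate(bb.optimum_point)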
| 42.233474
| 78
| 0.516163
|
a286252639129bdab6ca82a5e6f74d5677e44821
| 14,641
|
py
|
Python
|
starthinker_ui/recipe/models.py
|
viohman/starthinker
|
20bd2d7fd1e541eb8a2c9b7159941f667e22e38e
|
[
"Apache-2.0"
] | null | null | null |
starthinker_ui/recipe/models.py
|
viohman/starthinker
|
20bd2d7fd1e541eb8a2c9b7159941f667e22e38e
|
[
"Apache-2.0"
] | null | null | null |
starthinker_ui/recipe/models.py
|
viohman/starthinker
|
20bd2d7fd1e541eb8a2c9b7159941f667e22e38e
|
[
"Apache-2.0"
] | null | null | null |
###########################################################################
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import re
import pytz
import json
import functools
from itertools import chain
from datetime import date, datetime, timedelta
from django.db import models
from django.conf import settings
from starthinker.util.project import project
from starthinker_ui.account.models import Account, token_generate
from starthinker_ui.project.models import Project
from starthinker_ui.recipe.scripts import Script
JOB_INTERVAL_MS = float(1600) # milliseconds
JOB_LOOKBACK_MS = 5 * JOB_INTERVAL_MS # 8 seconds ( must guarantee to span several pings )
JOB_RECHECK_MS = 30 * 60 * 1000 # 30 minutes
RE_SLUG = re.compile(r'[^\w]')
def utc_milliseconds(utc_timestamp=None):
if utc_timestamp is None: utc_timestamp = datetime.utcnow()
utc_epoch = datetime.utcfromtimestamp(0)
return int((utc_timestamp - utc_epoch) / timedelta(milliseconds=1))
def utc_milliseconds_to_timezone(utm, timezone):
return utc_to_timezone(datetime.utcfromtimestamp(int(utm/1000)), timezone)
def utc_to_timezone(utc_timestamp, timezone):
tz = pytz.timezone(timezone)
return tz.normalize(utc_timestamp.replace(tzinfo=pytz.utc).astimezone(tz))
def timezone_to_utc(tz_timestamp):
return tz_timestamp.astimezone(pytz.utc).replace(tzinfo=None)
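# Hedged example (editor addition, illustrative only): round-tripping a UTC
# timestamp through the helpers above using the model's default timezone.
def _example_timezone_roundtrip():
    now_utc = datetime.utcnow()
    local = utc_to_timezone(now_utc, 'America/Los_Angeles')
    return timezone_to_utc(local) == now_utc  # True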
def time_ago(timestamp):
ago = ''
seconds = (datetime.utcnow() - timestamp).total_seconds()
if seconds is None:
ago = 'Unknown'
elif seconds == 0:
ago = 'Just Now'
else:
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
if d: ago += '%d Days ' % d
if h: ago += '%d Hours ' % h
if m: ago += '%d Minutes ' % m
if ago == '' and s: ago = '1 Minute Ago'
else: ago += 'Ago'
return ago
def reference_default():
return token_generate(Recipe, 'token', 32)
class Recipe(models.Model):
account = models.ForeignKey(Account, on_delete=models.PROTECT, null=True)
token = models.CharField(max_length=8, unique=True)
reference = models.CharField(max_length=32, unique=True, default=reference_default)
project = models.ForeignKey(Project, on_delete=models.SET_NULL, null=True, blank=True)
name = models.CharField(max_length=64)
active = models.BooleanField(default=True)
manual = models.BooleanField(default=False)
week = models.CharField(max_length=64, default=json.dumps(['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']))
hour = models.CharField(max_length=128, default=json.dumps([3]))
timezone = models.CharField(max_length=32, blank=True, default='America/Los_Angeles')
tasks = models.TextField()
job_utm = models.BigIntegerField(blank=True, default=0)
job_status = models.TextField(default='{}')
worker_uid = models.CharField(max_length=128, default='')
worker_utm = models.BigIntegerField(blank=True, default=0)
birthday = models.DateField(auto_now_add=True)
_cache_log = None
def __str__(self):
return self.name
def __unicode__(self):
return self.name
def slug(self):
return RE_SLUG.sub('_', self.name)
def save(self, *args, **kwargs):
self.get_token()
self.get_reference()
super(Recipe, self).save(*args, **kwargs)
self._cache_log = None
def uid(self):
return self.pk or 'NEW'
def link_edit(self):
return '/recipe/edit/%d/' % self.pk
def link_delete(self):
return '/recipe/delete/%d/' % self.pk
def link_run(self):
return '/recipe/run/%d/' % self.pk if self.pk else ''
def link_cancel(self):
return '/recipe/cancel/%d/' % self.pk if self.pk else ''
def link_json(self):
return '/recipe/json/%d/' % self.pk if self.pk else ''
def link_colab(self):
return '/recipe/colabs/%d/' % self.pk if self.pk else ''
def link_airflow(self):
return '/recipe/airflow/%d/' % self.pk if self.pk else ''
def link_start(self):
return '%s/recipe/start/' % settings.CONST_URL
def link_stop(self):
return '%s/recipe/stop/' % settings.CONST_URL
def is_running(self):
return self.get_log()['status'] == 'RUNNING'
def get_token(self):
if not self.token: self.token = token_generate(Recipe, 'token')
return self.token
def get_reference(self):
if not self.reference: self.reference = token_generate(Recipe, 'reference', 32)
return self.reference
def get_values(self):
constants = {
'recipe_project':
self.get_project_identifier(),
'recipe_name':
self.name,
'recipe_slug':
self.slug(),
'recipe_token':
self.get_token(),
'recipe_timezone':
self.timezone,
'recipe_email':
self.account.email if self.account else None,
'recipe_email_token':
self.account.email.replace('@', '+%s@' % self.get_token())
if self.account else None,
}
tasks = json.loads(self.tasks or '[]')
for task in tasks: task['values'].update(constants)
return tasks
def set_values(self, scripts):
self.tasks = json.dumps(scripts)
def get_hours(self):
return [int(h) for h in json.loads(self.hour or '[]')]
def get_days(self):
return json.loads(self.week or '[]') or ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
def get_icon(self): return '' #get_icon('')
def get_credentials_user(self):
return self.account.get_credentials_path() if self.account else '{}'
def get_credentials_service(self):
return self.project.service if self.project and self.project.service else '{}'
def get_project_identifier(self):
return self.project.get_project_id() if self.project else ''
def get_scripts(self):
for value in self.get_values(): yield Script(value['tag'])
def get_tasks(self):
for task in chain.from_iterable(map(lambda s: s.get_tasks(), self.get_scripts())):
yield next(iter(task.items())) # script, task
def get_json(self, credentials=True):
return Script.get_json(
self.uid(),
self.get_project_identifier(),
self.get_credentials_user() if credentials else '',
self.get_credentials_service() if credentials else '',
self.timezone,
self.get_days(),
self.get_hours(),
self.get_values()
)
def activate(self):
self.active = True
self.save(update_fields=['active'])
def deactivate(self):
self.active = False
self.save(update_fields=['active'])
def update(self):
return self.get_status(update=True)
def force(self):
status = self.get_status(force=True)
self.worker_uid = '' # forces current worker to cancel job
self.save(update_fields=['worker_uid'])
return status
def cancel(self):
status = self.get_status(cancel=True)
self.worker_uid = '' # forces current worker to cancel job
self.save(update_fields=['worker_uid'])
return status
def get_job_utm(self, status):
now_tz = utc_to_timezone(datetime.utcnow(), self.timezone)
# check if tasks remain for today
hour = None
if now_tz.strftime('%a') in self.get_days():
for task in status['tasks']:
if task['done']: continue
else:
hour = task['hour']
break
# all tasks done, advance to next day first task
if hour is None:
now_tz += timedelta(hours=24)
for i in range(0, 7):
if now_tz.strftime('%a') in self.get_days(): break
else: now_tz += timedelta(hours=24)
# get the first hour ( if tasks exist, lame use of for loop but works )
for script, task in self.get_tasks():
try: hour = task.get('hour', self.get_hours())[0]
except IndexError: hour = 0
break
now_tz = now_tz.replace(hour=hour or 0, minute=0, second=0, microsecond=0)
return utc_milliseconds(timezone_to_utc(now_tz))
def get_status(self, update=False, force=False, cancel=False):
# current 24 hour time zone derived frame to RUN the job
now_utc = datetime.utcnow()
now_tz = utc_to_timezone(now_utc, self.timezone)
date_tz = str(now_tz.date())
# load prior status
try: status = json.loads(self.job_status)
except ValueError: status = {}
# create default status for new recipes
status.setdefault('date_tz', date_tz)
status.setdefault('tasks', [])
# if not saved yet, do nothing
if not self.pk:
return status
# if cancel, do it on whatever status exists
elif cancel:
for task in status['tasks']:
if not task['done']:
task['done'] = True
task['utc'] = str(now_utc)
task['event'] = 'JOB_CANCEL'
self.job_utm = self.get_job_utm(status)
self.job_status = json.dumps(status)
self.worker_uid = '' # forces current worker to cancel job
self.save(update_fields=['job_status', 'job_utm', 'worker_uid'])
# if manual and all task are done set the utm to be ignored in worker pulls
elif self.manual and not force and not update:
if not status['tasks'] or all(task['done'] for task in status['tasks']):
self.job_utm = 0
self.save(update_fields=['job_utm'])
# if updating, modify the status
elif force or update or (date_tz > status['date_tz'] and now_tz.strftime('%a') in self.get_days()):
status = {
'date_tz': date_tz,
'tasks': [],
}
# create task list based on recipe json
instances = {}
for order, (script, task) in enumerate(self.get_tasks()):
# if force use current hour, if schedule use task and recipe hours
hours = [now_tz.hour] if force else task.get('hour', self.get_hours())
# tasks with hours = [] will be skipped unless force=True
if hours:
instances.setdefault(script, 0)
instances[script] += 1
for hour in hours:
status['tasks'].append({
'order': order,
'script': script,
'instance': instances[script],
'hour': hour,
'utc': str(datetime.utcnow()),
'event': 'JOB_NEW' if update else 'JOB_PENDING',
'stdout': '',
'stderr': '',
'done': update # if saved by user, write as done for that day, user must force run first time
})
# sort new order by first by hour and second by order
def queue_compare(left, right):
if left['hour'] < right['hour']: return -1
elif left['hour'] > right['hour']: return 1
else:
if left['order'] < right['order']: return -1
elif left['order'] > right['order']: return 1
else: return 0
status['tasks'].sort(key=functools.cmp_to_key(queue_compare))
self.job_utm = self.get_job_utm(status)
self.job_status = json.dumps(status)
if force or update:
self.worker_uid = '' # cancel all current workers
self.save(update_fields=['job_status', 'job_utm', 'worker_uid'])
else:
self.save(update_fields=['job_status', 'job_utm'])
else:
job_utm = self.get_job_utm(status)
if job_utm != self.job_utm:
self.job_utm = job_utm
self.save(update_fields=['job_utm'])
return status
def get_task(self):
status = self.get_status()
# if not done return next task prior or equal to current time zone hour
now_tz = utc_to_timezone(datetime.utcnow(), self.timezone)
if now_tz.strftime('%a') in self.get_days():
for task in status['tasks']:
if not task['done'] and task['hour'] <= now_tz.hour:
task['recipe'] = self.get_json()
return task
return None
def set_task(self, script, instance, hour, event, stdout, stderr):
status = self.get_status()
for task in status['tasks']:
if task['script'] == script and task['instance'] == instance and task['hour'] == hour:
task['utc'] = str(datetime.utcnow())
task['event'] = event
if stdout: task['stdout'] += stdout
if stderr: task['stderr'] += stderr
task['done'] = (event != 'JOB_START')
self.job_status = json.dumps(status)
self.job_utm = self.get_job_utm(status)
self.worker_utm=utc_milliseconds() # give worker some time to clean up
self.save(update_fields=['worker_utm', 'job_utm', 'job_status'])
break
def get_log(self):
if self._cache_log is None:
self._cache_log = self.get_status()
error = False
timeout = False
new = False
done = 0
for task in self._cache_log['tasks']:
task['utc'] = datetime.strptime(task['utc'].split('.', 1)[0], "%Y-%m-%d %H:%M:%S")
task['ltc'] = utc_to_timezone(task['utc'], self.timezone)
task['ago'] = time_ago(task['utc'])
if task['done'] and task['event'] != 'JOB_NEW': done += 1
if self._cache_log.get('utc', task['utc']) <= task['utc']: self._cache_log['utc'] = task['utc']
if task['event'] == 'JOB_TIMEOUT': timeout = True
elif task['event'] == 'JOB_NEW': new = True
elif task['event'] not in ('JOB_PENDING', 'JOB_START', 'JOB_END'): error = True
if 'utc' not in self._cache_log: self._cache_log['utc'] = datetime.utcnow()
self._cache_log['utl'] = utc_to_timezone(self._cache_log['utc'], self.timezone)
self._cache_log['ago'] = time_ago(self._cache_log['utc'])
self._cache_log['percent'] = int(( done * 100 ) / ( len(self._cache_log['tasks']) or 1 ))
self._cache_log['uid'] = self.uid()
if timeout:
self._cache_log['status'] = 'TIMEOUT'
elif new:
self._cache_log['status'] = 'NEW'
elif error:
self._cache_log['status'] = 'ERROR'
elif not self._cache_log['tasks'] or all(task['done'] for task in self._cache_log['tasks']):
self._cache_log['status'] = 'FINISHED'
elif utc_milliseconds() - self.worker_utm < JOB_LOOKBACK_MS:
self._cache_log['status'] = 'RUNNING'
elif not self.active:
self._cache_log['status'] = 'PAUSED'
else:
self._cache_log['status'] = 'QUEUED'
return self._cache_log
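# Hedged usage sketch (editor addition, illustrative only): the typical recipe
# lifecycle driven by the methods above. A configured Django database is
# assumed and the field values are hypothetical.
def _example_recipe_lifecycle(account):
    recipe = Recipe.objects.create(account=account, name='Example', tasks='[]')
    recipe.update()   # write a fresh task schedule from the recipe JSON
    recipe.force()    # queue every task for the current hour
    # 'FINISHED' here because this recipe has no tasks; real recipes move
    # through NEW / QUEUED / RUNNING / ERROR as workers report events.
    return recipe.get_log()['status']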
| 32.827354
| 111
| 0.636432
|
d38fcc68a70fa02b765cd12c9bfe6605a585f3a2
| 2,491
|
py
|
Python
|
main.py
|
chocks/subreddit-sentiment-analysis
|
7f9c3698e3ae5d6515960fdfa59fa86a08387194
|
[
"MIT"
] | null | null | null |
main.py
|
chocks/subreddit-sentiment-analysis
|
7f9c3698e3ae5d6515960fdfa59fa86a08387194
|
[
"MIT"
] | null | null | null |
main.py
|
chocks/subreddit-sentiment-analysis
|
7f9c3698e3ae5d6515960fdfa59fa86a08387194
|
[
"MIT"
] | null | null | null |
"""
A simple command line utility to do sentiment analysis on a sub-reddit
This tries to predict the total positive, negative and neutral posts in a given time frame.
NOTE: This does not apply advanced learning models but relies on the text-processing service for the prediction, and
is intended for educational purposes.
"""
import praw
import requests
import argparse
from config import Config
def do_sentiment_analysis(subreddit_name, time_filter):
"""
Do sentiment analysis on a sub-reddit
:param subreddit_name:
:param time_filter:
:return:
"""
reddit_config = Config()
reddit = praw.Reddit(client_id=reddit_config.get_client_id(),
client_secret=reddit_config.get_client_secret(),
user_agent='USERAGENT')
all_submissions = reddit.subreddit(subreddit_name).top(time_filter=time_filter)
SENTIMENT_PROCESSING_API = 'http://text-processing.com/api/sentiment/'
pos_count = 0
neg_count = 0
neu_count = 0
total_posts = 0
for submission in all_submissions:
total_posts += 1
try:
payload = {'text': submission.title}
r = requests.post(SENTIMENT_PROCESSING_API, data=payload)
sentiment = r.json().get('label')
if sentiment == 'pos':
pos_count += 1
elif sentiment == 'neg':
neg_count += 1
else:
neu_count += 1
except:
raise
# Color formatters
formatters = {
'RED': '\033[91m',
'GREEN': '\033[92m',
'GREY': '\33[90m',
'END': '\033[0m',
}
print "Total posts scanned: {} ".format(total_posts)
print "{GREEN} Positive comments: {} {END}".format(pos_count, **formatters)
print "{RED} Negative comments: {} {END}".format(neg_count, **formatters)
print "{GREY} Neutral comments: {} {END}".format(neu_count, **formatters)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--subreddit_name",
help="Name of the sub-reddit to scan",
required=True)
parser.add_argument("-t", "--time_filter",
help="Time period to scan ",
choices=['all', 'day', 'hour', 'month', 'week', 'year'],
default='day')
args = parser.parse_args()
do_sentiment_analysis(args.subreddit_name, args.time_filter)
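# Hedged usage note (editor addition, illustrative only):
#   python main.py -s learnpython -t week
# scans the top posts of the chosen subreddit over the past week. A config.py
# module providing the Config class with Reddit API credentials is assumed,
# as imported at the top of this script.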
| 31.935897
| 116
| 0.602971
|
2592a26c12b2a08e89b0f99c9b576b1ee70ff7c8
| 1,401
|
py
|
Python
|
ee/core/models.py
|
rjdp/Easynginedemoplugin
|
9b9e5caa29630fae3679fb86018cdaa477f1e8c9
|
[
"MIT"
] | 1
|
2016-08-01T18:52:02.000Z
|
2016-08-01T18:52:02.000Z
|
ee/core/models.py
|
rjdp/Easynginedemoplugin
|
9b9e5caa29630fae3679fb86018cdaa477f1e8c9
|
[
"MIT"
] | null | null | null |
ee/core/models.py
|
rjdp/Easynginedemoplugin
|
9b9e5caa29630fae3679fb86018cdaa477f1e8c9
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, DateTime, String, Integer, Boolean, func
from ee.core.database import Base
class SiteDB(Base):
"""
Database model for the sites table
"""
__tablename__ = 'sites'
id = Column(Integer, primary_key=True)
sitename = Column(String, unique=True)
site_type = Column(String)
cache_type = Column(String)
site_path = Column(String)
# Use default=func.now() to set the default created time
# of a site to be the current time when a
# Site record was created
created_on = Column(DateTime, default=func.now())
is_enabled = Column(Boolean, unique=False, default=True, nullable=False)
is_ssl = Column(Boolean, unique=False, default=False)
storage_fs = Column(String)
storage_db = Column(String)
def __init__(self, sitename=None, site_type=None, cache_type=None,
site_path=None, site_enabled=None,
is_ssl=None, storage_fs=None, storage_db=None):
self.sitename = sitename
self.site_type = site_type
self.cache_type = cache_type
self.site_path = site_path
self.is_enabled = site_enabled
self.is_ssl = is_ssl
self.storage_fs = storage_fs
self.storage_db = storage_db
# def __repr__(self):
# return '<Site %r>' % (self.site_type)
#
# def getType(self):
# return '%r>' % (self.site_type)
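# Hedged usage sketch (editor addition, illustrative only): persisting a site
# record with this model. The db_session argument is a hypothetical SQLAlchemy
# session obtained from ee.core.database.
def _example_add_site(db_session):
    site = SiteDB(sitename='example.com', site_type='wp', cache_type='basic',
                  site_path='/var/www/example.com', site_enabled=True,
                  is_ssl=False, storage_fs='ext4', storage_db='mysql')
    db_session.add(site)
    db_session.commit()
    return site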
| 31.840909
| 76
| 0.652391
|
753ac9c959b5fa53fe76f8208647742b3e449af0
| 2,151
|
py
|
Python
|
Src/DatasetToolbox.py
|
mortarsynth/ML_task1
|
a929dbf84c4c07194ad6cc4f3536b6ddd83a2644
|
[
"MIT"
] | null | null | null |
Src/DatasetToolbox.py
|
mortarsynth/ML_task1
|
a929dbf84c4c07194ad6cc4f3536b6ddd83a2644
|
[
"MIT"
] | null | null | null |
Src/DatasetToolbox.py
|
mortarsynth/ML_task1
|
a929dbf84c4c07194ad6cc4f3536b6ddd83a2644
|
[
"MIT"
] | null | null | null |
import numpy as np
def shuffle(dataset, labels):
dataset_size = labels.shape[0]
p = np.random.permutation(dataset_size)
dataset = dataset[p, :]
labels = labels[p]
return dataset, labels
def reduceByQuant(dataset, labels, quantizer=1): # make its size divisible by batch_size*num_folds
dataset_size = labels.shape[0]
multiple_size = (dataset_size // quantizer) * quantizer
dataset = dataset[0:multiple_size, :]
labels = labels[0:multiple_size]
new_dataset_size = multiple_size
return dataset, labels, new_dataset_size
def normalize(dataset, num_folds=1):
dataset_size = dataset.shape[0]
num_features = dataset.shape[1]
fold_size = dataset_size // num_folds
for fold_iter in range(num_folds):
for feature_iter in range(num_features):
f_mean = np.mean(dataset[fold_iter*fold_size:(fold_iter + 1)*fold_size,
feature_iter])
f_std = np.std(dataset[fold_iter*fold_size:(fold_iter + 1)*fold_size,
feature_iter])
f_max = np.max(dataset[fold_iter*fold_size:(fold_iter + 1)*fold_size,
feature_iter])
if not f_std == 0:
dataset[fold_iter*fold_size:(fold_iter + 1)*fold_size, feature_iter] -= f_mean
dataset[fold_iter*fold_size:(fold_iter + 1)*fold_size, feature_iter] /= f_std
elif not f_max == 0:
dataset[fold_iter*fold_size:(fold_iter + 1)*fold_size, feature_iter] /= f_max
return dataset
def makeFolds(dataset, labels, start_index, end_index, is_train):
if is_train:
data_fold = np.delete(dataset,
slice(start_index, end_index),
axis=0)
label_fold = np.delete(labels,
slice(start_index, end_index),
axis=0)
# is_val
else:
data_fold = dataset[start_index:end_index, :]
label_fold = labels[start_index:end_index]
return data_fold, label_fold
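# Hedged sketch (editor addition, illustrative only): a typical preprocessing
# pipeline built from the helpers above, using random data, 2 folds and a
# hypothetical batch size of 32.
def _example_pipeline():
    dataset = np.random.rand(1000, 10)
    labels = np.random.randint(0, 2, 1000)
    dataset, labels = shuffle(dataset, labels)
    dataset, labels, size = reduceByQuant(dataset, labels, quantizer=32 * 2)
    dataset = normalize(dataset, num_folds=2)
    fold_size = size // 2
    train_x, train_y = makeFolds(dataset, labels, 0, fold_size, is_train=True)
    val_x, val_y = makeFolds(dataset, labels, 0, fold_size, is_train=False)
    return train_x.shape, val_x.shape  # (480, 10), (480, 10)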
| 34.142857
| 99
| 0.593212
|
e7f58361a55aa3cf12e372f9c234e78d6c0537fb
| 1,781
|
py
|
Python
|
setup.py
|
paulross/typin
|
113224d868c95e93b9ae724b0a9d9cfe3e3c78f8
|
[
"MIT"
] | 7
|
2017-11-12T21:29:18.000Z
|
2019-01-30T01:50:47.000Z
|
setup.py
|
paulross/typin
|
113224d868c95e93b9ae724b0a9d9cfe3e3c78f8
|
[
"MIT"
] | null | null | null |
setup.py
|
paulross/typin
|
113224d868c95e93b9ae724b0a9d9cfe3e3c78f8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
# 'Click>=6.0',
# TODO: put package requirements here
]
setup_requirements = [
'pytest-runner',
# TODO(paulross): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
'pytest',
# TODO: put package test requirements here
]
setup(
name='typin',
version='0.1.0',
description="Python type inferencing.",
long_description=readme + '\n\n' + history,
author="Paul Ross",
author_email='apaulross@gmail.com',
url='https://github.com/paulross/typin',
packages=find_packages('src'),
package_dir={'':'src'},
entry_points={
'console_scripts': [
'typin_cli=typin.typin_cli:main'
]
},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='typin',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Testing',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| 26.984848
| 78
| 0.633914
|
f42b6cfba35a973318507b35426d1972fb4b1221
| 1,471
|
py
|
Python
|
bin/pzem_004t.py
|
kepon85/PvMonit
|
14d3e9dc8711b5721db76543dde3089a52e73fc2
|
[
"Beerware"
] | 16
|
2016-11-16T08:56:14.000Z
|
2021-04-30T02:49:26.000Z
|
bin/pzem_004t.py
|
kepon85/PvMonit
|
14d3e9dc8711b5721db76543dde3089a52e73fc2
|
[
"Beerware"
] | 9
|
2017-05-15T12:20:48.000Z
|
2021-03-13T17:45:12.000Z
|
bin/pzem_004t.py
|
kepon85/PvMonit
|
14d3e9dc8711b5721db76543dde3089a52e73fc2
|
[
"Beerware"
] | 9
|
2017-01-29T10:44:10.000Z
|
2021-12-06T08:52:16.000Z
|
#!/usr/bin/python3
# Contrib de akoirium
import pymodbus
import serial
import math
from pymodbus.pdu import ModbusRequest
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
from pymodbus.transaction import ModbusRtuFramer
def calc (registers, factor):
format = '%%0.%df' % int (math.ceil (math.log10 (factor)))
if len(registers) == 1:
return format % ((1.0 * registers[0]) / factor)
elif len(registers) == 2:
return format % (((1.0 * registers[1] * 65535) + (1.0 * registers[0])) / factor)
#endif
#end calc
client = ModbusClient (method = "rtu", port="/dev/ttyUSB1", stopbits = 1, bytesize = 8, parity = 'N', baudrate = 9600)
#Connect to the serial modbus server
connection = client.connect()
if client.connect ():
try:
result = client.read_input_registers (0x0000, 10, unit = 0x01)
print ('{', end='')
print ('"V": ' + calc (result.registers[0:1], 10) + ', ', end='')
print ('"A": ' + calc (result.registers[1:3], 1000) + ', ', end='')
print ('"P": ' + calc (result.registers[3:5], 10) + ', ', end='')
print ('"E": ' + calc (result.registers[5:7], 1) + ', ', end='')
print ('"F": ' + calc (result.registers[7:8], 10) + ', ', end='')
print ('"f": ' + calc (result.registers[8:9], 100) + ', ', end='')
print ('"a": ' + calc (result.registers[9:10], 1), end='')
print ('}')
finally:
client.close()
#end try
#end if
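# Hedged example (editor addition, illustrative only): how calc() scales raw
# PZEM-004T registers. A single register is divided by the factor, while a
# register pair [low, high] is first combined as low + high*65535.
def _example_calc():  # hypothetical helper, not called by the script
    return calc([2305], 10), calc([123, 1], 1000)  # ('230.5', '65.658')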
| 34.209302
| 118
| 0.575799
|
dabd5ac218139076fbbb339b1dada932f1bc8c60
| 548
|
py
|
Python
|
queries/q12.py
|
csruiliu/tpch-pyspark
|
ec707ddd8a5e917b08e0ee1ce320b826fa6aa977
|
[
"MIT"
] | null | null | null |
queries/q12.py
|
csruiliu/tpch-pyspark
|
ec707ddd8a5e917b08e0ee1ce320b826fa6aa977
|
[
"MIT"
] | null | null | null |
queries/q12.py
|
csruiliu/tpch-pyspark
|
ec707ddd8a5e917b08e0ee1ce320b826fa6aa977
|
[
"MIT"
] | null | null | null |
query = """
SELECT L_SHIPMODE,
sum(CASE WHEN O_ORDERPRIORITY='1-URGENT'
OR O_ORDERPRIORITY='2-HIGH'
THEN 1 ELSE 0 END) AS HIGH_LINE_COUNT,
sum(CASE WHEN O_ORDERPRIORITY <> '1-URGENT'
AND O_ORDERPRIORITY <> '2-HIGH'
THEN 1 ELSE 0 END) AS LOW_LINE_COUNT
FROM orders,
lineitem
WHERE O_ORDERKEY = L_ORDERKEY
AND L_SHIPMODE IN ('MAIL', 'SHIP')
AND L_COMMITDATE < L_RECEIPTDATE
AND L_SHIPDATE < L_COMMITDATE
AND L_RECEIPTDATE >= '1994-01-01'
AND L_RECEIPTDATE < '1995-01-01'
GROUP BY L_SHIPMODE
ORDER BY L_SHIPMODE
"""
| 27.4
| 46
| 0.715328
|
9ab1d72fffa2ff7e039972ae8681cb2ad04ed903
| 434
|
py
|
Python
|
rvpvp/isa/rvf/fcvt_w_s.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | 5
|
2021-05-10T09:57:00.000Z
|
2021-10-05T14:39:20.000Z
|
rvpvp/isa/rvf/fcvt_w_s.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | null | null | null |
rvpvp/isa/rvf/fcvt_w_s.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | 1
|
2021-05-14T20:24:11.000Z
|
2021-05-14T20:24:11.000Z
|
from ...isa.inst import *
import numpy as np
class Fcvt_w_s(Inst):
name = 'fcvt.w.s'
def golden(self):
if 'val1' in self.keys():
if self['val1'] < -(1<<31) or np.isneginf(self['val1']):
return -(1<<31)
if self['val1'] > ((1<<31)-1) or np.isposinf(self['val1']) or np.isnan(self['val1']):
return (1<<31)-1
return int(self['val1'])
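# Hedged note (editor addition, illustrative only): the golden model above
# saturates like RV32 fcvt.w.s -- inputs below -2**31 (or -inf) return
# -2147483648, inputs above 2**31 - 1 (or +inf / NaN) return 2147483647,
# and in-range values are truncated toward zero, e.g. 12.9 -> 12.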
| 28.933333
| 97
| 0.479263
|
f2aba68d8420297e59b9485081be29377f2557b4
| 11,712
|
py
|
Python
|
lib/id3c/labelmaker.py
|
sonali-mhihim/id3c
|
1e3967d6c24e9cadb34cae1c5e1e79415a2250dc
|
[
"MIT"
] | 1
|
2021-03-19T15:18:33.000Z
|
2021-03-19T15:18:33.000Z
|
lib/id3c/labelmaker.py
|
sonali-mhihim/id3c
|
1e3967d6c24e9cadb34cae1c5e1e79415a2250dc
|
[
"MIT"
] | null | null | null |
lib/id3c/labelmaker.py
|
sonali-mhihim/id3c
|
1e3967d6c24e9cadb34cae1c5e1e79415a2250dc
|
[
"MIT"
] | null | null | null |
"""
Make physical barcode labels for identifiers.
"""
import logging
import json
import os
import requests
from itertools import chain
from typing import Iterable
LOG = logging.getLogger(__name__)
DEFAULT_LABEL_API = os.environ.get("LABEL_API") \
or "https://backoffice.seattleflu.org/labels"
class LabelLayout:
"""
Layouts, based on the kind of identifier, affect the number of copies of
each barcode, the label presentation, and label text.
"""
sku: str
barcode_type: str
copies_per_barcode = 1
reference: str
layouts = {'default'}
blank = {
"text": "",
"copies": 1,
}
def __init__(self, barcodes, layout: str='default'):
if not self.sku:
raise NotImplementedError("sku must be set by a subclass")
if not self.barcode_type:
raise NotImplementedError("barcode_type must be set by a subclass")
if layout not in self.layouts:
raise NotImplementedError(f"layout must be one of: {self.layouts}")
self.barcodes = barcodes
def label(self, barcode):
"""
Returns a label spec for the given *barcode*.
"""
return {
"text": f"{self.barcode_type} {barcode}\n{self.reference or ''}",
"barcode": barcode,
"copies": self.copies_per_barcode,
}
def blanks_before(self, barcode_number):
"""
Returns the number of blank labels to insert before the given
*barcode_number*. Defaults to 0 (no blanks).
"""
return 0
def spec(self):
"""
Returns a layout spec suitable for passing to a `Lab Labels
<https://github.com/MullinsLab/Lab-Labels>`_ web service.
"""
def flatten(iterable):
return list(chain.from_iterable(iterable))
return {
"type": self.sku,
"labels": list(
flatten(
(*([self.blank] * self.blanks_before(number)), labels)
for number, labels
in enumerate(map(self.label, self.barcodes), start = 1)
)
),
}
class LCRY1100TriplicateLayout(LabelLayout):
sku = "LCRY-1100"
copies_per_barcode = 3
def blanks_before(self, barcode_number):
"""
Each barcode maps to 3 labels. Each row is 4 labels wide, so for
better UX we want all labels in the 4th column to be blank. We can
express this without using a mutable label sequence number by inserting
a blank label before every barcode except the first (e.g. the 2nd
barcode normally would start filling in the 4th label; by inserting a
blank, it starts filling in from the 1st label of the next row).
"""
return 1 if barcode_number > 1 else 0
class SamplesLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "SAMPLE"
copies_per_barcode = 2
reference = "seattleflu.org"
def blanks_before(self, barcode_number):
"""
Each barcode maps to 2 labels. Each row is 7 labels wide, so for
better UX we want all labels in the 7th column to be blank. We can
express this without using a mutable label sequence number by
        inserting a blank label before the 4th barcode and before every third
        barcode after that (e.g. the 4th barcode normally would start filling in
        the 7th label; by inserting a blank, it starts filling in from the 1st
        label of the next row).
"""
return 1 if barcode_number > 1 and (barcode_number - 1) % 3 == 0 else 0
class CollectionsSeattleFluLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "COLLECTION"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsKiosksLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "KIOSK"
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsKiosksAsymptomaticLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ASYMPTOMATIC KIOSK"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsEnvironmentalLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ENVIRON"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsSwabAndSendLayout(LCRY1100TriplicateLayout):
barcode_type = "SWAB & SEND"
reference = "seattleflu.org"
class CollectionsHouseholdObservationLayout(LCRY1100TriplicateLayout):
barcode_type = "HH OBSERVATION"
reference = "seattleflu.org"
class CollectionsHouseholdObservationAsymptomaticLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ASYMPTOMATIC HH OBS"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsHouseholdInterventionLayout(LCRY1100TriplicateLayout):
barcode_type = "HH INTERVENTION"
reference = "seattleflu.org"
class CollectionsHouseholdInterventionAsymptomaticLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "ASYMPTOMATIC HH INT"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsSelfTestLayout(LCRY1100TriplicateLayout):
barcode_type = "HOME TEST"
reference = "seattleflu.org"
class CollectionsFluAtHomeLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "COLLECTION"
copies_per_barcode = 1
reference = "fluathome.org"
class KitsFluAtHomeLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "KIT"
copies_per_barcode = 1
reference = "fluathome.org"
class _TestStripsFluAtHomeLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "TEST STRIP"
copies_per_barcode = 1
reference = "fluathome.org"
class CollectionsScanLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCAN'
copies_per_barcode = 2
reference = "scanpublichealth.org"
class CollectionsScanKiosksLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCAN - STAVE'
copies_per_barcode = 1
reference = "scanpublichealth.org"
class CollectionsCliaComplianceLayout(LabelLayout):
barcode_type = "CLIA"
copies_per_barcode = 1
reference = "seattleflu.org"
layouts = {'default', 'small'}
def __init__(self, barcodes, layout: str='default'):
self.layout = layout
self.sku = "LCRY-2380" if layout == 'small' else "LCRY-1100"
super().__init__(barcodes)
def label(self, barcode):
"""
Returns a label spec for the given *barcode*. If the small layout is
requested, excludes the barcode type and barcode text.
"""
if self.layout == 'small':
return {
"text": self.reference,
"barcode": barcode,
"copies": self.copies_per_barcode,
}
return super().label(barcode)
class CollectionsHaarviLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "COLLECTION"
copies_per_barcode = 1
reference = "HAARVI"
class SamplesHaarviLayout(LabelLayout):
sku = "LCRY-2380"
barcode_type = "SAMPLE"
copies_per_barcode = 1
reference = "HAARVI"
class CollectionsHouseholdGeneralLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = "HH GENERAL"
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsUWObservedLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'UW OBSERVED'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsUWHomeLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'UW HOME'
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsChildcareLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'CHILDCARE'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsSchoolTestingHomeLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCHOOL TESTING HOME'
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsSchoolTestingObservedLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'SCHOOL TESTING OBSERVED'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsAppleRespiratoryLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'APPLE'
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsAppleRespiratorySerialLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'APPLE SERIAL'
copies_per_barcode = 2
reference = "seattleflu.org"
class CollectionsAdultFamilyHomeOutbreakLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'AFH OUTBREAK'
copies_per_barcode = 1
reference = "seattleflu.org"
class CollectionsWorkplaceOutbreakLayout(LabelLayout):
sku = "LCRY-1100"
barcode_type = 'WORKPLACE OUTBREAK'
copies_per_barcode = 1
reference = "seattleflu.org"
LAYOUTS = {
"samples": SamplesLayout,
"collections-scan": CollectionsScanLayout,
"collections-scan-kiosks": CollectionsScanKiosksLayout,
"collections-seattleflu.org": CollectionsSeattleFluLayout,
"collections-kiosks": CollectionsKiosksLayout,
"collections-kiosks-asymptomatic": CollectionsKiosksAsymptomaticLayout,
"collections-environmental": CollectionsEnvironmentalLayout,
"collections-swab&send": CollectionsSwabAndSendLayout,
"collections-household-observation": CollectionsHouseholdObservationLayout,
"collections-household-observation-asymptomatic": CollectionsHouseholdObservationAsymptomaticLayout,
"collections-household-intervention": CollectionsHouseholdInterventionLayout,
"collections-household-intervention-asymptomatic": CollectionsHouseholdInterventionAsymptomaticLayout,
"collections-household-general": CollectionsHouseholdGeneralLayout,
"collections-self-test": CollectionsSelfTestLayout,
"collections-fluathome.org": CollectionsFluAtHomeLayout,
"collections-clia-compliance": CollectionsCliaComplianceLayout,
"kits-fluathome.org": KitsFluAtHomeLayout,
"test-strips-fluathome.org": _TestStripsFluAtHomeLayout,
"samples-haarvi": SamplesHaarviLayout,
"collections-haarvi": CollectionsHaarviLayout,
'collections-uw-observed': CollectionsUWObservedLayout,
'collections-uw-home': CollectionsUWHomeLayout,
'collections-childcare': CollectionsChildcareLayout,
'collections-school-testing-home': CollectionsSchoolTestingHomeLayout,
'collections-school-testing-observed': CollectionsSchoolTestingObservedLayout,
'collections-apple-respiratory': CollectionsAppleRespiratoryLayout,
'collections-apple-respiratory-serial': CollectionsAppleRespiratorySerialLayout,
'collections-adult-family-home-outbreak': CollectionsAdultFamilyHomeOutbreakLayout,
'collections-workplace-outbreak': CollectionsWorkplaceOutbreakLayout,
}
def layout_identifiers(set_name: str, identifiers: Iterable, layout: str='default') -> LabelLayout:
"""
Use the layout associated with the given identifier *set_name* to make
labels for the given *identifiers*.
Each item in *identifiers* must have a ``barcode`` attribute. These are
passed to the layout.
"""
return LAYOUTS[set_name]([id.barcode for id in identifiers], layout)
def generate_pdf(layout: LabelLayout, api: str = DEFAULT_LABEL_API) -> bytes:
"""
Generate a PDF from the given *layout* using the `Lab Labels
<https://github.com/MullinsLab/Lab-Labels>`_ web service *api*.
Returns a byte string.
"""
spec = json.dumps(layout.spec())
LOG.info(f"Generating PDF using Lab Labels API at {api}")
response = requests.post(f"{api}/stickers",
headers = { "Content-Type": "application/json" },
data = spec)
response.raise_for_status()
return response.content
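# A minimal usage sketch of the API above, assuming an identifier object that exposes a
# ``barcode`` attribute (faked here with a namedtuple) and example barcode values of our
# own choosing. Rendering the PDF needs a reachable Lab Labels service, so that call is
# left commented out.
if __name__ == '__main__':
    from collections import namedtuple
    FakeIdentifier = namedtuple('FakeIdentifier', 'barcode')
    identifiers = [FakeIdentifier('AAAAAAAA'), FakeIdentifier('BBBBBBBB')]
    layout = layout_identifiers('samples', identifiers)
    print(json.dumps(layout.spec(), indent=2))
    # pdf_bytes = generate_pdf(layout)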
| 30.420779
| 106
| 0.691428
|
07c958bce6304a7844f694ae39cd81051df5b0a8
| 666
|
py
|
Python
|
lib/graph.py
|
psobot/SampleScanner
|
a95f694ad326bf0c7dda580f62dd38767c0a9754
|
[
"MIT"
] | 55
|
2017-04-12T15:32:42.000Z
|
2022-03-19T04:23:47.000Z
|
lib/graph.py
|
nandoflorestan/SampleScanner
|
9eced0c52759415229e454f1e9cf788cf1af3d7f
|
[
"MIT"
] | 7
|
2017-04-14T12:10:32.000Z
|
2021-10-15T11:43:59.000Z
|
lib/graph.py
|
psobot/SampleScanner
|
a95f694ad326bf0c7dda580f62dd38767c0a9754
|
[
"MIT"
] | 9
|
2017-04-13T18:59:45.000Z
|
2021-04-18T13:53:28.000Z
|
import os
import csv
import sys
import numpy
import itertools
import traceback
from tqdm import tqdm
from tabulate import tabulate
from wavio import read_wave_file
import matplotlib.pyplot as plt
sampling_rate = 48000.0
assume_stereo_frequency_match = True
CHUNK_SIZE = 2048
def process_all(start, stop, *files):
start = int(start)
stop = int(stop)
chunk_offset = ((-1 * start) % CHUNK_SIZE)
for file in files:
stereo = read_wave_file(file)
left = stereo[0]
right = stereo[1]
plt.plot(left[start:stop])
plt.plot(right[start:stop])
plt.show()
if __name__ == "__main__":
process_all(*sys.argv[1:])
| 20.181818
| 46
| 0.689189
|
bdf5e3577fafff34a0456f07d01fe77a6ed59e46
| 5,422
|
py
|
Python
|
tests/test_lookuptable.py
|
Bam4d/neon
|
87ee86a5129a45f0ebbb4bb559296de935208fc4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_lookuptable.py
|
Bam4d/neon
|
87ee86a5129a45f0ebbb4bb559296de935208fc4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_lookuptable.py
|
Bam4d/neon
|
87ee86a5129a45f0ebbb4bb559296de935208fc4
|
[
"Apache-2.0"
] | 2
|
2018-04-04T17:13:00.000Z
|
2018-04-17T21:15:24.000Z
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Test of a LookupTable layer, which is often being used for word embedding
'''
import itertools as itt
import numpy as np
from neon.backends import gen_backend
from neon import NervanaObject
from neon.initializers.initializer import GlorotUniform
from neon.layers.layer import LookupTable
def pytest_generate_tests(metafunc):
if metafunc.config.option.all:
bsz_rng = [16, 32, 64]
else:
bsz_rng = [128]
if 'basic_linargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
nin_rng = [1, 2, 64, 128]
nout_rng = [1, 4, 128, 64]
vocab_size = [1, 4, 1000, 2000]
else:
nin_rng = [4, 32]
nout_rng = [3, 33]
vocab_size = [10, 34]
fargs = itt.product(nin_rng, nout_rng, vocab_size, bsz_rng)
        print(fargs)
metafunc.parametrize('basic_linargs', fargs)
def test_lookuptable_zeros_error(backend_default, basic_linargs):
# basic sanity check with 0 weights random inputs
nin, nout, batch_size, vocab_size = basic_linargs
NervanaObject.be.bsz = batch_size
dtypeu = np.float32
init_glorot = GlorotUniform()
layer = LookupTable(
vocab_size=vocab_size, embedding_dim=nout, init=init_glorot)
inp = np.random.random_integers(0, vocab_size-1, size=nin*batch_size)
layer.configure(nin)
layer.allocate()
layer.prev_layer = True # Hack to force delta buffer allocation
layer.set_deltas([layer.be.iobuf(nin)])
inputs = layer.be.array(inp.reshape((nin, batch_size)))
out = layer.fprop(inputs).get()
W = layer.W.get()
for i in range(nin*batch_size):
assert np.all(W[inp[i]].T == out[:, i])
err = dtypeu(np.zeros((nout, nin * batch_size)))
layer.bprop(layer.be.array(err)).asnumpyarray()
dw = layer.dW.asnumpyarray()
assert np.min(dw) == 0.0 and np.max(dw) == 0.0
return
def test_lookuptable_ones_error(backend_default, basic_linargs):
nin, nout, batch_size, vocab_size = basic_linargs
NervanaObject.be.bsz = batch_size
dtypeu = np.float32
init_glorot = GlorotUniform()
layer = LookupTable(
vocab_size=vocab_size, embedding_dim=nout, init=init_glorot)
inp = np.random.random_integers(0, vocab_size-1, size=nin*batch_size)
layer.configure(nin)
layer.allocate()
layer.prev_layer = True # Hack to force delta buffer allocation
layer.set_deltas([layer.be.iobuf(nin)])
inputs = layer.be.array(inp.reshape((nin, batch_size)))
out = layer.fprop(inputs).get()
W = layer.W.get()
for i in range(nin*batch_size):
assert np.all(W[inp[i]].T == out[:, i])
err = dtypeu(np.ones((nout, nin * batch_size)))
layer.bprop(layer.be.array(err)).asnumpyarray()
dw = layer.dW.asnumpyarray()
unqidx, count = np.unique(inp, return_counts=True)
dw_exp = np.zeros((1, nout))
for wrd_id, cnt in zip(unqidx, count):
dw_exp = err[:, 0] * cnt
assert np.all(dw_exp == dw[wrd_id, :])
return
def test_lookuptable_rand_error(backend_default, basic_linargs):
nin, nout, batch_size, vocab_size = basic_linargs
NervanaObject.be.bsz = batch_size
dtypeu = np.float32
init_glorot = GlorotUniform()
layer = LookupTable(
vocab_size=vocab_size, embedding_dim=nout, init=init_glorot)
inp = np.random.random_integers(0, vocab_size-1, size=nin*batch_size)
layer.configure(nin)
layer.allocate()
layer.prev_layer = True # Hack to force delta buffer allocation
layer.set_deltas([layer.be.iobuf(nin)])
inputs = layer.be.array(inp.reshape((nin, batch_size)))
out = layer.fprop(inputs).get()
W = layer.W.get()
for i in range(nin*batch_size):
assert np.all(W[inp[i]].T == out[:, i])
err = dtypeu(np.random.random((nout, nin * batch_size)))
layer.bprop(layer.be.array(err)).asnumpyarray()
dw = layer.dW.asnumpyarray()
unqidx, count = np.unique(inp, return_counts=True)
dw_exp = np.zeros((1, nout))
for wrd_id, cnt in zip(unqidx, count):
dw_exp[:] = 0
cnt_exp = 0
for i, w_id in enumerate(inp):
if w_id == wrd_id:
dw_exp[:] = dw_exp[:] + err[:, i]
cnt_exp += 1
assert np.allclose(dw[wrd_id, :], dw_exp, atol=0, rtol=1e-4)
assert np.allclose(dw_exp, dw[wrd_id, :], atol=0, rtol=1e-4)
assert cnt == cnt_exp
return
if __name__ == '__main__':
fargs = [1, 128, 1, 1]
be = gen_backend(backend='cpu',
datatype=np.float32,
batch_size=128,
rng_seed=0)
test_lookuptable_zeros_error(be, fargs)
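# A small numpy-only sketch of the gradient rule the tests above verify: for a lookup
# table, the gradient for word index w is the sum of the error columns at every position
# where w occurred. The function and demo names below are illustrative only.
def reference_lookuptable_grad(inp, err, vocab_size):
    dW = np.zeros((vocab_size, err.shape[0]))
    for col, w_id in enumerate(inp):
        dW[w_id] += err[:, col]
    return dW
def demo_reference_lookuptable_grad():
    rng = np.random.RandomState(0)
    inp = rng.randint(0, 5, size=8)
    err = rng.rand(3, 8)
    dW = reference_lookuptable_grad(inp, err, vocab_size=5)
    for w_id in np.unique(inp):
        assert np.allclose(dW[w_id], err[:, inp == w_id].sum(axis=1))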
| 32.662651
| 78
| 0.635006
|
01ac1b4158e2b28a350b3688a2b767cd3fb99a18
| 2,661
|
py
|
Python
|
py_wake/tests/test_sites/test_shear_models.py
|
aemoser/PyWake
|
889a2c10882195af21339e9bcf2ede0db9b58319
|
[
"MIT"
] | null | null | null |
py_wake/tests/test_sites/test_shear_models.py
|
aemoser/PyWake
|
889a2c10882195af21339e9bcf2ede0db9b58319
|
[
"MIT"
] | null | null | null |
py_wake/tests/test_sites/test_shear_models.py
|
aemoser/PyWake
|
889a2c10882195af21339e9bcf2ede0db9b58319
|
[
"MIT"
] | null | null | null |
from py_wake.site.shear import PowerShear, LogShear
import numpy as np
from py_wake.tests import npt
from py_wake.site._site import UniformSite
import xarray as xr
import matplotlib.pyplot as plt
def test_power_shear():
h_lst = np.arange(10, 100, 10)
site = UniformSite([1], .1, shear=PowerShear(70, alpha=[.1, .2]))
WS = site.local_wind(x_i=h_lst * 0, y_i=h_lst * 0, h_i=h_lst, wd=[0, 180], ws=[10, 12]).WS
if 0:
plt.plot(WS.sel(wd=0, ws=10), WS.h, label='alpha=0.1')
plt.plot((h_lst / 70)**0.1 * 10, h_lst, ':')
plt.plot(WS.sel(wd=180, ws=12), WS.h, label='alpha=0.2')
plt.plot((h_lst / 70)**0.2 * 12, h_lst, ':')
plt.legend()
plt.show()
npt.assert_array_equal(WS.sel(wd=0, ws=10), (h_lst / 70)**0.1 * 10)
npt.assert_array_equal(WS.sel(wd=180, ws=12), (h_lst / 70)**0.2 * 12)
def test_log_shear():
h_lst = np.arange(10, 100, 10)
site = UniformSite([1], .1, shear=LogShear(70, z0=[.02, 2]))
WS = site.local_wind(x_i=h_lst * 0, y_i=h_lst * 0, h_i=h_lst, wd=[0, 180], ws=[10, 12]).WS
if 0:
plt.plot(WS.sel(wd=0, ws=10), WS.h, label='z0=0.02')
plt.plot(np.log(h_lst / 0.02) / np.log(70 / 0.02) * 10, h_lst, ':')
plt.plot(WS.sel(wd=180, ws=12), WS.h, label='z0=2')
plt.plot(np.log(h_lst / 2) / np.log(70 / 2) * 12, h_lst, ':')
plt.legend()
plt.show()
npt.assert_array_equal(WS.sel(wd=0, ws=10), np.log(h_lst / 0.02) / np.log(70 / 0.02) * 10)
npt.assert_array_equal(WS.sel(wd=180, ws=12), np.log(h_lst / 2) / np.log(70 / 2) * 12)
def test_log_shear_constant_z0():
h_lst = np.arange(10, 100, 10)
site = UniformSite([1], .1, shear=LogShear(70, z0=.02))
WS = site.local_wind(x_i=h_lst * 0, y_i=h_lst * 0, h_i=h_lst, wd=[0, 180], ws=[10, 12]).WS
if 0:
plt.plot(WS.sel(ws=10), WS.h, label='z0=0.02')
plt.plot(np.log(h_lst / 0.02) / np.log(70 / 0.02) * 10, h_lst, ':')
plt.legend()
plt.show()
npt.assert_array_equal(WS.sel(ws=10), np.log(h_lst / 0.02) / np.log(70 / 0.02) * 10)
def test_custom_shear():
def my_shear(WS, WD, h):
return WS * (0.02 * (h - 70) + 1) * (WD * 0 + 1)
h_lst = np.arange(10, 100, 10)
site = UniformSite([1], .1, shear=my_shear)
WS = site.local_wind(x_i=h_lst * 0, y_i=h_lst * 0, h_i=h_lst, wd=[0, 180], ws=[10, 12]).WS
if 0:
plt.plot(WS.sel(wd=0, ws=10), WS.h, label='2z-2')
plt.plot((h_lst - 70) * 0.2 + 10, h_lst, ':')
plt.legend()
plt.show()
npt.assert_array_almost_equal(WS.sel(wd=0, ws=10), (h_lst - 70) * 0.2 + 10)
if __name__ == '__main__':
test_power_shear()
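# A numpy-only sketch of the two shear profiles the tests above check against: the power
# law WS(h) = WS_ref * (h / h_ref)**alpha and the logarithmic profile
# WS(h) = WS_ref * ln(h / z0) / ln(h_ref / z0). Function names are illustrative only.
def reference_power_shear(ws_ref, h, h_ref=70.0, alpha=0.1):
    return ws_ref * (h / h_ref) ** alpha
def reference_log_shear(ws_ref, h, h_ref=70.0, z0=0.02):
    return np.log(h / z0) / np.log(h_ref / z0) * ws_ref
def demo_reference_shear_profiles():
    h = np.arange(10, 100, 10)
    npt.assert_array_almost_equal(reference_power_shear(10.0, h), (h / 70) ** 0.1 * 10)
    npt.assert_array_almost_equal(reference_log_shear(10.0, h), np.log(h / 0.02) / np.log(70 / 0.02) * 10)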
| 36.452055
| 94
| 0.57159
|
e2deddb0087b884af47b8979246992b8d05ce7b5
| 556
|
py
|
Python
|
problems/16. Merge Sorted Array.py
|
mr-vaibh/Python-200days-leetcode
|
7d0b4c15f91ef1384fb41a967d61cbc508cac034
|
[
"MIT"
] | null | null | null |
problems/16. Merge Sorted Array.py
|
mr-vaibh/Python-200days-leetcode
|
7d0b4c15f91ef1384fb41a967d61cbc508cac034
|
[
"MIT"
] | null | null | null |
problems/16. Merge Sorted Array.py
|
mr-vaibh/Python-200days-leetcode
|
7d0b4c15f91ef1384fb41a967d61cbc508cac034
|
[
"MIT"
] | null | null | null |
def merge(nums1, m, nums2, n):
if n == 0:
return
p = m - 1
q = n - 1
r = m + n - 1
while p != -1 and q != -1:
if nums1[p] >= nums2[q]:
nums1[r] = nums1[p]
p -= 1
else:
nums1[r] = nums2[q]
q -= 1
r -= 1
if p != -1:
nums1[0:r+1] = nums1[0:p+1]
else:
nums1[0:r+1] = nums2[0:q+1]
first = [1, 2, 3, 0, 0, 0]
second = [2, 5, 6,]
merge(first, 3, second, 3)
print(first)
# https://leetcode.com/problems/merge-sorted-array/
# DOUBT!!!!
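# A quick standalone check of the three-pointer, back-to-front merge above against
# Python's built-in sort; the helper name and the random test sizes are our own.
def check_merge_against_sorted(trials=100):
    import random
    for _ in range(trials):
        m, n = random.randint(0, 5), random.randint(0, 5)
        a = sorted(random.randint(0, 9) for _ in range(m))
        b = sorted(random.randint(0, 9) for _ in range(n))
        nums1 = a + [0] * n
        merge(nums1, m, b, n)
        assert nums1 == sorted(a + b), (a, b, nums1)
check_merge_against_sorted()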
| 18.533333
| 51
| 0.419065
|
69fde923b8233178ec997288019b68b56f88a1f2
| 3,398
|
py
|
Python
|
Biljne vrste/models.py
|
toni4848/biljnevrste_repo
|
8d48a75c67a0208ddad1be78284d653fb2303c94
|
[
"MIT"
] | null | null | null |
Biljne vrste/models.py
|
toni4848/biljnevrste_repo
|
8d48a75c67a0208ddad1be78284d653fb2303c94
|
[
"MIT"
] | null | null | null |
Biljne vrste/models.py
|
toni4848/biljnevrste_repo
|
8d48a75c67a0208ddad1be78284d653fb2303c94
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class BiljnaVrsta(models.Model):
ID_biljne_vrste = models.AutoField(primary_key=True)
hrvatski_naziv = models.CharField(max_length=100)
latinski_naziv = models.CharField(max_length=100)
sinonim_vrste = models.CharField(max_length=100)
opis_vrste = models.CharField(max_length=225)
    ID_roda = models.ForeignKey('Rod', on_delete=models.CASCADE)  # string reference: Rod is defined later in this module
    ID_sistematicara = models.ForeignKey('Sistematicar', on_delete=models.CASCADE)  # Sistematicar is also defined later
class Meta:
verbose_name = "Biljna vrsta"
verbose_name_plural = "Biljne vrste"
def __str__(self):
return self.hrvatski_naziv
class Slika(models.Model):
ID_slike = models.AutoField(primary_key=True)
naziv_slike = models.CharField(max_length=50)
opis_slike = models.CharField(max_length=225)
    ID_uporabnog_djela = models.ForeignKey('UporabniDio', on_delete=models.CASCADE)  # UporabniDio is defined below
class Meta:
verbose_name = "Slika"
verbose_name_plural = "Slike"
def __str__(self):
return self.naziv_slike
class UporabniDio(models.Model):
ID_uporabnog_djela = models.AutoField(primary_key=True)
ID_slike = models.ForeignKey(Slika, on_delete=models.CASCADE)
uporabni_dio = models.CharField(max_length=100)
class Meta:
verbose_name = "Uporabni dio"
verbose_name_plural = "Uporabni djelovi"
def __str__(self):
return self.uporabni_dio
'''
class UporabniDioVrste(models.Model):
ID_uporabnog_djela = models.ForeignKey(UporabniDio, on_delete=models.CASCADE)
ID_biljne_vrste = models.ManyToManyField('BiljnaVrsta')
class Meta:
verbose_name = "Uporabni dio vrste"
verbose_name_plural = "Uporabni djelovi vrste"
    def __str__(self):  # Not really needed as a string here, hm
        return self.ID_uporabnog_djela
        return self.ID_biljne_vrste  # These returns are not needed here, hm
'''
class Rod(models.Model):
ID_roda = models.AutoField(primary_key=True)
naziv_roda = models.CharField(max_length=30)
class Meta:
verbose_name = "Rod"
verbose_name_plural = "Rodovi"
def __str__(self):
return self.naziv_roda
class Porodica(models.Model):
ID_porodice = models.AutoField(primary_key=True)
naziv_hrvatski = models.CharField(max_length=100)
naziv_latinski = models.CharField(max_length=100)
ID_roda = models.ForeignKey(Rod, on_delete=models.CASCADE)
class Meta:
verbose_name = "Porodica"
verbose_name_plural = "Porodice"
def __str__(self):
return self.naziv_hrvatski
class Sistematicar(models.Model):
ID_sistematicara = models.AutoField(primary_key=True)
naziv_sistematicara = models.CharField(max_length=100)
class Meta:
verbose_name = "Sistematicar"
verbose_name_plural = "Sistematicari"
def __str__(self):
return self.naziv_sistematicara
class Podvrsta(models.Model):
ID_podvrste = models.AutoField(primary_key=True)
naziv_podvrste = models.CharField(max_length=100)
class Meta:
verbose_name = "Podvrsta"
verbose_name_plural = "Podvrste"
def __str__(self):
return self.naziv_podvrste
class Varijet(models.Model):
ID_varijeta = models.AutoField(primary_key=True)
naziv_varijeta = models.CharField(max_length=100)
ID_podvrste = models.ForeignKey(Podvrsta, on_delete=models.CASCADE)
class Meta:
verbose_name = "Varijet"
verbose_name_plural = "Varijeteti"
def __str__(self):
return self.naziv_varijeta
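# A minimal usage sketch showing how the foreign keys above chain together; it is meant
# to be run from a Django shell with this app installed, and the sample values are
# invented for illustration.
def example_create_biljna_vrsta():
    rod = Rod.objects.create(naziv_roda="Mentha")
    sistematicar = Sistematicar.objects.create(naziv_sistematicara="Linnaeus")
    return BiljnaVrsta.objects.create(
        hrvatski_naziv="Paprena metvica",
        latinski_naziv="Mentha x piperita",
        sinonim_vrste="",
        opis_vrste="Example entry",
        ID_roda=rod,
        ID_sistematicara=sistematicar,
    )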
| 23.929577
| 80
| 0.74691
|
7ba5c274428c5bc4978fd8ed09947c7298e3964b
| 506
|
py
|
Python
|
nahamconctf2020/shifts_ahoy/exploit.py
|
nhtri2003gmail/ctf-write-ups
|
7e969c47027c39b614e10739ae3a953eed17dfa3
|
[
"MIT"
] | 101
|
2020-03-09T17:40:47.000Z
|
2022-03-31T23:26:55.000Z
|
nahamconctf2020/shifts_ahoy/exploit.py
|
nhtri2003gmail/ctf-write-ups
|
7e969c47027c39b614e10739ae3a953eed17dfa3
|
[
"MIT"
] | 1
|
2021-11-09T13:39:40.000Z
|
2021-11-10T19:15:04.000Z
|
nahamconctf2020/shifts_ahoy/exploit.py
|
datajerk/ctf-write-ups
|
1bc4ecc63a59de7d924c7214b1ce467801792da0
|
[
"MIT"
] | 31
|
2020-05-27T12:29:50.000Z
|
2022-03-31T23:23:32.000Z
|
#!/usr/bin/python3
from pwn import *
#p = process('./shifts-ahoy')
p = remote('jh2i.com', 50015)
p.sendlineafter('> ','1')
p.recvuntil('Enter the message: ')
binary = ELF('shifts-ahoy')
context.update(arch='amd64',os='linux')
shellcode = asm(shellcraft.sh())
jmp_r15 = list(binary.search(asm('jmp r15')))[0]
print("jmp r15",hex(jmp_r15))
payload = bytes([(x - ord('\r')) & 0xff for x in shellcode])
payload += (0x58 - len(payload)) * b'A'
payload += p64(jmp_r15)
p.sendline(payload)
p.interactive()
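# A standalone sketch of the byte-shift trick above: the payload is sent with ord('\r')
# subtracted from every byte, presumably because the target adds that shift back before
# copying the input, so the shellcode lands in memory unmodified. The round trip below
# uses plain bytes and needs no pwntools.
def demo_shift_round_trip():
    original = bytes(range(256))
    shifted_down = bytes((x - ord('\r')) & 0xff for x in original)
    shifted_back = bytes((x + ord('\r')) & 0xff for x in shifted_down)
    assert shifted_back == original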
| 22
| 61
| 0.658103
|
d4a6721aa066faca54e32331dcf709004b778d34
| 148
|
py
|
Python
|
src/__init__.py
|
dpyro/heapwrap
|
9cbf03edc19fd247cecc7a754c9c61221649f8b2
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
dpyro/heapwrap
|
9cbf03edc19fd247cecc7a754c9c61221649f8b2
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
dpyro/heapwrap
|
9cbf03edc19fd247cecc7a754c9c61221649f8b2
|
[
"MIT"
] | null | null | null |
"""@package heapwrap
Provides class wrappers for a min or max-heap using `heapq`.
"""
from .maxheap import MaxHeap
from .minheap import MinHeap
| 18.5
| 60
| 0.743243
|
c503f60c4d8f1b6936eeac9bb225ada57604a248
| 3,754
|
py
|
Python
|
Shortener/views.py
|
zeborg/UrlShortener
|
85a3e804bf318f460f20834f0cff05dad8ade6bd
|
[
"MIT"
] | 64
|
2017-06-14T14:35:16.000Z
|
2021-04-20T07:52:53.000Z
|
Shortener/views.py
|
zeborg/UrlShortener
|
85a3e804bf318f460f20834f0cff05dad8ade6bd
|
[
"MIT"
] | 26
|
2017-06-04T17:57:05.000Z
|
2021-09-22T17:38:09.000Z
|
Shortener/views.py
|
zeborg/UrlShortener
|
85a3e804bf318f460f20834f0cff05dad8ade6bd
|
[
"MIT"
] | 118
|
2017-06-05T22:08:02.000Z
|
2020-10-04T07:52:14.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from pyshorteners import Shortener
from .forms import Urlform, HOSTS
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from .serializers import UrlAPISerializer
from .services.rebrandly import Rebrandly
from .services.madwire import Madwire
# from pyshorteners.exceptions import UnknownShortenerException
BITLY_TOKEN = "19c73c3f96d4b2a64d0337ef7380cf0de313e8f7"
GOOGLE_TOKEN = "AIzaSyCyj45kuk95kopaSuJ4NvErGMyTVV9i3n4"
REBRANDLY_TOKEN = "b71d7dcfd2f14f0ca4f533bbd6fd226a"
CUTTLY_TOKEN = "9991d9d73156b576fbdfdc92d805dbfb80c76"
similar_hosts = ['Chilpit', 'Clckru', "Isgd", "Tinyurl", "Dagd", "NullPointer", "Osdb", "Qpsru"]
# reduced code and improved efficiency
# added cuttly token
def worker(url, host):
# Madwire, Google, and Rebrandly no longer supported by pyshortener hence removed
shortener = Shortener()
if host == "Bitly":
shortener = Shortener(api_key=BITLY_TOKEN)
short_url = shortener.bitly.short(url)
elif host == "Cuttly":
shortener = Shortener(api_key=CUTTLY_TOKEN)
short_url = shortener.cuttly.short(url)
    elif host in similar_hosts:
        # hosts listed in similar_hosts are dispatched dynamically here, so the
        # explicit elif branches for Tinyurl, Osdb, Chilpit, Clckru, Dagd and Qpsru
        # below are never reached
        short_url = getattr(shortener, host.lower())
short_url = short_url.short(url)
elif host == "Tinyurl":
short_url = shortener.tinyurl.short(url)
elif host == "Osdb":
short_url = shortener.osdb.short(url)
elif host == "Chilpit":
short_url = shortener.chilpit.short(url)
elif host == "Clckru":
short_url = shortener.clckru.short(url)
elif host == "Dagd":
short_url = shortener.dagd.short(url)
elif host == "Qpsru":
short_url = shortener.qpsru.short(url)
else:
short_url = "That service is no longer available via pyshortener"
return short_url
def home(request):
template = 'shortener/home.html'
if request.method == 'GET':
form_class = Urlform()
else:
form_class = Urlform(request.POST)
if form_class.is_valid():
url = form_class.cleaned_data['url']
host = form_class.cleaned_data['host']
short_url = worker(url, host)
form_class = Urlform()
return render(request, template, {'form': form_class, 'short_url': short_url, })
return render(request, template, {'form': form_class, })
class UrlShortenerAPIViewSet(viewsets.ViewSet):
"""
Shortens URL via a POST method.
Provide the following fields in your POST request:
"long_url": "URL to shorten, Example: https://www.youtube.com/watch?v=Y2VF8tmLFHw",
"host": "Shortening service to use, must be one of: [hosts]"
Returns:
"short_url": "Shortened URL"
"""
hostsString = ""
for host in HOSTS: hostsString += host[0] + " "
__doc__ = __doc__.replace("[hosts]", hostsString)
def create(self, request, format=None):
serializer = UrlAPISerializer(data=request.data)
if serializer.is_valid():
UrlAPIObject = serializer.create(serializer.data)
try:
ShortURL = worker(UrlAPIObject.long_url, UrlAPIObject.host)
except (TypeError):
return Response({'error': u'host must be one of: ' + self.hostsString},
status=status.HTTP_400_BAD_REQUEST)
except ValueError:
return Response({'error': u'url invalid, please use a valid url'}, status=status.HTTP_400_BAD_REQUEST)
return Response({'short_url': str(ShortURL)}, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
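# A small standalone sketch of the getattr-based dispatch used in worker() above, with a
# dummy object standing in for the pyshorteners Shortener so it runs without network
# access or API keys. All names below are illustrative only.
class _DummyService(object):
    def __init__(self, name):
        self.name = name
    def short(self, url):
        return 'https://{}.example/{}'.format(self.name, abs(hash(url)) % 10000)
class _DummyShortener(object):
    def __getattr__(self, name):
        return _DummyService(name)
def demo_dynamic_dispatch(url='https://example.org/some/long/path'):
    dummy = _DummyShortener()
    return {host: getattr(dummy, host.lower()).short(url) for host in similar_hosts}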
| 35.752381
| 118
| 0.676079
|
563144924b5977a28c5363100b3701a57e094c56
| 379
|
py
|
Python
|
tests/registries/legacy-1/test_legacy1_default_registry.py
|
pawelkopka/kopf
|
51a3a70e09a17cf3baec2946b64b125a90595cf4
|
[
"MIT"
] | 1,038
|
2019-03-26T16:32:56.000Z
|
2022-03-27T09:15:03.000Z
|
tests/registries/legacy-1/test_legacy1_default_registry.py
|
pawelkopka/kopf
|
51a3a70e09a17cf3baec2946b64b125a90595cf4
|
[
"MIT"
] | 393
|
2019-03-26T13:43:42.000Z
|
2020-09-14T13:18:14.000Z
|
tests/registries/legacy-1/test_legacy1_default_registry.py
|
pawelkopka/kopf
|
51a3a70e09a17cf3baec2946b64b125a90595cf4
|
[
"MIT"
] | 116
|
2019-03-31T23:01:09.000Z
|
2022-03-18T16:44:58.000Z
|
import kopf
def test_getting_default_registry():
registry = kopf.get_default_registry()
assert isinstance(registry, kopf.GlobalRegistry)
def test_setting_default_registry():
registry_expected = kopf.GlobalRegistry()
kopf.set_default_registry(registry_expected)
registry_actual = kopf.get_default_registry()
assert registry_actual is registry_expected
| 27.071429
| 52
| 0.799472
|
376c80121e1b0be6fff54f3427ce610642538ae9
| 1,254
|
py
|
Python
|
stock-price/stock-price-ic.py
|
cbarrese/katas
|
655b07562c06bb8b532ca141705ff127fb7e9e12
|
[
"MIT"
] | null | null | null |
stock-price/stock-price-ic.py
|
cbarrese/katas
|
655b07562c06bb8b532ca141705ff127fb7e9e12
|
[
"MIT"
] | null | null | null |
stock-price/stock-price-ic.py
|
cbarrese/katas
|
655b07562c06bb8b532ca141705ff127fb7e9e12
|
[
"MIT"
] | null | null | null |
# read a space-separated list of prices and convert them to ints
# (input() returns a string, so it has to be parsed explicitly)
stock_prices_yesterday = [int(x) for x in input('Enter stock prices: ').split()]
if len(stock_prices_yesterday) < 2:
raise IndexError('Getting a profit requires at least 2 prices')
# we'll greedily update min_price and max_profit, so we initialize
# them to the first price and the first possible profit
min_price = stock_prices_yesterday[0]
max_profit = stock_prices_yesterday[1] - stock_prices_yesterday[0]
for index, current_price in enumerate(stock_prices_yesterday):
# skip the first (0th) time
# we can't sell at the first time, since we must buy first,
# and we can't buy and sell at the same time!
# if we took this out, we'd try to buy /and/ sell at time 0.
# this would give a profit of 0, which is a problem if our
# max_profit is supposed to be /negative/--we'd return 0!
if index == 0:
continue
    print(index, current_price)
# see what our profit would be if we bought at the
# min price and sold at the current price
potential_profit = current_price - min_price
# update max_profit if we can do better
max_profit = max(max_profit, potential_profit)
# update min_price so it's always
# the lowest price we've seen so far
min_price = min(min_price, current_price)
print(max_profit)
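# A function-form sketch of the same single-pass greedy: track the lowest price seen so
# far and the best profit from selling at the current price. The example inputs are our
# own.
def max_profit_single_pass(prices):
    if len(prices) < 2:
        raise IndexError('Getting a profit requires at least 2 prices')
    min_price = prices[0]
    best_profit = prices[1] - prices[0]
    for current_price in prices[1:]:
        best_profit = max(best_profit, current_price - min_price)
        min_price = min(min_price, current_price)
    return best_profit
assert max_profit_single_pass([10, 7, 5, 8, 11, 9]) == 6   # buy at 5, sell at 11
assert max_profit_single_pass([9, 7, 4, 1]) == -2          # falling prices: least-bad loss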
| 36.882353
| 70
| 0.717703
|
1e19fac2069d428990d452e8c5fc3993704774ba
| 979
|
py
|
Python
|
scripts/ex_casA_image_cat.py
|
kernsuite-debian/pyvo
|
ee85c50c5c520ac7bede2d6f18de225c57dedc33
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2016-07-12T22:27:36.000Z
|
2016-07-12T22:27:36.000Z
|
scripts/ex_casA_image_cat.py
|
kernsuite-debian/pyvo
|
ee85c50c5c520ac7bede2d6f18de225c57dedc33
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
scripts/ex_casA_image_cat.py
|
kernsuite-debian/pyvo
|
ee85c50c5c520ac7bede2d6f18de225c57dedc33
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
import pyvo as vo
# find archives with x-ray images
archives = vo.regsearch(servicetype='image', waveband='xray')
# position of my favorite source
pos = vo.object2pos('Cas A')
# find images and list in a file
with open('cas-a.csv', 'w') as csv:
    print("Archive short name,Archive title,Image title,RA,Dec,format,URL", file=csv)
    for arch in archives:
        print("searching %s..." % arch.shortname)
        try:
            matches = arch.search(pos=pos, size=0.25)
        except vo.DALAccessError as ex:
            print("Trouble accessing %s archive (%s)" % (arch.shortname, str(ex)))
            continue
        print("...found %d images" % matches.nrecs)
        for image in matches:
            print(','.join((arch.shortname, arch.title, image.title,
                            str(image.ra), str(image.dec), image.format,
                            image.getdataurl())), file=csv)
| 34.964286
| 61
| 0.558733
|
635672eae9d329f22fc55b66f83bc747c9022662
| 442
|
py
|
Python
|
modulector/migrations/0026_auto_20210222_1813.py
|
omics-datascience/modulector
|
357d8f6f8eab5d04b2357a08d177d75cbdad001a
|
[
"MIT"
] | 2
|
2021-07-10T20:45:58.000Z
|
2021-08-18T02:24:58.000Z
|
modulector/migrations/0026_auto_20210222_1813.py
|
omics-datascience/modulector
|
357d8f6f8eab5d04b2357a08d177d75cbdad001a
|
[
"MIT"
] | 3
|
2021-11-07T23:18:58.000Z
|
2021-11-22T23:17:35.000Z
|
modulector/migrations/0026_auto_20210222_1813.py
|
omics-datascience/modulector
|
357d8f6f8eab5d04b2357a08d177d75cbdad001a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.8 on 2021-02-22 18:13
# IMPORTANT: this migration fixes a problem with multiple URL templates which were
# used in a previous version of Modulector
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('modulector', '0025_auto_20210212_1725'),
]
operations = [
migrations.RunSQL("DELETE FROM MODULECTOR_URLTEMPLATE WHERE name != 'mirbase'")
]
| 26
| 87
| 0.714932
|
d0be18917b9a691f47431bdb30a9bd2d60bdcb41
| 204,800
|
py
|
Python
|
electrophysiology_analysis/barrylab_ephys_analysis/scripts/exp_scales/paper_figures.py
|
Barry-lab/Publication_TanniDeCothiBarry2021
|
425bc0bd9a74b837d912820e9ea1539a111fcb1f
|
[
"Unlicense"
] | null | null | null |
electrophysiology_analysis/barrylab_ephys_analysis/scripts/exp_scales/paper_figures.py
|
Barry-lab/Publication_TanniDeCothiBarry2021
|
425bc0bd9a74b837d912820e9ea1539a111fcb1f
|
[
"Unlicense"
] | null | null | null |
electrophysiology_analysis/barrylab_ephys_analysis/scripts/exp_scales/paper_figures.py
|
Barry-lab/Publication_TanniDeCothiBarry2021
|
425bc0bd9a74b837d912820e9ea1539a111fcb1f
|
[
"Unlicense"
] | null | null | null |
import os
import pickle
import sys
from random import shuffle
import numpy as np
from scipy.stats import kruskal, mannwhitneyu, pearsonr, linregress, friedmanchisquare, gamma, poisson, zscore
from scipy.special import gamma as gamma_function
from scipy.optimize import minimize
from scipy.spatial.distance import jensenshannon
from statsmodels.stats.multitest import multipletests
from itertools import combinations
from scipy import ndimage
import pandas as pd
from tqdm import tqdm
from pingouin import partial_corr
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.gridspec import GridSpecFromSubplotSpec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import seaborn as sns
from barrylab_ephys_analysis.spatial.ratemaps import SpatialRatemap
from barrylab_ephys_analysis.spatial.similarity import spatial_correlation
from barrylab_ephys_analysis.scripts.exp_scales import snippets
from barrylab_ephys_analysis.spikes.correlograms import plot_correlogram
from barrylab_ephys_analysis.scripts.exp_scales.params import Params
from barrylab_ephys_analysis.recording_io import Recording
from barrylab_ephys_analysis.scripts.exp_scales import load
from barrylab_ephys_analysis.scripts.exp_scales.paper_preprocess import preprocess_and_save_all_animals, \
create_df_fields_for_recordings, create_unit_data_frames_for_recordings
from barrylab_ephys_analysis.spatial.fields import compute_field_contour
from barrylab_ephys_analysis.spikes.utils import count_spikes_in_sample_bins
from barrylab_ephys_analysis.lfp.oscillations import FrequencyBandFrequency
from barrylab_ephys_analysis.scripts.exp_scales.paper_methods import ValueByBinnedDistancePlot, \
plot_raincloud_and_stats, get_max_ylim, compute_pairwise_comparisons, plot_stats_dict_to_axes, \
SpatialFilteringLegend, get_field_data_with_distance_to_boundary, FieldCoverage, RatemapsPlot, \
BayesianPositionDecodingArenaAccuracy, compute_distances_to_landmarks, filter_dataframe_by_direction, \
PlaceFieldPeakDistribution, PopulationVectorChangeRate
seaborn_font_scale = 1.5
sns_colors = sns.color_palette(n_colors=(5 + 5 + 4))
sns_animal_colors = sns_colors[:5]
sns_environment_colors = sns_colors[5:10]
sns_other_colors = sns_colors[10:]
def paper_figures_path(fpath):
path = os.path.join(fpath, Params.analysis_path, 'PaperFigures')
if not os.path.isdir(path):
os.mkdir(path)
return path
spatial_filter_legend_instance = SpatialFilteringLegend()
main_experiment_ids = ('exp_scales_a', 'exp_scales_b', 'exp_scales_c', 'exp_scales_d')
experiment_id_substitutes = {
'exp_scales_a': 'A',
'exp_scales_b': 'B',
'exp_scales_c': 'C',
'exp_scales_d': 'D',
'exp_scales_a2': "A'"
}
experiment_id_substitutes_inverse = {value: key for key, value in experiment_id_substitutes.items()}
spatial_windows = {
'exp_scales_a': (0, 87.5, 0, 125),
'exp_scales_b': (0, 175, 0, 125),
'exp_scales_c': (0, 175, 0, 250),
'exp_scales_d': (0, 350, 0, 250),
'exp_scales_a2': (0, 87.5, 0, 125)
}
arena_areas_meters = {
'exp_scales_a': 87.5 * 125 / 10000,
'exp_scales_b': 175 * 125 / 10000,
'exp_scales_c': 175 * 250 / 10000,
'exp_scales_d': 350 * 250 / 10000,
'exp_scales_a2': 87.5 * 125 / 10000
}
arena_areas_meters_short_env = {experiment_id_substitutes[key]: value for key, value in arena_areas_meters.items()}
experiment_ids_with_areas = {
'exp_scales_a': 'A, {:.2f}'.format(arena_areas_meters['exp_scales_a']),
'exp_scales_b': 'B, {:.2f}'.format(arena_areas_meters['exp_scales_b']),
'exp_scales_c': 'C, {:.2f}'.format(arena_areas_meters['exp_scales_c']),
'exp_scales_d': 'D, {:.2f}'.format(arena_areas_meters['exp_scales_d']),
'exp_scales_a2': "A', {:.2f}".format(arena_areas_meters['exp_scales_a2'])
}
experiment_ids_with_areas_ticks = {
'ticks': [0.0] + [arena_areas_meters[x] for x in main_experiment_ids],
'ticklabels': ['0'] + [experiment_ids_with_areas[x] for x in main_experiment_ids]
}
def construct_df_population_vector_change_file_path(fpath):
return os.path.join(fpath, Params.analysis_path, 'df_population_vector_change.p')
class ExampleUnit(object):
@staticmethod
def make_ratemaps_subfigure(recordings, recordings_unit_ind):
fig = RatemapsPlot.make_default_figure()
RatemapsPlot.plot(recordings, recordings_unit_ind, fig, draw_gaussians=False, draw_ellipses=False)
return fig
@staticmethod
def plot_waveforms(recordings, recordings_unit_ind, fig, gs, time_bar_ms=0.5, volt_bar_uv=100):
"""Plots waveforms of the unit from all 4 channels.
The time and voltage scale bars are in size specified in input arguments.
"""
index = snippets.get_index_where_most_spikes_in_unit_list(recordings.units[recordings_unit_ind])
waveforms = recordings.units[recordings_unit_ind][index]['analysis']['mean_waveform']
sampling_rate = recordings.units[recordings_unit_ind][index]['sampling_rate']
y_limits = [np.amin(-waveforms) * 1.02, np.amax(-waveforms) * 1.02]
for nw, gs_field in zip(range(4), gs):
ax = fig.add_subplot(gs_field)
ax.plot(-waveforms[nw, :])
ax.set_ylim(y_limits)
ax.axis('off')
if nw == 2:
time_bar_length = (time_bar_ms / 1000) * sampling_rate
x_right_edge = ax.get_xlim()[1]
top_edge = y_limits[1] * 0.95
ax.plot([x_right_edge - time_bar_length, x_right_edge], [top_edge, top_edge],
color='black', linewidth=2.5)
ax.plot([x_right_edge, x_right_edge], [top_edge - volt_bar_uv, top_edge],
color='black', linewidth=2.5)
@staticmethod
def plot_autocorrelograms(recordings, recordings_unit_ind, fig, gs):
"""Autocorrelation plot parameters are specified in Params.autocorrelation_params.
These should be the following:
top plot: 'bin_size': 0.001 seconds, 'max_lag': 0.05 seconds,
bottom plot: 'bin_size': 0.01 seconds, 'max_lag': 0.5 seconds,
"""
index = snippets.get_index_where_most_spikes_in_unit_list(recordings.units[recordings_unit_ind])
autocorrelations = recordings.units[recordings_unit_ind][index]['analysis']['autocorrelations']
for correlation_name, gs_field in zip(sorted(autocorrelations), gs):
ax = fig.add_subplot(gs_field)
ax.axis('off')
plot_correlogram(autocorrelations[correlation_name]['values'],
autocorrelations[correlation_name]['bin_edges'],
ax)
@staticmethod
def make_waveform_and_autocorrelogram_subfigure(recordings, recordings_unit_ind):
fig = plt.figure(figsize=(RatemapsPlot.default_figure_size[0], RatemapsPlot.default_figure_size[1] / 3.))
gs_main = fig.add_gridspec(1, 2)
gs_main.tight_layout(fig, pad=0.2)
gs_waveforms = GridSpecFromSubplotSpec(2, 2, gs_main[0], wspace=0, hspace=0)
gs_correlograms = GridSpecFromSubplotSpec(2, 1, gs_main[1])
ExampleUnit.plot_waveforms(recordings, recordings_unit_ind, fig, gs_waveforms)
ExampleUnit.plot_autocorrelograms(recordings, recordings_unit_ind, fig, gs_correlograms)
return fig
@staticmethod
def write(fpath, all_recordings, df_units, unit=20, prefix='', verbose=True):
figure_name = prefix + 'ExampleUnit'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
recordings = [x for x in all_recordings if x[0].info['animal'] == df_units.loc[unit, 'animal']][0]
recordings_unit_ind = df_units.loc[unit, 'animal_unit']
fig = ExampleUnit.make_waveform_and_autocorrelogram_subfigure(recordings, recordings_unit_ind)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}_waveforms.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}_waveforms.svg'.format(figure_name)))
plt.close(fig)
fig = ExampleUnit.make_ratemaps_subfigure(recordings, recordings_unit_ind)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}_ratemaps.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}_ratemaps.svg'.format(figure_name)))
plt.close(fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldDetectionMethod(object):
@staticmethod
def plot(all_recordings, df_units, axs, unit=1136, experiment_id='exp_scales_c'):
recordings = [x for x in all_recordings if x[0].info['animal'] == df_units.loc[unit, 'animal']][0]
recordings_unit_ind = df_units.loc[unit, 'animal_unit']
i_recording = [i for i in range(len(recordings)) if recordings[i].info['experiment_id'] == experiment_id][0]
unit = recordings.units[recordings_unit_ind][i_recording]
ratemap = unit['analysis']['spatial_ratemaps']['spike_rates_smoothed']
# Plot ratemap
axes_image = axs[0].imshow(ratemap, cmap='jet')
divider = make_axes_locatable(axs[0])
cax = divider.append_axes("right", size='{}%'.format(10), pad=0.05)
axs[0].figure.colorbar(axes_image, cax=cax, ax=axs[0])
cax.set_ylabel('spike rate (Hz)')
field_inds = np.where((recordings.df_fields['experiment_id'] == experiment_id)
& (recordings.df_fields['animal_unit'] == recordings_unit_ind))[0]
contours = [compute_field_contour(recordings[0].analysis['fields'][i]['ratemap']) for i in field_inds]
colors = [np.array([1, 0.64, 0]), (np.array((165, 42, 42)) / 255)]
for contour, color in zip(contours, colors):
SpatialRatemap.plot_contours(contour, axs[0], color=color)
# Create an RGB array
array = np.ones((ratemap.shape[0], ratemap.shape[1], 3), dtype=np.float32)
array[:, np.array([0, ratemap.shape[1] - 1])] = 0
array[np.array([0, ratemap.shape[0] - 1]), :] = 0
# Plot first threshold
threshold = 1
field_map = ndimage.label(ratemap > threshold)[0]
inds = np.where(field_map == 1)
array[inds[0], inds[1], :] = np.array([0, 0.5, 0])[None, None, :]
axs[1].imshow(array)
axs[1].set_title('threshold = {0:#.3g} Hz'.format(threshold))
# Plot second threshold
threshold = 1.25
field_map = ndimage.label(ratemap > threshold)[0]
inds = np.where(field_map == 1)
array[inds[0], inds[1], :] = colors[1][None, None, :] # Brown
inds = np.where(field_map == 2)
array[inds[0], inds[1], :] = colors[0][None, None, :]
axs[2].imshow(array)
axs[2].set_title('threshold = {0:#.3g} Hz'.format(threshold))
# Plot third threshold
threshold = 4.75
field_map = ndimage.label(ratemap > threshold)[0]
inds = np.where(field_map == 1)
array[inds[0], inds[1], :] = np.array([1, 0.75, 0.79])[None, None, :]
inds = np.where(field_map == 2)
array[inds[0], inds[1], :] = np.array([1, 0, 0])[None, None, :]
axs[3].imshow(array)
axs[3].set_title('threshold = {0:#.3g} Hz'.format(threshold))
@staticmethod
def make_figure(all_recordings, df_units):
fig, axs = plt.subplots(1, 4, figsize=(12, 4), gridspec_kw={'width_ratios': (1.4, 1, 1, 1)})
plt.subplots_adjust(left=0, right=0.995, bottom=0.15, top=0.85, wspace=0.3)
for ax in axs:
ax.axis('off')
FieldDetectionMethod.plot(all_recordings, df_units, axs)
return fig
@staticmethod
def write(fpath, all_recordings, df_units, prefix='', verbose=True):
figure_name = prefix + 'FieldDetectionMethod'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig = FieldDetectionMethod.make_figure(all_recordings, df_units)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
plt.close(fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class IntraTrialCorrelations(object):
@staticmethod
def compute(all_recordings):
per_unit_animal = []
per_unit_environment = []
per_unit_minutes = []
per_unit_halves = []
for recordings in all_recordings:
for i_recording, recording in enumerate(recordings[:4]):
if not (recording.info['experiment_id'] in main_experiment_ids):
continue
odd_minute_ratemap_stack = []
even_minute_ratemap_stack = []
first_half_ratemap_stack = []
second_half_ratemap_stack = []
# Get correlations of each unit and collect ratemap stacks
for unit in recording.units:
if unit['analysis']['category'] != 'place_cell':
continue
# Compute per unit correlations
per_unit_animal.append(recording.info['animal'])
per_unit_environment.append(experiment_id_substitutes[recording.info['experiment_id']])
per_unit_minutes.append(
spatial_correlation(
unit['analysis']['spatial_ratemaps']['spike_rates_minutes']['odd'],
unit['analysis']['spatial_ratemaps']['spike_rates_minutes']['even'],
**Params.ratemap_stability_kwargs
)[0]
)
per_unit_halves.append(
spatial_correlation(
unit['analysis']['spatial_ratemaps']['spike_rates_halves']['first'],
unit['analysis']['spatial_ratemaps']['spike_rates_halves']['second'],
**Params.ratemap_stability_kwargs
)[0]
)
# Collect ratemaps to stack
odd_minute_ratemap_stack.append(
unit['analysis']['spatial_ratemaps']['spike_rates_minutes']['odd'])
even_minute_ratemap_stack.append(
unit['analysis']['spatial_ratemaps']['spike_rates_minutes']['even'])
first_half_ratemap_stack.append(
unit['analysis']['spatial_ratemaps']['spike_rates_halves']['first'])
second_half_ratemap_stack.append(
unit['analysis']['spatial_ratemaps']['spike_rates_halves']['second'])
# Create DataFrame
df_per_unit = pd.DataFrame({
'animal': per_unit_animal,
'environment': per_unit_environment,
'minutes': per_unit_minutes,
'halves': per_unit_halves
})
return df_per_unit
@staticmethod
def plot(all_recordings, ax, stat_ax, stripplot_size=1):
df = IntraTrialCorrelations.compute(all_recordings).rename(columns={'halves': 'Pearson $\it{r}$'})
plot_raincloud_and_stats('environment', 'Pearson $\it{r}$', df, ax, stat_ax,
palette=sns.color_palette(sns_environment_colors[:len(main_experiment_ids)]),
x_order=[experiment_id_substitutes[experiment_id]
for experiment_id in main_experiment_ids],
stripplot_size=stripplot_size)
ax.set_yticks([y for y in ax.get_yticks() if y <= 1])
ax.set_xlabel('environment')
@staticmethod
def make_figure(all_recordings):
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
plt.subplots_adjust(left=0.15, bottom=0.15, right=0.99, top=0.98)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(8, 8))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
IntraTrialCorrelations.plot(all_recordings, ax, stat_ax)
return fig, stat_fig
@staticmethod
def print_correlation_between_spatial_correlation_and_field_count_per_cell(all_recordings, df_units, df_fields):
df = df_fields.loc[df_fields['experiment_id'] == 'exp_scales_d',
['animal', 'animal_unit']].copy(deep=True)
df = df.merge(df_units[['animal', 'animal_unit', 'category']].copy(deep=True),
how='left', on=['animal', 'animal_unit'])
df = df.loc[df['category'] == 'place_cell', ['animal', 'animal_unit']] # Only keep place cell fields
df['count'] = 1
df = df.groupby(['animal', 'animal_unit'])['count'].sum().reset_index()
animal_recordings = {recordings[0].info['animal']: recordings for recordings in all_recordings}
animal_exp_scales_d_recording_index = {}
for animal, recordings in animal_recordings.items():
animal_exp_scales_d_recording_index[animal] = [
recording.info['experiment_id'] for recording in animal_recordings[animal]
].index('exp_scales_d')
spatial_correlations = []
for animal, animal_unit in zip(df['animal'], df['animal_unit']):
unit = animal_recordings[animal].units[animal_unit][animal_exp_scales_d_recording_index[animal]]
spatial_correlations.append(
unit['analysis']['spatial_ratemaps']['spike_rates_halves']['stability']
)
df['spatial_correlation_between_halves'] = spatial_correlations
print()
print('Correlation between spatial correlation (1st and 2nd half) and count of place fields in environment D: '
'r={:.3f} p={:.6f} N={}'.format(
*pearsonr(df['spatial_correlation_between_halves'], df['count']), df.shape[0]
))
print()
@staticmethod
def write(fpath, all_recordings, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'IntraTrialCorrelations'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = IntraTrialCorrelations.make_figure(all_recordings)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
IntraTrialCorrelations.print_correlation_between_spatial_correlation_and_field_count_per_cell(
all_recordings, df_units, df_fields
)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class PlaceCellAndFieldCounts(object):
@staticmethod
def plot_stacked_bars(ax, df, value_name, legend=True):
bar_bottom_heights = {environment: 0 for environment in df['environment'].unique()}
colors = {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)}
bars = {}
for animal in sorted(df['animal'].unique()):
df_tmp = df.loc[df['animal'] == animal].copy().sort_values('environment')
bars[animal] = ax.bar(
np.arange(len(bar_bottom_heights)),
df_tmp[value_name],
bottom=[bar_bottom_heights[environment] for environment in df_tmp['environment']],
color=colors[animal],
width=0.8,
linewidth=0
)
for environment, value in zip(df_tmp['environment'], df_tmp[value_name]):
bar_bottom_heights[environment] += value
if legend:
ax.legend(list(bars.values()), ['animal {}'.format(i) for i in range(1, len(bars) + 1)])
ax.set_xticks(np.arange(len(bar_bottom_heights)))
ax.set_xticklabels(sorted(list(bar_bottom_heights.keys())))
ax.set_xlabel('environment')
ax.set_ylabel(value_name)
@staticmethod
def plot_line_for_each_animal(ax, df, value_name, legend=True):
df['environment_size'] = \
np.array([FieldsDetectedAcrossEnvironments.environment_sizes[
FieldsDetectedAcrossEnvironments.environment_names.index(x)
]
for x in df['environment']])
colors = {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)}
for i_animal, animal in enumerate(sorted(df['animal'].unique())):
df_tmp = df.loc[df['animal'] == animal].copy().sort_values('environment')
ax.plot(df_tmp['environment_size'], df_tmp[value_name], color=colors[animal],
label='{}'.format(i_animal + 1), marker='o', linewidth=2)
if legend:
ax.legend(title='animal')
ax.set_xticks(experiment_ids_with_areas_ticks['ticks'])
ax.set_xticklabels(experiment_ids_with_areas_ticks['ticklabels'])
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
ax.set_xlabel('environment, size (m$^2$)')
ax.set_ylabel(value_name)
@staticmethod
def plot_place_cell_count_in_environments(df_units, df_fields, ax, stat_ax):
"""The plot shows the count of place cells that have at least one field in each of the environments.
"""
df = df_fields[['animal', 'animal_unit', 'experiment_id']].copy(deep=True)
df.drop_duplicates(['animal', 'animal_unit', 'experiment_id'], inplace=True)
df = df.merge(df_units[['animal', 'animal_unit', 'category']].copy(deep=True),
how='left', on=['animal', 'animal_unit'])
df = df[df['category'] == 'place_cell'][['animal', 'experiment_id', 'animal_unit']]
animal_place_cell_counts = {
animal: df.loc[df['animal'] == animal, 'animal_unit'].unique().size
for animal in df['animal'].unique()
}
df['place cell count'] = 1
df = df[['animal', 'experiment_id', 'place cell count']]\
.groupby(['animal', 'experiment_id']).sum().reset_index()
df = df.loc[df['experiment_id'] != 'exp_scales_a2'].copy().reset_index()
df.replace(to_replace={'experiment_id': experiment_id_substitutes}, inplace=True)
df.rename(columns={'experiment_id': 'environment'}, inplace=True)
PlaceCellAndFieldCounts.plot_line_for_each_animal(ax, df, 'place cell count', legend=False)
table_cell_text = [['Animal', 'Cell count']]
for animal, count in animal_place_cell_counts.items():
table_cell_text.append([animal, '{:d}'.format(count)])
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
@staticmethod
def plot_place_field_count_in_environments(df_units, df_fields, ax):
"""The plot shows the count of place fields that each animal had in each environment, across all place cells.
"""
df = df_fields[['animal', 'animal_unit', 'experiment_id']].copy(deep=True)
df = df.merge(df_units[['animal', 'animal_unit', 'category']].copy(deep=True),
how='left', on=['animal', 'animal_unit'])
df = df[df['category'] == 'place_cell'][['animal', 'experiment_id']]
df['place field count'] = 1
df = df.groupby(['animal', 'experiment_id']).sum().reset_index()
df = df.loc[df['experiment_id'] != 'exp_scales_a2'].copy().reset_index()
df.replace(to_replace={'experiment_id': experiment_id_substitutes}, inplace=True)
df.rename(columns={'experiment_id': 'environment'}, inplace=True)
PlaceCellAndFieldCounts.plot_line_for_each_animal(ax, df, 'place field count', legend=True)
@staticmethod
def make_figure(df_units, df_fields):
fig, axs = plt.subplots(1, 2, figsize=(7, 4))
plt.subplots_adjust(left=0.11, bottom=0.27, right=0.99, top=0.90, wspace=0.35)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(10, 10))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
PlaceCellAndFieldCounts.plot_place_cell_count_in_environments(df_units, df_fields, axs[0], stat_ax)
PlaceCellAndFieldCounts.plot_place_field_count_in_environments(df_units, df_fields, axs[1])
return fig, stat_fig
@staticmethod
def write(fpath, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'PlaceCellAndFieldCounts'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = PlaceCellAndFieldCounts.make_figure(df_units, df_fields)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldsDetectedAcrossEnvironments(object):
environment_sizes = [0.875 * 1.25, 1.75 * 1.25, 1.75 * 2.5, 3.5 * 2.5]
environment_names = ['A', 'B', 'C', 'D']
@staticmethod
def get_field_or_cell_count_per_environment_and_animal(df_units, df_fields):
# Take care not to modify df_fields and only take relevant fields
df_fields = df_fields.loc[df_fields['experiment_id'] != 'exp_scales_a2',
['unit', 'animal', 'experiment_id']].copy(deep=True)
# Only keep fields for cell category specified
df_fields = df_fields[df_fields['unit'].isin(np.where(df_units['category'] == 'place_cell')[0])]
# Get total number of units per animal
total_number_of_units = {}
for animal in df_fields['animal'].unique():
total_number_of_units[animal] = df_fields.loc[df_fields['animal'] == animal, 'unit'].unique().size
# Get total number of fields per animal
total_number_of_fields = {}
for animal in df_fields['animal'].unique():
total_number_of_fields[animal] = df_fields.loc[df_fields['animal'] == animal, 'unit'].size
# Drop fields in exp_scales_a2
df_fields = df_fields[df_fields['experiment_id'] != 'exp_scales_a2']
# Replace experiment_id values for plotting and rename the column
df_fields.replace(to_replace={'experiment_id': experiment_id_substitutes},
inplace=True)
df_fields.rename(columns={'experiment_id': 'environment'}, inplace=True)
# Count the number of units and fields present in each environment for each animal
number_of_units = df_fields.drop_duplicates().groupby(['animal', 'environment']).count()
number_of_units.reset_index(inplace=True)
number_of_units.rename(columns={'unit': 'place cells'}, inplace=True)
number_of_fields = df_fields.groupby(['animal', 'environment']).count()
number_of_fields.reset_index(inplace=True)
number_of_fields.rename(columns={'unit': 'place fields'}, inplace=True)
# Compute percentage of total units and fields
for animal, total in total_number_of_units.items():
number_of_units.loc[number_of_units['animal'] == animal, 'place cells'] = \
(number_of_units.loc[number_of_units['animal'] == animal, 'place cells'] / float(total))
for animal, total in total_number_of_fields.items():
number_of_fields.loc[number_of_fields['animal'] == animal, 'place fields'] = \
(number_of_fields.loc[number_of_fields['animal'] == animal, 'place fields']
/ float(total))
# Set environment column equal to relative size to facilitate plotting
number_of_units['environment_size'] = \
np.array([FieldsDetectedAcrossEnvironments.environment_sizes[
FieldsDetectedAcrossEnvironments.environment_names.index(x)
]
for x in number_of_units['environment']])
number_of_fields['environment_size'] = \
np.array([FieldsDetectedAcrossEnvironments.environment_sizes[
FieldsDetectedAcrossEnvironments.environment_names.index(x)
]
for x in number_of_fields['environment']])
# Compute area normalized percentage
number_of_fields['place fields per unit area'] = \
number_of_fields['place fields'].divide(number_of_fields['environment_size'])
return number_of_units, number_of_fields
@staticmethod
def environment_field_density_model(area, slope, intercept):
return (area * slope + intercept) / area
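# Comment added for clarity: given a linear fit of the per-environment field measure
# against area, y(area) = slope * area + intercept, this model returns the implied
# density y(area) / area. Illustrative example with hypothetical values: slope = 0.3,
# intercept = 0.5 and area = 4 m^2 give a density of (0.3 * 4 + 0.5) / 4 = 0.425 per m^2.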
@staticmethod
def environment_field_density_proportional_to_baseline(area, slope, intercept):
return FieldsDetectedAcrossEnvironments.environment_field_density_model(area, slope, intercept) / slope
@staticmethod
def compute_environment_area_with_field_density_correction(area, parameters):
field_density_multiplier = \
FieldsDetectedAcrossEnvironments.environment_field_density_proportional_to_baseline(
area, parameters['slope'], parameters['intercept']
)
return area * field_density_multiplier
@staticmethod
def compute_environment_areas_with_field_density_correction(parameters=None):
areas = {}
for experiment_id in main_experiment_ids:
if parameters is None:
areas[experiment_id_substitutes[experiment_id]] = arena_areas_meters[experiment_id]
else:
areas[experiment_id_substitutes[experiment_id]] = \
FieldsDetectedAcrossEnvironments.compute_environment_area_with_field_density_correction(
arena_areas_meters[experiment_id], parameters
)
areas['combined environment'] = np.sum(list(areas.values()))
return areas
@staticmethod
def plot_place_field_distribution(df_units, df_fields, ax, stat_ax):
"""Shows the distribution of place fields of each animal between the different environments.
Therefore, the values are a percentage of the total number of fields in a given animal.
The dotted black line shows the least squares linear regression line.
The inset shows the percentage of place fields in each square metre in each environment.
These values are computed by dividing the values in main axes with the size of the environment.
The x-axis units of inset axes are the same as main axes.
Box plots show median, Q1, Q3 and range. As here N=5, each box plot element corresponds to
one of the data points.
"""
fit_plot_x_vals = np.linspace(1, 9, 100)
_, df = \
FieldsDetectedAcrossEnvironments.get_field_or_cell_count_per_environment_and_animal(
df_units, df_fields
)
df = df.copy(deep=True)
environments = sorted(np.unique(df['environment']))
environment_sizes = sorted(np.unique(df['environment_size']))
colors_dict = {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)}
ax.scatter(df['environment_size'] + np.random.uniform(-0.2, 0.2, df['environment_size'].size),
df['place fields'],
s=50,
c=[colors_dict[x] for x in df['animal']],
linewidth=1, edgecolors='black', zorder=1, alpha=0.75)
# Fit and plot linear model to distribution data
main_line_slope, main_line_intercept, main_line_r_value, main_line_p_value, main_line_std_err = \
linregress(df['environment_size'], df['place fields'])
ax.plot(fit_plot_x_vals, main_line_intercept + main_line_slope * fit_plot_x_vals,
color='black', linestyle=':', zorder=2)
ax_text_right_side_x = fit_plot_x_vals[3 * len(fit_plot_x_vals) // 4]
ax_text_right_side_y = main_line_intercept + main_line_slope * ax_text_right_side_x
line_residuals = np.abs((main_line_intercept + main_line_slope * df['environment_size'])
- df['place fields'])
main_line_mean_squared_error = np.mean(line_residuals ** 2)
pearson_r_value, pearson_p_value = pearsonr(df['environment_size'], df['place fields'])
# Plot linear model r value
line_text = (
'y = {:.3f}x + {:.3f}\n'.format(main_line_slope, main_line_intercept)
+ '$\it{r}$' + ' = {:.{prec}f}'.format(main_line_r_value, prec=2)
)
ax.text(ax_text_right_side_x, ax_text_right_side_y, line_text, ha='right', va='bottom')
# Plot place field density
ax_inset_height = \
(main_line_intercept + main_line_slope * environment_sizes[-1] * 0.5) / ax.get_ylim()[1] * 100 * 0.75
ax_inset = inset_axes(ax, width='45%', height='{:.0f}%'.format(ax_inset_height), loc='lower right')
ax_inset.xaxis.tick_top()
ax_inset.scatter(df['environment_size'] + np.random.uniform(-0.2, 0.2, df['environment_size'].size),
df['place fields per unit area'], s=50, c=[colors_dict[x] for x in df['animal']],
linewidth=1, edgecolors='black', zorder=1, alpha=0.75)
ax_inset.plot(
fit_plot_x_vals, (main_line_intercept + main_line_slope * fit_plot_x_vals) / fit_plot_x_vals,
color='black', linestyle=':', zorder=2
)
# Adjust axes parameters
ax.set_ylim((0, ax.get_ylim()[1]))
ax.set_xlim((0, ax.get_xlim()[1]))
ax.set_xticks(experiment_ids_with_areas_ticks['ticks'])
ax.set_xticklabels(experiment_ids_with_areas_ticks['ticklabels'])
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
ax.set_xlabel('environment, size (m$^2$)')
ax.set_ylabel('proportion of place fields')
ax_inset.set_ylim((0, ax_inset.get_ylim()[1]))
ax_inset.set_xlim((0, ax_inset.get_xlim()[1]))
ax_inset.set_xticks(experiment_ids_with_areas_ticks['ticks'])
ax_inset.set_xlabel('size (m$^2$)')
ax_inset.set_ylabel('proportion\nof fields / m$^2$')
plt.setp(ax_inset.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor')
ax_inset.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax_inset.xaxis.set_label_position('top')
ax_inset.xaxis.set_ticks_position('top')
ax_inset.yaxis.labelpad = 0
# Compute stats
kruskal_h_value, kruskal_pvalue = \
kruskal(*[df[df['environment'] == group]['place fields'] for group in environments])
density_kruskal_h_value, density_kruskal_pvalue = \
kruskal(*[df[df['environment'] == group]['place fields per unit area'] for group in environments])
df_sorted = df.sort_values('animal')
friedman_chisq_value, friedman_pvalue = \
friedmanchisquare(*[df_sorted[df_sorted['environment'] == group]['place fields']
for group in environments])
density_friedman_chisq_value, density_friedman_pvalue = \
friedmanchisquare(*[df_sorted[df_sorted['environment'] == group]['place fields per unit area']
for group in environments])
# Plot stats to stat_ax
stat_ax.set_title('Place field formation')
table_cell_text = [['Field distribution', 'H-value', 'p-value'],
['Kruskal-Wallis test',
'{:.2e}'.format(kruskal_h_value), '{:.2e}'.format(kruskal_pvalue)],
['', '', ''],
['Field distribution', 'chi-square statistic', 'p-value'],
['Friedman test',
'{:.2e}'.format(friedman_chisq_value), '{:.2e}'.format(friedman_pvalue)],
['', '', ''],
['Field density', 'H-value', 'p-value'],
['Kruskal-Wallis test',
'{:.2e}'.format(density_kruskal_h_value), '{:.2e}'.format(density_kruskal_pvalue)],
['', '', ''],
['Field density', 'chi-square statistic', 'p-value'],
['Friedman test',
'{:.2e}'.format(density_friedman_chisq_value), '{:.2e}'.format(density_friedman_pvalue)],
['', '', ''],
['fitted main linear model', 'parameters', ''],
['', 'line_slope', '{:.3f}'.format(main_line_slope)],
['', 'line_intercept', '{:.3f}'.format(main_line_intercept)],
['', 'line_r_value', '{:.3f}'.format(main_line_r_value)],
['', 'line_p_value', '{:.3e}'.format(main_line_p_value)],
['', 'line_std_err', '{:.5f}'.format(main_line_std_err)],
['', 'line_mean_squared_error', '{:.3e}'.format(main_line_mean_squared_error)],
['', '', ''],
['pearson', 'r', 'p'],
['', '{:.3f}'.format(pearson_r_value), '{:.3e}'.format(pearson_p_value)],
['', '', ''],
['environment', 'mean value', '']]
mean_values = df.groupby('environment')['place fields'].mean()
for env, value in zip(mean_values.index, mean_values):
table_cell_text.append([env, str(value), ''])
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
return {'slope': main_line_slope, 'intercept': main_line_intercept}
@staticmethod
def make_figure(df_units, df_fields, verbose=False):
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
plt.subplots_adjust(left=0.11, bottom=0.2, right=0.99, top=0.98)
stat_fig, stat_axs = plt.subplots(1, 1, figsize=(8, 15))
plt.tight_layout(pad=1.5)
stat_axs.set_xticks([], [])
stat_axs.set_yticks([], [])
environment_field_density_model_parameters = FieldsDetectedAcrossEnvironments.plot_place_field_distribution(
df_units, df_fields, ax, stat_axs
)
return fig, stat_fig, environment_field_density_model_parameters
@staticmethod
def write(fpath, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'FieldsDetectedAcrossEnvironments'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig, environment_field_density_model_parameters = \
FieldsDetectedAcrossEnvironments.make_figure(df_units, df_fields, verbose=verbose)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
return environment_field_density_model_parameters
class Remapping:
@staticmethod
def compute_ratemap_correlation(all_recordings, min_included_rate, min_required_bins):
rho = []
rho_halves = []
shuffle_rho = []
for recordings in all_recordings:
ratemaps_a = []
ratemaps_a2 = []
ratemaps_a_1st_half = []
ratemaps_a_2nd_half = []
ratemap_shape = None
for i, unit in enumerate(recordings.units):
if recordings.first_available_recording_unit(i)['analysis']['category'] != 'place_cell':
continue
if unit[0] is None:
ratemaps_a.append(None)
else:
ratemaps_a.append(unit[0]['analysis']['spatial_ratemaps']['spike_rates_smoothed'])
ratemaps_a_1st_half.append(unit[0]['analysis']['spatial_ratemaps']['spike_rates_halves']['first'])
ratemaps_a_2nd_half.append(unit[0]['analysis']['spatial_ratemaps']['spike_rates_halves']['second'])
if ratemap_shape is None:
ratemap_shape = ratemaps_a[-1].shape
if unit[4] is None:
ratemaps_a2.append(None)
else:
ratemaps_a2.append(unit[4]['analysis']['spatial_ratemaps']['spike_rates_smoothed'])
for rm_list in (ratemaps_a, ratemaps_a2):
for i, ratemap in enumerate(rm_list):
if ratemap is None:
rm_list[i] = np.zeros(ratemap_shape, dtype=np.float64)
for ratemap_a, ratemap_a2 in zip(ratemaps_a, ratemaps_a2):
rho.append(spatial_correlation(ratemap_a, ratemap_a2,
min_included_value=min_included_rate,
min_bins=min_required_bins)[0])
for ratemap_1st_half, ratemap_2nd_half in zip(ratemaps_a_1st_half, ratemaps_a_2nd_half):
rho_halves.append(spatial_correlation(ratemap_1st_half, ratemap_2nd_half,
min_included_value=min_included_rate,
min_bins=min_required_bins)[0])
shuffle(ratemaps_a2)
for ratemap_a, ratemap_a2 in zip(ratemaps_a, ratemaps_a2):
shuffle_rho.append(spatial_correlation(ratemap_a, ratemap_a2,
min_included_value=min_included_rate,
min_bins=min_required_bins)[0])
df_a_halves = pd.DataFrame({'Pearson $\it{r}$': np.array(rho_halves)})
df_a_halves['group'] = 'A 1/2 v 1/2\nintra-trial'
df_a = pd.DataFrame({'Pearson $\it{r}$': np.array(rho)})
df_a['group'] = "A v A'\ninter-trial"
df_a2 = pd.DataFrame({'Pearson $\it{r}$': np.array(shuffle_rho)})
df_a2['group'] = "A v shuffled A'"
df = pd.concat((df_a_halves, df_a, df_a2), axis=0, ignore_index=True)
df.dropna(inplace=True)
return df
@staticmethod
def plot_ratemap_correlation(all_recordings, ax, stat_ax, min_included_rate, min_required_bins):
"""Plot shows the correlation between ratemaps:
A 1/2 v 1/2 - same unit in environment A, ratemaps computed on first and last half of the recording.
A v A' - same unit in environment A and environment A'
A v shuffled A' - ratemap of one unit in environment A is correlated with a ratemap of a random unit
from the same animal in environment A'. Only a single iteration of this permutation is performed.
The box plot shows median, Q1, Q3 and 5-95% data range.
"""
df = Remapping.compute_ratemap_correlation(all_recordings, min_included_rate, min_required_bins)
groups_order = sorted(list(df['group'].unique()))
plot_raincloud_and_stats('group', 'Pearson $\it{r}$', df, ax, stat_ax,
palette=sns.color_palette(sns_other_colors[:len(groups_order)]),
x_order=groups_order)
ax.set_yticks([y for y in ax.get_yticks() if y <= 1])
stat_ax.set_title('ratemap_correlation')
@staticmethod
def compute_bayes_decoding_arena_accuracy_and_peak_values(all_recordings, position_decoding_name):
dfs, _ = BayesianPositionDecodingArenaAccuracy.compute_for_all_recordings(
all_recordings, position_decoding_name
)
df_accuracy = pd.concat(dfs).reset_index()
return df_accuracy
@staticmethod
def plot_bayes_decoding_arena_accuracy(df, ax, stat_ax):
"""Plots the percentage of samples decoded to each environment, separately for when animal
was in each of the environments (except A'). Each datapoint is the percentage value for a single animal.
"""
order = ('A', 'B', 'C', 'D')
hue_order = ('A', 'B', 'C', 'D')
sns.barplot(x='real environment', y='percentage', hue='decoded environment',
order=order, hue_order=hue_order,
data=df, palette=sns.color_palette(sns_environment_colors[:4]),
ax=ax)
sns.stripplot(x='real environment', y='percentage', hue='decoded environment',
order=order, hue_order=hue_order,
data=df, palette=sns.color_palette(sns_environment_colors[:4]),
ax=ax, linewidth=2, dodge=True)
ax.set_yscale('log')
ax.set_ylabel('decoded samples in location (%)')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[len(hue_order):], labels[len(hue_order):],
ncol=len(df['decoded environment'].unique()), title='decoded environment',
loc='lower left', bbox_to_anchor=(0, 1))
# Plot statistics on stat_ax
stat_ax.set_title('bayes_decoding_arena_accuracy')
mean_abcd_correct = np.mean(df[(df['real environment'] == df['decoded environment'])]['percentage'])
table_cell_text = [['Mean occurrence', 'environments', 'accuracy'],
['correct', 'A, B, C, D', '{:.2f}'.format(mean_abcd_correct)],
['', '', ''],
['Environment', 'Percentage correct', ''],
['A', str(np.mean(df[(df['real environment'] == df['decoded environment'])
& (df['real environment'] == 'A')]['percentage'])), ''],
['B', str(np.mean(df[(df['real environment'] == df['decoded environment'])
& (df['real environment'] == 'B')]['percentage'])), ''],
['C', str(np.mean(df[(df['real environment'] == df['decoded environment'])
& (df['real environment'] == 'C')]['percentage'])), ''],
['D', str(np.mean(df[(df['real environment'] == df['decoded environment'])
& (df['real environment'] == 'D')]['percentage'])), '']]
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
@staticmethod
def plot_bayes_decoding_results(all_recordings, position_decoding_name, ax, stat_ax):
df_accuracy = Remapping.compute_bayes_decoding_arena_accuracy_and_peak_values(all_recordings,
position_decoding_name)
Remapping.plot_bayes_decoding_arena_accuracy(df_accuracy, ax, stat_ax)
@staticmethod
def make_figure(all_recordings):
fig, axs = plt.subplots(1, 2, figsize=(12, 5), gridspec_kw={'width_ratios': [1, 2]})
plt.subplots_adjust(left=0.07, right=0.99, bottom=0.12, top=0.83, hspace=0.4)
stat_fig, stat_axs = plt.subplots(1, 2, figsize=(12, 15))
plt.tight_layout(pad=1.5)
for ax in stat_axs.flatten():
ax.set_xticks([], [])
ax.set_yticks([], [])
Remapping.plot_ratemap_correlation(all_recordings, axs[0], stat_axs[0],
Params.ratemap_stability_kwargs['min_included_value'],
Params.ratemap_stability_kwargs['min_bins'])
Remapping.plot_bayes_decoding_results(all_recordings, 'position_decoding_large_window',
axs[1], stat_axs[1])
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, prefix='', verbose=True):
figure_name = prefix + 'Remapping'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = Remapping.make_figure(all_recordings)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldsPerCellAcrossEnvironmentsSimple:
@staticmethod
def compute_place_field_formation_propensity(df_fields, df_units, combine_environments=False,
add_silent_cells=True):
# Create a copy of df_fields with only the relevant columns
df = df_fields[['unit', 'animal', 'animal_unit', 'experiment_id']].copy(deep=True)
df = df.merge(df_units[['animal', 'animal_unit', 'category']].copy(deep=True),
how='left', on=['animal', 'animal_unit'])
df = df[df['category'] == 'place_cell'] # Only keep place cell fields
df = df[['unit', 'experiment_id']]
# Only keep fields not in exp_scales_a2
df = df[df['experiment_id'] != 'exp_scales_a2']
# Replace experiment_id values for plotting and rename the column
df.replace(to_replace={'experiment_id': experiment_id_substitutes}, inplace=True)
df.rename(columns={'experiment_id': 'environment'}, inplace=True)
# Assign all units to a single environment if requested
if combine_environments:
df['environment'] = 'combined environment'
# Compute count per unit
df['number of fields'] = np.ones(df.shape[0])
df = df.groupby(['unit', 'environment']).sum().reset_index()
# Keep a separate count per unit for output
df_count_per_unit = df.copy(deep=True)
# Compute number of place cells with 0 fields based on how many place cells were recorded in total
n_total_place_cells = df['unit'].unique().size
silent_unit_environments = []
n_silent_units_in_environment = []
for environment, series in df.groupby('environment').count().iterrows():
silent_unit_environments.append(environment)
n_silent_units_in_environment.append(n_total_place_cells - series['unit'])
# Compute count per environment and field count
df['number of place cells'] = np.ones(df.shape[0])
df = df.groupby(['environment', 'number of fields'])['number of place cells'].count().reset_index()
# Add the silent cells to the count
if add_silent_cells:
df_place_cells = pd.concat([df, pd.DataFrame({'environment': silent_unit_environments,
'number of fields': np.zeros(len(silent_unit_environments)),
'number of place cells': n_silent_units_in_environment})],
ignore_index=True)
else:
df_place_cells = df
dfg = df_place_cells.groupby(['environment']).sum().reset_index()[['environment', 'number of place cells']]
dfg.rename(columns={'number of place cells': 'total place cells'}, inplace=True)
df_place_cells = df_place_cells.merge(dfg, how='left', on='environment')
df_place_cells['proportion of place cells'] = \
df_place_cells['number of place cells'] / df_place_cells['total place cells']
return df_place_cells, df_count_per_unit
@staticmethod
def plot(df_units, df_fields, ax, stat_ax):
df_by_environment, df_count_per_unit = \
FieldsPerCellAcrossEnvironmentsSimple.compute_place_field_formation_propensity(df_fields, df_units,
add_silent_cells=False)
df_combined, df_count_per_unit_combined = \
FieldsPerCellAcrossEnvironmentsSimple.compute_place_field_formation_propensity(
df_fields, df_units, combine_environments=True, add_silent_cells=False
)
df = pd.concat([df_by_environment, df_combined], axis=0, ignore_index=True, sort=True)
df_count_per_unit = pd.concat([df_count_per_unit, df_count_per_unit_combined], axis=0, ignore_index=True, sort=True)
environments = df['environment'].unique()
colors = sns_environment_colors[:len(environments)]
colors_dict = {key: color for key, color in zip(environments, colors)}
environment_mean_field_counts = {}
environment_proportion_multifield_cells = {}
for environment in environments:
idx = df_count_per_unit['environment'] == environment
environment_mean_field_counts[environment] = df_count_per_unit.loc[idx, 'number of fields'].mean()
environment_proportion_multifield_cells[environment] = \
np.sum(df_count_per_unit.loc[idx, 'number of fields'] > 1) / np.sum(idx)
second_to_last_field_count = 11
last_field_count = 13
last_field_count_label = None
last_field_count_value = None
for i, environment in enumerate(environments):
df_tmp = df.loc[df['environment'] == environment].copy()
df_tmp = df_tmp.sort_values('number of fields')
# Crop extra values
if np.any(df_tmp['number of fields'] > second_to_last_field_count):
if last_field_count_label is not None:
raise Exception('last_field_count_label should only be assigned once, for combined environment')
last_field_count_label = np.max(df_tmp['number of fields'])
last_field_count_value = df_tmp['proportion of place cells'].values[-1]
df_tmp = df_tmp.loc[df_tmp['number of fields'] <= second_to_last_field_count].copy()
df_tmp_row = df_tmp.iloc[0:1].copy(deep=True)
df_tmp_row.loc[df_tmp_row.index[0], 'number of fields'] = last_field_count
df_tmp_row.loc[df_tmp_row.index[0], 'proportion of place cells'] = last_field_count_value
ax.plot(df_tmp['number of fields'], df_tmp['proportion of place cells'], color=colors_dict[environment],
label=environment, marker='o', linewidth=2, zorder=-i)
if last_field_count_value is None or last_field_count_label is None:
raise Exception('last_field_count_value was never set. Must include combined environment.')
ax.legend(loc='upper right')
ax.set_xlabel('number of fields')
ax.set_ylabel('proportion of active place cells')
ax.set_xticks(np.arange(1, last_field_count, 2))
xtick_labels = np.arange(1, last_field_count, 2)
xtick_labels[-1] = last_field_count_label
ax.set_xticklabels(xtick_labels)
table_cell_text = [['Environment', 'Mean field count']]
for environment, mean_count in environment_mean_field_counts.items():
table_cell_text.append([environment, '{:.3f}'.format(mean_count)])
table_cell_text.append(['', ''])
table_cell_text.append(['Environment', 'proportion multi-field'])
for environment, proportion_multifield in environment_proportion_multifield_cells.items():
table_cell_text.append([environment, '{:.3f}'.format(proportion_multifield)])
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
@staticmethod
def make_figure(df_units, df_fields):
fig, ax = plt.subplots(1, 1, figsize=(5, 4))
plt.subplots_adjust(left=0.13, bottom=0.15, right=0.99, top=0.90, wspace=0.3)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(10, 10))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
FieldsPerCellAcrossEnvironmentsSimple.plot(df_units, df_fields, ax, stat_ax)
return fig, stat_fig
@staticmethod
def write(fpath, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'FieldsPerCellAcrossEnvironmentsSimple'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FieldsPerCellAcrossEnvironmentsSimple.make_figure(df_units, df_fields)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class ConservationOfFieldFormationPropensity(object):
@staticmethod
def get_dataframe(df_units, df_fields, environment_field_density_model_parameters):
# Create a copy of df_fields with only the relevant columns
df = df_fields[['animal', 'animal_unit', 'experiment_id']].copy(deep=True)
df = df.merge(df_units[['animal', 'animal_unit', 'category']].copy(deep=True),
how='left', on=['animal', 'animal_unit'])
# Only keep place cell fields
df = df.loc[df['category'] == 'place_cell', ['animal', 'animal_unit', 'experiment_id']]
# Drop fields in smallest environment
df = df[(df['experiment_id'] != 'exp_scales_a') & (df['experiment_id'] != 'exp_scales_a2')]
# Count fields per unit in each environment
df['count'] = 1
df = df.groupby(['animal', 'animal_unit', 'experiment_id']).sum().reset_index()
# Compute field formation propensity
environment_areas_corrected = \
FieldsDetectedAcrossEnvironments.compute_environment_areas_with_field_density_correction(
parameters=environment_field_density_model_parameters
)
new_environment_areas_corrected = {}
for environment, area in environment_areas_corrected.items():
if environment in experiment_id_substitutes_inverse:
new_environment_areas_corrected[experiment_id_substitutes_inverse[environment]] = area
df['environment_areas'] = df['experiment_id'].map(new_environment_areas_corrected)
df['field_formation_propensity'] = df['count'] / df['environment_areas']
return df
@staticmethod
def plot(df_units, df_fields, environment_field_density_model_parameters, ax, stat_ax, n_shuffles=1000):
df = ConservationOfFieldFormationPropensity.get_dataframe(df_units, df_fields,
environment_field_density_model_parameters)
measure = 'field_formation_propensity'
required_count = df['experiment_id'].unique().size
df = df[df.groupby(['animal', 'animal_unit'])[measure].transform('size') == required_count].reset_index(
drop=True)
df_real = df.groupby(['animal', 'animal_unit']).std().reset_index().copy(deep=True)
# Create multiple shuffled unit variance results
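# Shuffle logic (comment added for clarity): within each animal and experiment, the
# 'animal_unit' labels are permuted among the rows, which preserves the per-environment
# field counts but breaks the pairing of counts across environments for a given unit.
# The across-environment standard deviation of field density is then recomputed on each
# shuffled pairing and compared with the observed values via a one-sided Mann-Whitney U test.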
shuffle_indices = {animal: {} for animal in df['animal'].unique()}
for animal in df['animal'].unique():
for experiment_id in df.loc[df['animal'] == animal, 'experiment_id'].unique():
shuffle_indices[animal][experiment_id] = \
np.where((df['animal'] == animal) & (df['experiment_id'] == experiment_id))[0]
df_shuffle = []
for _ in tqdm(range(n_shuffles)):
for animal in df['animal'].unique():
for experiment_id in shuffle_indices[animal].keys():
previous_indices = shuffle_indices[animal][experiment_id]
new_indices = shuffle_indices[animal][experiment_id].copy()
np.random.shuffle(new_indices)
df.loc[previous_indices, 'animal_unit'] = df.loc[new_indices, 'animal_unit'].values
df_shuffle.append(df.groupby(['animal', 'animal_unit']).std().reset_index())
df_shuffle = pd.concat(df_shuffle, axis=0, ignore_index=True)
# Compute Mann-Whitney rank test
statistic, pvalue = mannwhitneyu(df_real[measure], df_shuffle[measure], alternative='less')
# Plot results
df_real['group'] = 'data'
df_shuffle['group'] = 'shuffle'
df = pd.concat([df_real, df_shuffle], axis=0, ignore_index=True)
sns.histplot(
data=df, x=measure, hue='group', element="step", fill=False,
cumulative=True, stat='density', common_norm=False, ax=ax
)
# plot_normalised_histograms_of_real_and_shuffle_data(df_real[measure], df_shuffle[measure], ax, bins=10)
ax.set_ylabel('cumulative proportion of cells')
ax.set_xlabel('st.dev.(place fields / m$^2$)')
# ax.legend()
# Plot stats
stat_ax.set_title(measure)
table_cell_text = [
['Mann-Whitney test', ''],
['U value', '{:.2e}'.format(statistic)],
['p-value', '{:.2e}'.format(pvalue)],
['', ''],
['Total samples', str(df_real.shape[0])],
['n_shuffles', str(n_shuffles)]
]
for animal in sorted(df_real['animal'].unique()):
table_cell_text += [[animal, str(np.sum(df_real['animal'] == animal))]]
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
@staticmethod
def make_figure(df_units, df_fields, environment_field_density_model_parameters, verbose=False):
fig, ax = plt.subplots(1, 1, figsize=(3, 3))
plt.subplots_adjust(left=0.32, bottom=0.2, right=0.98, top=0.9)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(10, 10))
plt.tight_layout(pad=1)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
ConservationOfFieldFormationPropensity.plot(df_units, df_fields, environment_field_density_model_parameters,
ax, stat_ax)
return fig, stat_fig
@staticmethod
def write(fpath, df_units, df_fields, environment_field_density_model_parameters, prefix='', verbose=True):
figure_name = prefix + 'ConservationOfFieldFormationPropensity'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = ConservationOfFieldFormationPropensity.make_figure(
df_units, df_fields, environment_field_density_model_parameters, verbose=verbose
)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldsPerCellAcrossEnvironments:
@staticmethod
def compute_distribution_from_field_counts(field_counts):
field_counts, unit_counts = np.unique(field_counts, return_counts=True)
unit_count_by_field_count = np.zeros(np.max(field_counts) + 1)
unit_count_by_field_count[field_counts] = unit_counts
field_count_distribution = unit_count_by_field_count / np.sum(unit_count_by_field_count)
return field_count_distribution
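# Worked example with hypothetical input: field_counts = [1, 1, 2, 4] gives
# unit_count_by_field_count = [0, 2, 1, 0, 1] and hence a field count distribution of
# [0.0, 0.5, 0.25, 0.0, 0.25] over counts 0..4.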
@staticmethod
def predict_probability_of_field_count(areas, counts, gamma_shape, gamma_scale):
"""Returns directly the Poisson(Gamma) probability mass function value at specific counts.
Rich et al 2014 DOI: 10.1126/science.1255635
Supplementary materials page 8 equation with slight correction: (1 - p) instead of (p - 1).
"""
gamma_rates = 1 / (gamma_scale * areas)
r = gamma_shape
p_values = gamma_rates / (gamma_rates + 1)
probabilities = [
(gamma_function(r + x) / (gamma_function(r) * gamma_function(x + 1)))
* np.power(p, r) * np.power(1 - p, x)
for x, p in zip(counts, p_values)
]
return np.array(probabilities)
@staticmethod
def predict_field_count_distribution_with_gamma_poisson(area, gamma_shape, gamma_scale, max_counts=100):
"""Returns an array of field count distribution among units
"""
return FieldsPerCellAcrossEnvironments.predict_probability_of_field_count(
area * np.ones(max_counts), np.arange(max_counts), gamma_shape, gamma_scale
)
@staticmethod
def predict_field_count_distribution_with_equal_poisson(area, poisson_rate, max_counts=100):
"""Returns an array of field count distribution among units
"""
return poisson.pmf(np.arange(max_counts), poisson_rate * area)
@staticmethod
def compute_log_likelihood_of_data_given_gamma_parameters(areas, field_counts, gamma_shape, gamma_scale):
"""
Returns the sum of the natural logarithms of the data likelihoods under the given gamma parameters.
:param areas: shape (N,) of environment sizes where N cells were observed to be active
:param field_counts: shape (N,) of field counts of N cells.
:param gamma_shape:
:param gamma_scale:
:return: log_likelihood
"""
probabilities = FieldsPerCellAcrossEnvironments.predict_probability_of_field_count(
areas, field_counts, gamma_shape, gamma_scale
)
return np.sum(np.log(probabilities))
@staticmethod
def compute_log_likelihood_of_data_given_poisson_rate(areas, field_counts, poisson_rate):
"""
Returns the sum of the natural logarithms of the data likelihoods under the given Poisson rate parameter.
:param areas:
:param field_counts:
:param poisson_rate: Poisson rate per unit area that is constant for all cells.
:return: log_likelihood
"""
probabilities = poisson.pmf(field_counts, poisson_rate * areas)
return np.sum(np.log(probabilities))
@staticmethod
def construct_negative_log_likelihood_model_fitting_method(model_name):
if model_name == 'gamma-poisson':
return lambda x, areas, field_counts: \
-FieldsPerCellAcrossEnvironments.compute_log_likelihood_of_data_given_gamma_parameters(
areas, field_counts, x[0], x[1]
)
elif model_name == 'equal-poisson':
return lambda x, areas, field_counts: \
-FieldsPerCellAcrossEnvironments.compute_log_likelihood_of_data_given_poisson_rate(
areas, field_counts, x[0]
)
else:
raise ValueError('Expected model name gamma-poisson or equal-poisson but got {}'.format(model_name))
@staticmethod
def predict_field_count_distributions_in_multiple_areas(areas, model_name, params, total_units=100000):
"""Returns an array of field count of same units in multiple areas
"""
if model_name == 'gamma-poisson':
propensities = gamma.rvs(a=params[0], loc=0, scale=params[1], size=total_units)
elif model_name == 'equal-poisson':
propensities = params[0] * np.ones(total_units)
else:
raise ValueError('Expected model name gamma-poisson or equal-poisson but got {}'.format(model_name))
field_counts_all = [poisson.rvs(propensities * area) for area in areas]
# Compute field count distribution in each area separately using all units
field_count_distributions_per_area_all = []
for field_counts_in_area in field_counts_all:
field_count_distributions_per_area_all.append(
FieldsPerCellAcrossEnvironments.compute_distribution_from_field_counts(field_counts_in_area)
)
# Compute field count distribution in each area separately using only the units with one field
# in at least one of the areas
idx_active = np.zeros(total_units, dtype=bool)
for field_counts_in_area_all in field_counts_all:
idx_active = np.logical_or(idx_active, field_counts_in_area_all > 0)
field_count_distributions_per_area_active = []
for field_counts_in_area in field_counts_all:
field_count_distributions_per_area_active.append(
FieldsPerCellAcrossEnvironments.compute_distribution_from_field_counts(
field_counts_in_area[idx_active]
)
)
return field_count_distributions_per_area_all, field_count_distributions_per_area_active
@staticmethod
def predict_field_count_distributions_in_multiple_areas_for_active_units(
areas, gamma_shape, gamma_scale, environment_field_density_model_parameters, total_units=100000,
):
"""Returns an array of field count of same units in multiple areas, only using modelled
units that had at least one field in one of the experiment areas
"""
environment_areas_corrected = \
FieldsDetectedAcrossEnvironments.compute_environment_areas_with_field_density_correction(
parameters=environment_field_density_model_parameters
)
environment_areas_corrected = [
environment_areas_corrected[environment] for environment in ('A', 'B', 'C', 'D')
]
propensities = gamma.rvs(a=gamma_shape, loc=0, scale=gamma_scale, size=total_units)
field_counts_all = [poisson.rvs(propensities * area) for area in environment_areas_corrected]
# Compute field count distribution in each area separately using all units
field_count_distributions_per_area_all = []
for field_counts_in_area in field_counts_all:
field_count_distributions_per_area_all.append(
FieldsPerCellAcrossEnvironments.compute_distribution_from_field_counts(field_counts_in_area)
)
# Compute field count distribution in each area separately using only the units with one field
# in at least one of the areas
idx_active = np.zeros(total_units, dtype=bool)
for field_counts_in_area_all in field_counts_all:
idx_active = np.logical_or(idx_active, field_counts_in_area_all > 0)
propensities = propensities[idx_active]
field_counts_all = [poisson.rvs(propensities * area) for area in areas]
field_count_distributions_per_area_active = []
for field_counts_in_area in field_counts_all:
field_count_distributions_per_area_active.append(
FieldsPerCellAcrossEnvironments.compute_distribution_from_field_counts(
field_counts_in_area
)
)
return field_count_distributions_per_area_all, field_count_distributions_per_area_active
@staticmethod
def plot_gamma_pdf(gamma_shape, gamma_scale, ax):
x = np.linspace(0, gamma_shape * gamma_scale * 5, 1000)
y = gamma.pdf(x, gamma_shape, loc=0, scale=gamma_scale)
ax.plot(x, y)
ax.set_xlim((0, x.max()))
ax.set_ylim((0, ax.get_ylim()[1]))
ax.set_ylabel('Gamma pdf')
ax.set_xlabel('place fields / m$^2$')
ax.text(0.9, 0.9, 'shape = {:.3f}\nscale = {:.3f}'.format(gamma_shape, gamma_scale),
ha='right', va='top', transform=ax.transAxes)
@staticmethod
def plot_field_formation_propensities(df_fields, df_units, environment_field_density_model_parameters, ax, stat_ax):
df_by_environment, _ = \
FieldsPerCellAcrossEnvironmentsSimple.compute_place_field_formation_propensity(df_fields, df_units)
df_combined, df_count_per_unit = \
FieldsPerCellAcrossEnvironmentsSimple.compute_place_field_formation_propensity(
df_fields, df_units, combine_environments=True
)
df = pd.concat([df_by_environment, df_combined], axis=0, ignore_index=True, sort=True)
areas_corrected = FieldsDetectedAcrossEnvironments.compute_environment_areas_with_field_density_correction(
parameters=environment_field_density_model_parameters
)
areas_not_corrected = FieldsDetectedAcrossEnvironments.compute_environment_areas_with_field_density_correction(
parameters=None
)
df_count_per_unit['areas_corrected'] = df_count_per_unit['environment'].map(areas_corrected)
df_count_per_unit['areas_not_corrected'] = df_count_per_unit['environment'].map(areas_not_corrected)
# Drop cell counts with 0 fields in each environment and recompute proportion of cells
dfs = []
areas_corrected_list = []
areas_not_corrected_list = []
field_count_distribution_prediction_list = []
for environment in list(df['environment'].unique()):
idx = df['environment'] == environment
field_counts = np.int16(df.loc[idx, 'number of fields'])
unit_percentages = df.loc[idx, 'proportion of place cells']
field_count_distribution = np.zeros(np.max(field_counts) + 1)
field_count_distribution[field_counts] = unit_percentages.values
field_count_distribution = field_count_distribution[1:]
field_count_distribution = field_count_distribution / np.sum(field_count_distribution)
df_tmp = pd.DataFrame({'number of fields': np.arange(field_count_distribution.size) + 1,
'proportion of active place cells': field_count_distribution})
df_tmp['environment'] = environment
areas_corrected_list.append(areas_corrected[environment])
areas_not_corrected_list.append(areas_not_corrected[environment])
field_count_distribution_prediction_list.append(field_count_distribution)
dfs.append(df_tmp)
df = pd.concat(dfs, axis=0, ignore_index=True, sort=True)
df['values'] = 'data - place cells in environment'
environments = sorted(df['environment'].unique())
environments_real = sorted(list(set(environments) - {'combined environment'}))
# Fit model parameters using maximum likelihood estimation
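# Fitting note (comment added for clarity): scipy.optimize.minimize is applied to the
# negative log-likelihood returned by construct_negative_log_likelihood_model_fitting_method,
# with bounds keeping the rate and gamma parameters strictly positive; the starting values
# (0.1 for the Poisson rate, (1, 0.1) for the gamma shape and scale) are heuristic choices.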
print('Fitting Poisson rate with field density correction')
res = minimize(
FieldsPerCellAcrossEnvironments.construct_negative_log_likelihood_model_fitting_method('equal-poisson'),
np.array([0.1]),
args=(df_count_per_unit['areas_corrected'].values, df_count_per_unit['number of fields'].values),
bounds=((np.finfo(np.float32).resolution, None),),
options={'disp': True}
)
poisson_rate, = res.x
print('Fitting Poisson rate without field density correction')
res = minimize(
FieldsPerCellAcrossEnvironments.construct_negative_log_likelihood_model_fitting_method('equal-poisson'),
np.array([0.1]),
args=(df_count_per_unit['areas_not_corrected'].values, df_count_per_unit['number of fields'].values),
bounds=((np.finfo(np.float32).resolution, None),),
options={'disp': True}
)
not_corrected_poisson_rate, = res.x
print('Fitting Gamma parameters with field density correction')
res = minimize(
FieldsPerCellAcrossEnvironments.construct_negative_log_likelihood_model_fitting_method('gamma-poisson'),
np.array([1, 0.1]),
args=(df_count_per_unit['areas_corrected'].values, df_count_per_unit['number of fields'].values),
bounds=((np.finfo(np.float32).resolution, None), (np.finfo(np.float32).resolution, None)),
options={'disp': True}
)
gamma_shape, gamma_scale = res.x
print('Fitting Gamma parameters without field density correction')
res = minimize(
FieldsPerCellAcrossEnvironments.construct_negative_log_likelihood_model_fitting_method('gamma-poisson'),
np.array([1, 0.1]),
args=(df_count_per_unit['areas_not_corrected'].values, df_count_per_unit['number of fields'].values),
bounds=((np.finfo(np.float32).resolution, None), (np.finfo(np.float32).resolution, None)),
options={'disp': True}
)
not_corrected_gamma_shape, not_corrected_gamma_scale = res.x
# Make predictions
for model_name in ('equal-Poisson place cells in environment', 'gamma-Poisson place cells in environment'):
dfs = []
for environment in environments:
if model_name == 'equal-Poisson place cells in environment':
field_count_distribution_full = \
FieldsPerCellAcrossEnvironments.predict_field_count_distribution_with_equal_poisson(
areas_corrected[environment], poisson_rate
)
elif model_name == 'gamma-Poisson place cells in environment':
field_count_distribution_full = \
FieldsPerCellAcrossEnvironments.predict_field_count_distribution_with_gamma_poisson(
areas_corrected[environment], gamma_shape, gamma_scale
)
else:
raise ValueError()
# Compute the distribution normalised to the total number of units with at least one field
sum_without_silent_cells = 1 - field_count_distribution_full[0]
field_count_distribution_less = field_count_distribution_full[1:]
field_count_distribution_less = field_count_distribution_less / sum_without_silent_cells
field_count_distribution_less = np.concatenate([[np.nan], field_count_distribution_less])
df_tmp = pd.DataFrame({'number of fields': np.arange(field_count_distribution_full.size),
'proportion of all place cells': field_count_distribution_full,
'proportion of active place cells': field_count_distribution_less})
df_tmp['environment'] = environment
dfs.append(df_tmp)
df_tmp = pd.concat(dfs, axis=0, ignore_index=True, sort=True)
df_tmp['values'] = model_name
df = pd.concat([df, df_tmp], axis=0, ignore_index=True, sort=True)
# Plot
fig = ax.figure
ax.axis('off')
width_ratios = [df.loc[(df['environment'] == environment)
& (df['values'] == 'data - place cells in environment'), 'number of fields'].max()
for environment in environments_real]
gs = GridSpecFromSubplotSpec(2, 1, ax, height_ratios=[1, 3], hspace=0.4)
ax_top = fig.add_subplot(gs[0])
ax_bottom = fig.add_subplot(gs[1])
ax_top.axis('off')
ax_bottom.axis('off')
gs = GridSpecFromSubplotSpec(1, len(width_ratios), ax_bottom, wspace=0.08, width_ratios=width_ratios)
axs_real_environments = [fig.add_subplot(g) for g in gs]
gs = GridSpecFromSubplotSpec(1, 2, ax_top, wspace=0.2, width_ratios=[3, 1])
axs_top_left = fig.add_subplot(gs[0])
axs_top_right = fig.add_subplot(gs[1])
colors = sns_environment_colors[:len(environments)]
colors_dict = {key: color for key, color in zip(environments, colors)}
for ax, environment in zip(axs_real_environments + [axs_top_left], environments):
idx = df['environment'] == environment
idx = idx & (df['number of fields'] > 0)
label = 'data - place cells in environment'
n_fields = df.loc[idx & (df['values'] == label), 'number of fields'].values
prop_units = df.loc[idx & (df['values'] == label), 'proportion of active place cells'].values
ax.bar(n_fields, prop_units, align='center', width=np.ones(n_fields.size) * 0.9,
color=colors_dict[environment])
label = 'equal-Poisson place cells in environment'
n_fields = df.loc[idx & (df['values'] == label), 'number of fields'].values
prop_units = df.loc[idx & (df['values'] == label), 'proportion of active place cells'].values
ax.plot(n_fields, prop_units, 'r', linewidth=2, label='equal-Poisson')
label = 'gamma-Poisson place cells in environment'
n_fields = df.loc[idx & (df['values'] == label), 'number of fields'].values
prop_units = df.loc[idx & (df['values'] == label), 'proportion of active place cells'].values
ax.plot(n_fields, prop_units, 'k', linewidth=2, label='gamma-Poisson')
ax.set_xlim((0.5, df.loc[idx & (df['values'] == 'data - place cells in environment'),
'number of fields'].max() + 0.5))
ax.set_xticks(np.arange(1, df.loc[idx & (df['values'] == 'data - place cells in environment'),
'number of fields'].max(),
2))
ax.set_title('( {} )'.format(environment))
axs_top_left.legend(loc='upper right', framealpha=1)
axs_real_environments[3].legend(loc='upper right', framealpha=1)
axs_real_environments[0].set_ylabel('proportion of cells')
axs_real_environments[2].set_xlabel('number of fields')
axs_top_left.set_ylabel('proportion of cells')
axs_top_left.set_xlabel('number of fields')
ylim = get_max_ylim(axs_real_environments)
for ax in axs_real_environments:
ax.set_ylim(ylim)
for ax in axs_real_environments[1:]:
plt.setp(ax.get_yticklabels(), visible=False)
# Plot Gamma distribution pdf to inset
FieldsPerCellAcrossEnvironments.plot_gamma_pdf(gamma_shape, gamma_scale, axs_top_right)
# Compute statistics for all models
# Maximum loglikelihood
poisson_maximum_loglikelihood = \
FieldsPerCellAcrossEnvironments.compute_log_likelihood_of_data_given_poisson_rate(
df_count_per_unit['areas_corrected'].values, df_count_per_unit['number of fields'].values,
poisson_rate
)
not_corrected_poisson_maximum_loglikelihood = \
FieldsPerCellAcrossEnvironments.compute_log_likelihood_of_data_given_poisson_rate(
df_count_per_unit['areas_not_corrected'].values, df_count_per_unit['number of fields'].values,
not_corrected_poisson_rate
)
gamma_maximum_loglikelihood = \
FieldsPerCellAcrossEnvironments.compute_log_likelihood_of_data_given_gamma_parameters(
df_count_per_unit['areas_corrected'].values, df_count_per_unit['number of fields'].values,
gamma_shape, gamma_scale
)
not_corrected_gamma_maximum_loglikelihood = \
FieldsPerCellAcrossEnvironments.compute_log_likelihood_of_data_given_gamma_parameters(
df_count_per_unit['areas_not_corrected'].values, df_count_per_unit['number of fields'].values,
not_corrected_gamma_shape, not_corrected_gamma_scale
)
model_names = ('poisson', 'not_corrected_poisson', 'gamma', 'not_corrected_gamma')
model_parameter_count = (1, 1, 2, 2)
model_loglikelihoods = (
poisson_maximum_loglikelihood,
not_corrected_poisson_maximum_loglikelihood,
gamma_maximum_loglikelihood,
not_corrected_gamma_maximum_loglikelihood
)
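# Model comparison note (comment added for clarity): with k parameters, n data points and
# maximised log-likelihood ln(L), AIC = 2 * k - 2 * ln(L) and BIC = k * ln(n) - 2 * ln(L);
# lower values indicate a better trade-off between goodness of fit and model complexity.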
# AIC
aic = {}
for name, loglikelihood, parameter_count in zip(model_names, model_loglikelihoods, model_parameter_count):
aic[name] = 2 * parameter_count - 2 * loglikelihood
# BIC
bic = {}
for name, loglikelihood, parameter_count in zip(model_names, model_loglikelihoods, model_parameter_count):
bic[name] = parameter_count * np.log(df_count_per_unit.shape[0]) - 2 * loglikelihood
# Plot statistics for all models
table_cell_text = [['equal-Poisson', 'corrected'],
['poisson_rate', str(poisson_rate)],
['poisson_maximum_loglikelihood', str(poisson_maximum_loglikelihood)],
['akaike information criterion', str(aic['poisson'])],
['bayesian information criterion', str(bic['poisson'])],
['', ''],
['equal-Poisson', 'not corrected'],
['not_corrected_poisson_rate', str(not_corrected_poisson_rate)],
['not_corrected_poisson_maximum_loglikelihood',
str(not_corrected_poisson_maximum_loglikelihood)],
['akaike information criterion', str(aic['not_corrected_poisson'])],
['bayesian information criterion', str(bic['not_corrected_poisson'])],
['', ''],
['gamma-Poisson', 'corrected'],
['gamma_shape', str(gamma_shape)],
['gamma_scale', str(gamma_scale)],
['gamma_maximum_loglikelihood', str(gamma_maximum_loglikelihood)],
['akaike information criterion', str(aic['gamma'])],
['bayesian information criterion', str(bic['gamma'])],
['', ''],
['gamma-Poisson', 'not corrected'],
['not_corrected_gamma_shape', str(not_corrected_gamma_shape)],
['not_corrected_gamma_scale', str(not_corrected_gamma_scale)],
['not_corrected_gamma_maximum_loglikelihood',
str(not_corrected_gamma_maximum_loglikelihood)],
['akaike information criterion', str(aic['not_corrected_gamma'])],
['bayesian information criterion', str(bic['not_corrected_gamma'])],
['', ''],
['N cells to fit models', str(df_count_per_unit.shape[0])]]
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
gamma_model_fit = {
'gamma_shape': gamma_shape, 'gamma_scale': gamma_scale,
'not_corrected_gamma_shape': not_corrected_gamma_shape,
'not_corrected_gamma_scale': not_corrected_gamma_scale,
'poisson_rate': poisson_rate,
'not_corrected_poisson_rate': not_corrected_poisson_rate
}
return gamma_model_fit
@staticmethod
def make_figure(df_units, df_fields, environment_field_density_model_parameters, verbose=False):
fig, ax = plt.subplots(1, 1, figsize=(9, 7))
plt.subplots_adjust(left=0.08, bottom=0.08, right=0.99, top=0.95)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(10, 10))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
gamma_model_fit = \
FieldsPerCellAcrossEnvironments.plot_field_formation_propensities(
df_fields, df_units, environment_field_density_model_parameters, ax, stat_ax
)
return fig, stat_fig, gamma_model_fit
@staticmethod
def write(fpath, df_units, df_fields, environment_field_density_model_parameters, prefix='', verbose=True):
figure_name = prefix + 'FieldsPerCellAcrossEnvironments'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig, gamma_model_fit = FieldsPerCellAcrossEnvironments.make_figure(
df_units, df_fields, environment_field_density_model_parameters, verbose=verbose
)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
return gamma_model_fit
class PlaceCellsDetectedAcrossEnvironments(object):
@staticmethod
def plot_place_cell_recruitment(df_units, df_fields, environment_field_density_model_parameters,
gamma_model_fit, ax, stat_ax):
"""Plots the percentage of place cells detected in each animal that have a field in each environment.
The inset shows the projection from this model to 100% recruitment.
Units of inset axes are the same as main axes.
"""
df, _ = \
FieldsDetectedAcrossEnvironments.get_field_or_cell_count_per_environment_and_animal(
df_units, df_fields
)
df = df.copy(deep=True)
# Compute prediction proportion of units active in each environment for plotting
environment_areas = FieldsDetectedAcrossEnvironments.compute_environment_areas_with_field_density_correction()
plot_areas = np.array([environment_areas[environment] for environment in ('A', 'B', 'C', 'D')])
# TODO: Uncomment below to compute for environments of all sizes up to 9 m2.
# plot_areas = np.linspace(0, 9, 1000)
areas_corrected = np.array([
FieldsDetectedAcrossEnvironments.compute_environment_area_with_field_density_correction(
physical_area, environment_field_density_model_parameters
)
for physical_area in plot_areas
])
_, field_count_distributions_per_area_active = \
FieldsPerCellAcrossEnvironments.predict_field_count_distributions_in_multiple_areas(
areas_corrected, 'gamma-poisson', (gamma_model_fit['gamma_shape'], gamma_model_fit['gamma_scale'])
)
eval_recruitment = np.array(
[(1 - field_count_distribution[0])
for field_count_distribution in field_count_distributions_per_area_active]
)
environment_areas_corrected = \
FieldsDetectedAcrossEnvironments.compute_environment_areas_with_field_density_correction(
parameters=environment_field_density_model_parameters
)
df['corrected_environment_size'] = \
np.array([environment_areas_corrected[environment] for environment in df['environment']])
environment_areas_not_corrected = \
FieldsDetectedAcrossEnvironments.compute_environment_areas_with_field_density_correction(
parameters=None
)
df['not_corrected_environment_size'] = \
np.array([environment_areas_not_corrected[environment] for environment in df['environment']])
# Compute prediction proportion of units active in each environment for each sample to compute MSE
_, field_count_distributions_per_area_active = \
FieldsPerCellAcrossEnvironments.predict_field_count_distributions_in_multiple_areas(
df['corrected_environment_size'], 'gamma-poisson',
(gamma_model_fit['gamma_shape'], gamma_model_fit['gamma_scale'])
)
df['model_prediction'] = np.array(
[(1 - field_count_distribution[0])
for field_count_distribution in field_count_distributions_per_area_active]
)
# Compute predictions for each sample also without field density correction
_, not_corrected_field_count_distributions_per_area_active = \
FieldsPerCellAcrossEnvironments.predict_field_count_distributions_in_multiple_areas(
df['not_corrected_environment_size'], 'gamma-poisson',
(gamma_model_fit['not_corrected_gamma_shape'], gamma_model_fit['not_corrected_gamma_scale'])
)
df['not_corrected_model_prediction'] = np.array(
[(1 - field_count_distribution[0])
for field_count_distribution in not_corrected_field_count_distributions_per_area_active]
)
# Compute mean squared error for recruitment curve
model_residuals = np.abs(df['model_prediction'] - df['place cells'])
model_mean_squared_error = np.mean(model_residuals ** 2)
not_corrected_model_residuals = np.abs(df['not_corrected_model_prediction'] - df['place cells'])
not_corrected_model_mean_squared_error = np.mean(not_corrected_model_residuals ** 2)
# Compute full recruitment curve
eval_full_recruitment = []
eval_full_recruitment_area = np.arange(0.1, 10.01, 0.1) ** 2
for area in eval_full_recruitment_area:
eval_full_recruitment.append(
1 - FieldsPerCellAcrossEnvironments.predict_field_count_distribution_with_gamma_poisson(
area, gamma_model_fit['gamma_shape'], gamma_model_fit['gamma_scale'], max_counts=1
)[0]
)
eval_full_recruitment = np.array(eval_full_recruitment)
recruitment_99 = np.min(eval_full_recruitment_area[eval_full_recruitment > 0.99])
# Plot data
environments = sorted(np.unique(df['environment']))
colors_dict = {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)}
ax.scatter(df['environment_size'] + np.random.uniform(-0.2, 0.2, df['environment_size'].size),
df['place cells'], s=50, c=[colors_dict[x] for x in df['animal']],
linewidth=1, edgecolors='black', zorder=1)
# Plot model fits
model_points = ax.scatter(plot_areas, eval_recruitment, marker='X', color='black',
label='gamma-Poisson', s=150, zorder=-1)
ax.legend(handles=[model_points], loc='upper left')
# Compute recruitment estimation and plot to an inset axes
ax_inset = inset_axes(ax, width='40%', height='40%', loc='lower right')
ax_inset.plot(eval_full_recruitment_area, eval_full_recruitment, color='black')
ax_inset.set_ylabel('proportion of\nall place cells')
ax_inset.set_xlabel('area (m$^2$)')
ax_inset.xaxis.set_label_position('top')
ax_inset.xaxis.set_ticks_position('top')
ax_inset.set_ylim((0, 1))
ax_inset.set_xscale('log')
ax_inset.set_xlim((eval_full_recruitment_area[0], eval_full_recruitment_area[-1]))
xtick_values = [0.1, 1, 10, 100]
ax_inset.set_xticks(xtick_values)
# Adjust axes parameters
ax.set_xlim((0, ax.get_xlim()[1]))
ax.set_xticks(experiment_ids_with_areas_ticks['ticks'])
ax.set_xticklabels(experiment_ids_with_areas_ticks['ticklabels'])
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
ax.set_xlabel('environment, size (m$^2$)')
ax.set_ylabel('proportion of active place cells')
ax.set_ylim((0, 1))
plt.setp(ax_inset.get_xticklabels(), ha='left')
# Compute stats
kruskal_h_value, kruskal_pvalue = \
kruskal(*[df[df['environment'] == group]['place cells'] for group in environments])
df_sorted = df.sort_values('animal')
friedman_chisq_value, friedman_pvalue = \
friedmanchisquare(*[df_sorted[df_sorted['environment'] == group]['place cells']
for group in environments])
# Plot stats to stat_ax
stat_ax.set_title('Place cell recruitment')
table_cell_text = [['Kruskal-Wallis test', 'H-value', 'p-value'],
['', '{:.2e}'.format(kruskal_h_value), '{:.2e}'.format(kruskal_pvalue)],
['', '', ''],
['Friedman test', 'chi-square statistic', 'p-value'],
['', '{:.2e}'.format(friedman_chisq_value), '{:.2e}'.format(friedman_pvalue)],
['', '', ''],
['', 'model_mean_squared_error', '{:.3e}'.format(model_mean_squared_error)],
['', 'not_corrected_model_mean_squared_error',
'{:.3e}'.format(not_corrected_model_mean_squared_error)],
['', '', ''],
['99% recruitment area size',
'{:.1f} m$^2$'.format(np.max(recruitment_99)), '']]
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
@staticmethod
def make_figure(df_units, df_fields, environment_field_density_model_parameters, gamma_model_fit, verbose=False):
fig, ax = plt.subplots(1, 1, figsize=(6.2, 5))
plt.subplots_adjust(left=0.11, bottom=0.21, right=0.96, top=0.98)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(12, 6))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
PlaceCellsDetectedAcrossEnvironments.plot_place_cell_recruitment(
df_units, df_fields, environment_field_density_model_parameters,
gamma_model_fit, ax, stat_ax
)
return fig, stat_fig
@staticmethod
def write(fpath, df_units, df_fields, environment_field_density_model_parameters,
gamma_model_fit, prefix='', verbose=True):
figure_name = prefix + 'PlaceCellsDetectedAcrossEnvironments'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = PlaceCellsDetectedAcrossEnvironments.make_figure(
df_units, df_fields, environment_field_density_model_parameters, gamma_model_fit, verbose=verbose
)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldDistributionWithinEnvironments(object):
@staticmethod
def compute(all_recordings, df_units, df_fields, bin_size, combined=False):
df = PlaceFieldPeakDistribution.compute(all_recordings, df_fields, df_units,
'centroids', bin_size=bin_size, combined=combined)
del df['field peak proportion of environment per m^2']
df.rename(columns={'field peak proportion of total per m^2': 'density'}, inplace=True)
compute_distances_to_landmarks(df, np.stack((df['x_coord'].values, df['y_coord'].values), axis=1))
return df
@staticmethod
def plot(all_recordings, df_units, df_fields, fig, ax, stat_axs):
df = FieldDistributionWithinEnvironments.compute(all_recordings, df_units, df_fields, 4,
combined=False)
ValueByBinnedDistancePlot(
df, 'density', 'distance to wall (cm)', fig, ax, stat_axs, kind='strip',
first_plot_title='< {:d} cm from wall'.format(ValueByBinnedDistancePlot.distance_bin_width),
xlabel='distance to wall (cm)', ylabel='proportion of fields / m$^2$',
friedman_grouping_variable='animal',
plot_stats_test='Mann-Whitney',
aggregate_by_distance_and_animal='mean',
data_selection_label_kwargs={}
)
@staticmethod
def make_figure(all_recordings, df_units, df_fields):
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
plt.subplots_adjust(left=0.16, bottom=0.16, right=0.995, top=0.85, wspace=0.25, hspace=0.4)
stat_fig, stat_axs = plt.subplots(2, 1, figsize=(10, 25), gridspec_kw={'height_ratios': [2.5, 4]})
plt.tight_layout(pad=1.5)
for stat_ax in stat_axs.flatten():
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
FieldDistributionWithinEnvironments.plot(all_recordings, df_units, df_fields, fig, ax, stat_axs)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'FieldDistributionWithinEnvironments'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FieldDistributionWithinEnvironments.make_figure(all_recordings, df_units, df_fields)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class BinnedByDistancePlots:
@staticmethod
def compute_and_write_stats(df, binned_measure, value, stat_ax):
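        # Run a Kruskal-Wallis test across distance bins (and a Friedman test when the groups are
        # matched in size), follow up with FDR-corrected pairwise comparisons where warranted,
        # write a summary table to stat_ax and return whether any test reached significance.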
x_bin_order = sorted(df[binned_measure].unique())
df = df.sort_values('animal')
test_groups = [df[df[binned_measure] == group][value]
for group in x_bin_order]
kruskal_h_value, kruskal_pvalue = kruskal(*test_groups)
if all(test_groups[0].size == x.size for x in test_groups[1:]) and len(test_groups) > 2:
friedman_chisq_value, friedman_pvalue = friedmanchisquare(*test_groups)
else:
friedman_chisq_value, friedman_pvalue = (np.nan, np.nan)
test_results = []
for test in ('Mann-Whitney', 'Wilcoxon'):
if (
len(test_groups) == 2
or (test == 'Mann-Whitney' and kruskal_pvalue <= 0.05)
or (test == 'Wilcoxon' and friedman_pvalue <= 0.05)
):
one_test_result_dict_list = compute_pairwise_comparisons(df, binned_measure, value,
list(combinations(x_bin_order, 2)), test=test)
if len(one_test_result_dict_list) > 1:
p_values = [test_result_dict['p-value'] for test_result_dict in one_test_result_dict_list]
_, p_values, _, _ = multipletests(p_values, alpha=0.05, method='fdr_bh')
for test_result_dict, p_value in zip(one_test_result_dict_list, p_values):
test_result_dict['p-value'] = p_value
test_result_dict['correction'] = 'fdr_bh'
test_results.append(one_test_result_dict_list)
table_cell_text = [
['', 'statistic', 'p-value'],
['Kruskal-Wallis test', '{:.2e}'.format(kruskal_h_value), '{:.2e}'.format(kruskal_pvalue)],
['Friedman test', '{:.2e}'.format(friedman_chisq_value), '{:.2e}'.format(friedman_pvalue)]
]
if 'n' in df.columns:
for animal in sorted(df['animal'].unique()):
tmp = [['', '', ''], ['animal', animal, '']]
for distance_bin in x_bin_order:
tmp.append(['bin', str(distance_bin),
'n={}'.format(
df[(df['animal'] == animal)
& (df[binned_measure] == distance_bin)]['n'].values[0])]
)
table_cell_text += tmp
table_cell_text.append(['', '', ''])
table_cell_text.append(['bin', 'mean', ''])
for x_bin in x_bin_order:
table_cell_text.append([str(x_bin), str(np.nanmean(df.loc[df[binned_measure] == x_bin, value])), ''])
table_cell_text.append(['', '', ''])
table_cell_text.append(['Mean overall', str(np.nanmean(df[value])), ''])
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
plot_stats_dict_to_axes(test_results, stat_ax, loc=(0, 0), va='bottom')
significance_found = (
kruskal_pvalue <= 0.05 or friedman_pvalue <= 0.05
or (len(test_groups) == 2 and any([d['p-value'] <= 0.05 for d in sum(test_results, [])]))
)
return significance_found
@staticmethod
def compute_and_write_stats_with_hue(df, binned_measure, value, hue, stat_ax):
hue_values = df[hue].unique()
x_bin_order = sorted(df[binned_measure].unique())
if len(hue_values) != 2:
raise Exception('Only 2 hue levels supported.')
table_cell_text = []
for hue_value in hue_values:
df_tmp = df.loc[df[hue] == hue_value]
df_tmp = df_tmp.sort_values('animal')
test_groups = [df_tmp[df_tmp[binned_measure] == group][value]
for group in x_bin_order]
kruskal_h_value, kruskal_pvalue = kruskal(*test_groups)
if all(test_groups[0].size == x.size for x in test_groups[1:]) and len(test_groups) > 2:
friedman_chisq_value, friedman_pvalue = friedmanchisquare(*test_groups)
else:
friedman_chisq_value, friedman_pvalue = (np.nan, np.nan)
table_cell_text += [
['', 'statistic', 'p-value'],
['Kruskal-Wallis test', '{:.2e}'.format(kruskal_h_value), '{:.2e}'.format(kruskal_pvalue)],
['Friedman test', '{:.2e}'.format(friedman_chisq_value), '{:.2e}'.format(friedman_pvalue)]
]
if 'n' in df_tmp.columns:
for animal in sorted(df_tmp['animal'].unique()):
tmp = [['', '', ''], ['animal', animal, '']]
for distance_bin in x_bin_order:
tmp.append(['bin', str(distance_bin),
'n={}'.format(
df_tmp[(df_tmp['animal'] == animal)
& (df_tmp[binned_measure] == distance_bin)]['n'].values[0])]
)
table_cell_text += tmp
test_results = []
for test in ('Mann-Whitney', 'Wilcoxon'):
for binned_measure_value in df[binned_measure].unique():
df_tmp = df.loc[df[binned_measure] == binned_measure_value]
test_results.append(
compute_pairwise_comparisons(df_tmp, hue, value,
[hue_values], test=test)
)
table_cell_text.append(['', '', ''])
table_cell_text.append(['bin', 'mean', ''])
for x_bin in x_bin_order:
table_cell_text.append([str(x_bin), str(np.nanmean(df.loc[df[binned_measure] == x_bin, value])), ''])
table_cell_text.append(['', '', ''])
table_cell_text.append(['Mean overall', str(np.nanmean(df[value])), ''])
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
plot_stats_dict_to_axes(test_results, stat_ax, loc=(0, 0), va='bottom')
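        # With a hue variable, pairwise tests are always reported; returning True means the
        # caller always adds y-axis headroom (unless a log scale is used).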
significance_found = True
return significance_found
@staticmethod
def plot_value_binned_by_distance_as_scatter_for_one_environment(
df, binned_measure, bin_edges, value, environment, jitter, ax, stat_ax,
animal_colors_dict, filter_to_middle_third=False, filter_to_first_bin_from_wall=False,
orientation_rule=None, direction_rule=None, yscale=None, hue=None, legend=False
):
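        # Scatter-plot `value` against binned distance for a single environment, optionally
        # splitting points by a two-level hue (different markers), append the spatial-filter
        # legend and write the corresponding statistics to stat_ax.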
if legend and hue is None:
raise Exception('legend only available if hue specified.')
distance_bin_width = bin_edges[1] - bin_edges[0]
df = df.loc[df['environment'] == environment]
bin_edges = bin_edges[bin_edges < df[binned_measure].max() + distance_bin_width]
if hue is None:
bin_jitters = np.random.uniform(-jitter, jitter, df[binned_measure].size)
ax.scatter(
df[binned_measure] + bin_jitters,
df[value], s=50, c=[animal_colors_dict[x] for x in df['animal']],
linewidth=1, edgecolors='black', zorder=1, alpha=0.75
)
else:
hue_values = df[hue].unique()
if len(hue_values) != 2:
raise Exception('Hue {} has {} different values, but only exactly 2 is accepted.'.format(
hue, len(hue_values)))
scatter_handles = []
for hue_value, marker, hue_jitter in zip(hue_values, ('o', 'v'), (-jitter * 1.5, jitter * 1.5)):
df_hue = df.loc[df[hue] == hue_value]
bin_jitters = np.random.uniform(-jitter, jitter, df_hue[binned_measure].size) + hue_jitter
scatter_handles.append(ax.scatter(
df_hue[binned_measure] + bin_jitters,
df_hue[value], s=50, c=[animal_colors_dict[x] for x in df_hue['animal']],
linewidth=1, edgecolors='black', zorder=1, alpha=0.75, marker=marker, label=hue_value
))
if legend:
ax.legend(title=hue)
ax.set_xticks(bin_edges)
ax.set_xlim((bin_edges[0], bin_edges[-1]))
if yscale is not None:
ax.set_yscale(yscale)
ax.set_title('')
spatial_filter_legend_instance.append_to_axes(
ax, experiment_id_substitutes_inverse[environment],
distance_measure=binned_measure,
distance_bin_width=distance_bin_width,
filter_to_middle_third=filter_to_middle_third,
filter_to_first_bin_from_wall=filter_to_first_bin_from_wall,
orientation_rule=orientation_rule,
direction_rule=direction_rule,
proportional_to_environment_size=True,
max_bin_center=bin_edges[-1]
)
# Stats
if hue is None:
significance_found = BinnedByDistancePlots.compute_and_write_stats(df, binned_measure, value, stat_ax)
else:
significance_found = BinnedByDistancePlots.compute_and_write_stats_with_hue(df, binned_measure, value,
hue, stat_ax)
if significance_found and yscale is None:
ylim = list(ax.get_ylim())
ylim[1] = ylim[1] + 0.5 * (ylim[1] - ylim[0])
ax.set_ylim(ylim)
@staticmethod
def plot_value_binned_by_measure_as_scatter_by_environment(
df, ax, stat_ax, binned_measure, bin_edges, value, xlabel, ylabel,
filter_to_middle_third=False, filter_to_first_bin_from_wall=False,
orientation_rule=None, direction_rule=None, yscale=None, ymax=None,
plot_first_bin_comparison_between_environments=True, hue=None
):
if plot_first_bin_comparison_between_environments and hue is not None:
raise Exception('Plotting first bin comparison with hue is not supported')
environments = sorted(df['environment'].unique())
df['environment_number'] = df['environment'].map({'A': 1, 'B': 2, 'C': 3, 'D': 4})
binned_measure_order = {}
for environment in environments:
binned_measure_order[environment] = \
sorted(df.loc[df['environment'] == environment, binned_measure].unique())
first_bin = list(binned_measure_order.values())[0][0]
# Create ax
if plot_first_bin_comparison_between_environments:
width_ratios = [len(environments), 0.01] + [len(binned_measure_order[env]) for env in environments[1:]]
gs = GridSpecFromSubplotSpec(1, len(width_ratios), ax, wspace=0.25, width_ratios=width_ratios)
ax.axis('off')
axs = [ax.figure.add_subplot(g) for g in gs]
ax_empty = axs.pop(1)
ax_empty.axis('off')
ax_first_bin = axs.pop(0)
else:
width_ratios = [len(binned_measure_order[env]) for env in environments[1:]]
gs = GridSpecFromSubplotSpec(1, len(width_ratios), ax, wspace=0.25, width_ratios=width_ratios)
ax.axis('off')
axs = [ax.figure.add_subplot(g) for g in gs]
ax_first_bin = None
# Create stat ax
gs = GridSpecFromSubplotSpec(1, len(axs) + 1, stat_ax)
stat_ax.axis('off')
stat_axs = [stat_ax.figure.add_subplot(g) for g in gs]
# Plot
colors_dict = {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)}
if plot_first_bin_comparison_between_environments:
df_first_bin = df.loc[df[binned_measure] == first_bin]
ax_first_bin.scatter(
df_first_bin['environment_number'] + np.random.uniform(-0.2, 0.2, df_first_bin['environment_number'].size),
df_first_bin[value], s=50, c=[colors_dict[x] for x in df_first_bin['animal']],
linewidth=1, edgecolors='black', zorder=1, alpha=0.75
)
ax_first_bin.set_xlim((df_first_bin['environment_number'].min() - 0.5,
df_first_bin['environment_number'].max() + 0.5))
ax_first_bin.set_xticks(sorted(df_first_bin['environment_number'].unique()))
ax_first_bin.set_xticklabels(environments)
if yscale is not None:
ax_first_bin.set_yscale(yscale)
significance_found = BinnedByDistancePlots.compute_and_write_stats(
df_first_bin, 'environment_number', value, stat_axs[0]
)
if significance_found and yscale is None:
ylim = list(ax_first_bin.get_ylim())
ylim[1] = ylim[1] + 0.5 * (ylim[1] - ylim[0])
ax_first_bin.set_ylim(ylim)
for i, environment in enumerate(environments[1:]):
BinnedByDistancePlots.plot_value_binned_by_distance_as_scatter_for_one_environment(
df, binned_measure, bin_edges, value, environment, 0.2 * first_bin,
axs[i], stat_axs[i + 1], colors_dict,
filter_to_middle_third=filter_to_middle_third,
filter_to_first_bin_from_wall=filter_to_first_bin_from_wall,
orientation_rule=orientation_rule, direction_rule=direction_rule, yscale=yscale,
hue=hue, legend=(True if (hue is not None and environment == 'D') else False)
)
if plot_first_bin_comparison_between_environments:
all_axs = [ax_first_bin] + axs
else:
all_axs = axs
ylim = get_max_ylim(all_axs)
ylim = ylim if ymax is None else (ylim[0], ymax)
for ax in all_axs:
ax.set_ylim(ylim)
if plot_first_bin_comparison_between_environments:
for ax in axs:
ax.set_yticklabels([], [])
else:
axs[0].set_ylabel(ylabel)
for ax in axs[1:]:
ax.set_yticklabels([], [])
if plot_first_bin_comparison_between_environments:
ax_first_bin.set_ylabel(ylabel)
ax_first_bin.set_xlabel('environment')
if bin_edges[1] % 1 == 0:
ax_first_bin.set_title('< {:d} cm from wall'.format(int(bin_edges[1])))
else:
ax_first_bin.set_title('< {:.2f} cm from wall'.format(bin_edges[1]))
axs[1].set_xlabel(xlabel)
return all_axs
class FieldDensity:
bin_size = 25
max_bin_center = 100
environment_wall_sizes = {experiment_id: np.array([spatial_window[1], spatial_window[3]])
for experiment_id, spatial_window in spatial_windows.items()}
@staticmethod
def compute_environment_wall_distance_bin_areas(experiment_id):
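        # Walk inwards from the walls in steps of bin_size, recording the area (in m^2) of each
        # concentric ring between successive distance bins. As a hypothetical example, a
        # 100 cm x 150 cm arena with 25 cm bins gives a first ring of
        # (100 * 150 - 50 * 100) / 1e4 = 1.0 m^2, centred 12.5 cm from the walls.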
large_shape = FieldDensity.environment_wall_sizes[experiment_id]
current_distance = float(FieldDensity.bin_size) / 2.
areas = {}
while True:
small_shape = large_shape - 2 * FieldDensity.bin_size
if np.any(small_shape < 0):
break
areas[current_distance] = (np.prod(large_shape) - np.prod(small_shape)) / (10 ** 4)
current_distance = current_distance + FieldDensity.bin_size
large_shape = small_shape
return areas
@staticmethod
def compute_density_per_bin(df):
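        # Bin field peaks by their distance to the nearest wall, drop bins beyond
        # max_bin_center, and convert counts to densities using the per-ring areas above.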
experiment_id = df['experiment_id'].values[0]
distance_bin_areas = FieldDensity.compute_environment_wall_distance_bin_areas(experiment_id)
ValueByBinnedDistancePlot.bin_distance_values(df, 'peak_nearest_wall', FieldDensity.bin_size,
environment_column='experiment_id')
df.drop(index=df.loc[df['peak_nearest_wall'] > FieldDensity.max_bin_center].index, inplace=True)
df['count'] = 1
df = df.groupby(['experiment_id', 'animal', 'peak_nearest_wall']).sum().reset_index()
df['area'] = [distance_bin_areas[distance_bin] for distance_bin in df['peak_nearest_wall'].values]
df['density'] = df['count'] / df['area']
del df['count']
del df['area']
del df['experiment_id']
del df['animal']
df.rename(columns={'peak_nearest_wall': 'distance to wall (cm)'}, inplace=True)
return df
@staticmethod
def compute(df_units, df_fields):
# Create a copy of df_fields with only the relevant columns
df = df_fields.loc[df_fields['experiment_id'] != 'exp_scales_a2',
['animal', 'animal_unit', 'experiment_id', 'peak_nearest_wall']
].copy(deep=True)
df = df.merge(df_units[['animal', 'animal_unit', 'category']].copy(deep=True),
how='left', on=['animal', 'animal_unit'])
df = df[df['category'] == 'place_cell'] # Only keep place cell fields
df = df[['experiment_id', 'animal', 'peak_nearest_wall']]
df_count_density = \
df.groupby(['experiment_id', 'animal']).apply(FieldDensity.compute_density_per_bin).reset_index()
df_count = \
df.groupby('animal').count()[['experiment_id']].reset_index().rename(columns={'experiment_id': 'count'})
df = df_count_density.merge(df_count, how='left', on='animal')
df['density'] = df['density'] / df['count']
del df['count']
df['environment'] = df['experiment_id'].map(experiment_id_substitutes)
return df
@staticmethod
def plot(df_units, df_fields, ax, stat_ax):
df = FieldDensity.compute(df_units, df_fields)
binned_measure = 'distance to wall (cm)'
value = 'density'
xlabel = 'distance to wall (cm)'
ylabel = 'proportion of fields / m$^2$'
bin_edges = np.arange(0, df[binned_measure].max() + FieldDensity.bin_size, FieldDensity.bin_size)
BinnedByDistancePlots.plot_value_binned_by_measure_as_scatter_by_environment(
df, ax, stat_ax, binned_measure, bin_edges, value, xlabel, ylabel,
filter_to_middle_third=False, filter_to_first_bin_from_wall=False,
orientation_rule=None, direction_rule=None
)
@staticmethod
def make_figure(df_units, df_fields):
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
plt.subplots_adjust(left=0.14, bottom=0.16, right=0.97, top=0.8, wspace=0.25, hspace=0.4)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(30, 15))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
FieldDensity.plot(df_units, df_fields, ax, stat_ax)
return fig, stat_fig
@staticmethod
def write(fpath, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'FieldDensity'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FieldDensity.make_figure(df_units, df_fields)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldDensityByDwell:
bin_size = 25
max_bin_centers = {'A': 25, 'B': 50, 'C': 75, 'D': 100}
@staticmethod
def compute_sum_in_bins_within_and_across_distances_to_wall(experiment_id, x, y, values):
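        # Sum `values` within square bins of side FieldDensityByDwell.bin_size and return, for
        # each bin, a wall-distance measure derived from the bin centre together with the
        # summed value and the bin centre coordinates.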
x_width = spatial_windows[experiment_id][1]
y_width = spatial_windows[experiment_id][3]
wall_distances = []
sums = []
x_centers = []
y_centers = []
for x_edge in np.arange(0, x_width, FieldDensityByDwell.bin_size):
for y_edge in np.arange(0, y_width, FieldDensityByDwell.bin_size):
x_center = x_edge + FieldDensityByDwell.bin_size / 2.
y_center = y_edge + FieldDensityByDwell.bin_size / 2.
x_centers.append(x_center)
y_centers.append(y_center)
x_distance = min(x_center, max(x_width - x_center, x_width / 2.))
y_distance = min(y_center, max(y_width - y_center, x_width / 2.))
wall_distances.append(min(x_distance, y_distance))
idx = (((x >= x_edge) & (x < x_edge + FieldDensityByDwell.bin_size))
& ((y >= y_edge) & (y < y_edge + FieldDensityByDwell.bin_size)))
if np.sum(idx) == 0:
sums.append(0)
else:
sums.append(np.sum(values[idx]))
return np.array(wall_distances), np.array(sums), np.array(x_centers), np.array(y_centers)
@staticmethod
def get_sampling_maps(all_recordings):
sampling_maps = {}
position_bin_centers = {}
for recordings in all_recordings:
animal_sampling_maps = {}
animal_position_bins = {}
for recording in recordings[:4]:
spatial_ratemap = SpatialRatemap(
recording.position['xy'], np.array([0.1]), recording.position['sampling_rate'],
spatial_window=(0, recording.info['arena_size'][0], 0, recording.info['arena_size'][1]),
xy_mask=recording.position['analysis']['ratemap_speed_mask'],
bin_size=Params.spatial_ratemap['bin_size']
)
animal_sampling_maps[recording.info['experiment_id']] = spatial_ratemap.dwell_time
x_position_bin_centers, y_position_bin_centers = spatial_ratemap.position_bins
animal_position_bins[recording.info['experiment_id']] = {'x': x_position_bin_centers,
'y': y_position_bin_centers}
sampling_maps[recordings[0].info['animal']] = animal_sampling_maps
position_bin_centers[recordings[0].info['animal']] = animal_position_bins
return sampling_maps, position_bin_centers
@staticmethod
def compute(all_recordings, df_units, df_fields):
# Compute field counts
# Only keep fields belonging to place cells
df_fields = df_fields[df_fields['unit'].isin(np.where(df_units['category'] == 'place_cell')[0])]
# Only keep fields not in exp_scales_a2
df_fields = df_fields[df_fields['experiment_id'] != 'exp_scales_a2']
dfs = []
for animal in df_fields['animal'].unique():
for experiment_id in df_fields['experiment_id'].unique():
idx = (df_fields['animal'] == animal) & (df_fields['experiment_id'] == experiment_id)
wall_distances, field_counts, x_centers, y_centers = \
FieldDensityByDwell.compute_sum_in_bins_within_and_across_distances_to_wall(
experiment_id, df_fields.loc[idx, 'peak_x'], df_fields.loc[idx, 'peak_y'], np.ones(np.sum(idx))
)
dfs.append(pd.DataFrame({
'animal': animal, 'experiment_id': experiment_id, 'wall_distance': wall_distances,
'x_center': x_centers, 'y_center': y_centers, 'field_count': field_counts
}))
df = pd.concat(dfs, axis=0, ignore_index=True)
# Compute field count proportional to total in animal
df = df.merge(df_fields.groupby(['animal'])['experiment_id'].count().reset_index().rename(
columns={'experiment_id': 'animal_total_field_count'}), on='animal', how='left')
df['proportional_field_count'] = df['field_count'] / df['animal_total_field_count']
# Compute bin_area
dfs = []
for animal in df_fields['animal'].unique():
for experiment_id in df_fields['experiment_id'].unique():
x_centers = np.arange(0.5, spatial_windows[experiment_id][1], 1)
y_centers = np.arange(0.5, spatial_windows[experiment_id][3], 1)
x_centers, y_centers = np.meshgrid(x_centers, y_centers)
x_centers = x_centers.flatten()
y_centers = y_centers.flatten()
wall_distances, bin_areas_cm, x_centers, y_centers = \
FieldDensityByDwell.compute_sum_in_bins_within_and_across_distances_to_wall(
experiment_id, x_centers, y_centers, np.ones(x_centers.size)
)
bin_areas_m = bin_areas_cm / (100 ** 2)
dfs.append(pd.DataFrame({
'animal': animal, 'experiment_id': experiment_id, 'wall_distance': wall_distances,
'x_center': x_centers, 'y_center': y_centers, 'bin_area': bin_areas_m
}))
df = df.merge(pd.concat(dfs, axis=0, ignore_index=True),
on=['animal', 'experiment_id', 'wall_distance', 'x_center', 'y_center'],
how='left')
df['proportional_field_density'] = df['proportional_field_count'] / df['bin_area']
# Compute bin sampling density
sampling_maps, position_bin_centers = FieldDensityByDwell.get_sampling_maps(all_recordings)
dfs = []
for animal in df_fields['animal'].unique():
for experiment_id in df_fields['experiment_id'].unique():
x_centers, y_centers = np.meshgrid(position_bin_centers[animal][experiment_id]['x'],
position_bin_centers[animal][experiment_id]['y'])
x_centers = x_centers.flatten()
y_centers = y_centers.flatten()
wall_distances, dwell_times, x_centers, y_centers = \
FieldDensityByDwell.compute_sum_in_bins_within_and_across_distances_to_wall(
experiment_id, x_centers, y_centers, sampling_maps[animal][experiment_id].flatten()
)
dfs.append(pd.DataFrame({
'animal': animal, 'experiment_id': experiment_id, 'wall_distance': wall_distances,
'x_center': x_centers, 'y_center': y_centers, 'dwell_time': dwell_times
}))
df = df.merge(pd.concat(dfs, axis=0, ignore_index=True),
on=['animal', 'experiment_id', 'wall_distance', 'x_center', 'y_center'],
how='left')
# Split animal and experiment distance bins by dwell time
dfs = []
for animal in df['animal'].unique():
for experiment_id in df['experiment_id'].unique():
for wall_distance in df['wall_distance'].unique():
idx = ((df['animal'] == animal)
& (df['experiment_id'] == experiment_id)
& (df['wall_distance'] == wall_distance))
dwell_times = df.loc[idx, 'dwell_time'].values
                    dwell_time_group = np.array(['low' for _ in range(dwell_times.size)], dtype=object)
dwell_time_group[dwell_times >= np.median(dwell_times)] = 'high'
dfs.append(pd.DataFrame({
'animal': animal, 'experiment_id': experiment_id, 'wall_distance': wall_distance,
'x_center': df.loc[idx, 'x_center'].values, 'y_center': df.loc[idx, 'y_center'].values,
'dwell': dwell_time_group
}))
df = df.merge(pd.concat(dfs, axis=0, ignore_index=True),
on=['animal', 'experiment_id', 'wall_distance', 'x_center', 'y_center'],
how='left')
df = df.groupby(['animal', 'experiment_id', 'wall_distance', 'dwell'])['proportional_field_density'].mean()
df = df.reset_index()
df.rename(columns={'wall_distance': 'distance to wall (cm)'}, inplace=True)
df['environment'] = df['experiment_id'].map(experiment_id_substitutes)
for environment in df['environment'].unique():
df.drop(index=df.loc[(df['distance to wall (cm)'] > FieldDensityByDwell.max_bin_centers[environment])
& (df['environment'] == environment)].index, inplace=True)
return df
@staticmethod
def plot(all_recordings, df_units, df_fields, ax, stat_ax):
df = FieldDensityByDwell.compute(all_recordings, df_units, df_fields)
binned_measure = 'distance to wall (cm)'
value = 'proportional_field_density'
xlabel = 'distance to wall (cm)'
ylabel = 'proportion of fields / m$^2$'
hue = 'dwell'
bin_edges = np.arange(0, df[binned_measure].max() + FieldDensityByDwell.bin_size, FieldDensityByDwell.bin_size)
BinnedByDistancePlots.plot_value_binned_by_distance_as_scatter_for_one_environment(
df.loc[df['environment'] == 'D'], binned_measure, bin_edges, value, 'D', 0.1 * FieldDensityByDwell.bin_size,
ax, stat_ax, {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)},
filter_to_middle_third=False,
filter_to_first_bin_from_wall=False,
orientation_rule=None, direction_rule=None,
hue=hue, legend=True
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# BinnedByDistancePlots.plot_value_binned_by_measure_as_scatter_by_environment(
# df, ax, stat_ax, binned_measure, bin_edges, value, xlabel, ylabel,
# filter_to_middle_third=False, filter_to_first_bin_from_wall=False,
# orientation_rule=None, direction_rule=None,
# plot_first_bin_comparison_between_environments=False, hue=hue
# )
@staticmethod
def make_figure(all_recordings, df_units, df_fields):
fig, ax = plt.subplots(1, 1, figsize=(3, 4))
plt.subplots_adjust(left=0.28, bottom=0.16, right=0.93, top=0.82, wspace=0.25, hspace=0.4)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(30, 15))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
FieldDensityByDwell.plot(all_recordings, df_units, df_fields, ax, stat_ax)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'FieldDensityByDwell'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FieldDensityByDwell.make_figure(all_recordings, df_units, df_fields)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldSize(object):
experiment_ids = ('exp_scales_a', 'exp_scales_b', 'exp_scales_c', 'exp_scales_d')
@staticmethod
def plot_field_size_by_distance_to_wall(all_recordings, df_units, df_fields, ax, stat_ax, verbose=False):
"""Plotted data:
- field area
- by distance to closest wall
- including all positions
"""
if verbose:
print('Plotting field size by distance to wall')
df = get_field_data_with_distance_to_boundary(
all_recordings, df_units, df_fields, 'area', FieldSize.experiment_ids, verbose=verbose
)
# df['value'] = np.log10(df['value'])
ValueByBinnedDistancePlot.bin_distance_values(df, 'distance to wall (cm)', FieldDensity.bin_size)
df.drop(index=df.loc[df['distance to wall (cm)'] > FieldDensity.max_bin_center].index, inplace=True)
df = df[['animal', 'environment', 'distance to wall (cm)', 'value']]
df = df.groupby(['animal', 'environment', 'distance to wall (cm)']).mean().reset_index()
bin_edges = np.arange(0, df['distance to wall (cm)'].max() + FieldDensity.bin_size, FieldDensity.bin_size)
BinnedByDistancePlots.plot_value_binned_by_measure_as_scatter_by_environment(
df, ax, stat_ax, 'distance to wall (cm)', bin_edges, 'value',
'distance to wall (cm)', 'field area (cm$^2$)',
filter_to_middle_third=False, filter_to_first_bin_from_wall=False,
orientation_rule=None, direction_rule=None, yscale='log', ymax=(10 ** 5)
)
@staticmethod
def make_figure(all_recordings, df_units, df_fields, verbose=False):
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
plt.subplots_adjust(left=0.14, bottom=0.16, right=0.97, top=0.8, wspace=0.25, hspace=0.4)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(30, 15))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
FieldSize.plot_field_size_by_distance_to_wall(all_recordings, df_units, df_fields, ax, stat_ax,
verbose=verbose)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'FieldSize'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FieldSize.make_figure(all_recordings, df_units, df_fields, verbose=verbose)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldWidth(object):
@staticmethod
def plot_width_in_one_orientation_to_wall(df, orientation, ax, stat_ax):
if orientation == 'orthogonal':
values = 'width_orthogonal_to_short_wall'
orientation_rule = 'orthogonal_to_short_wall'
ylabel = r'field width $\bf\bot$ to short wall (cm)'
elif orientation == 'parallel':
values = 'width_parallel_to_short_wall'
orientation_rule = 'parallel_to_short_wall'
ylabel = r'field width $\bf\parallel$ to short wall (cm)'
else:
raise ValueError('Unknown orientation: {}'.format(orientation))
colors_dict = {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)}
distance_bins = sorted(np.unique(df['distance to short wall (cm)']))
bin_edges = np.arange(0, df['distance to short wall (cm)'].max() + FieldDensity.bin_size, FieldDensity.bin_size)
BinnedByDistancePlots.plot_value_binned_by_distance_as_scatter_for_one_environment(
df, 'distance to short wall (cm)', bin_edges,
values, 'D', 0.1 * np.max(np.diff(distance_bins)), ax, stat_ax, colors_dict,
filter_to_middle_third=True,
filter_to_first_bin_from_wall=False,
orientation_rule=orientation_rule, direction_rule=None
)
ax.set_xlabel('distance to short wall (cm)')
ax.set_ylabel(ylabel)
@staticmethod
def make_figure(all_recordings, df_units, df_fields, verbose=False):
fig, axs = plt.subplots(1, 2, figsize=(6, 4))
plt.subplots_adjust(left=0.18, bottom=0.16, right=0.95, top=0.8, wspace=0.5)
stat_fig, stat_axs = plt.subplots(1, 2, figsize=(20, 20))
plt.tight_layout(pad=1.5)
for stat_ax in stat_axs.flatten():
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
binned_measure = 'distance to short wall (cm)'
measures = ['width_parallel_to_short_wall', 'width_orthogonal_to_short_wall']
df = get_field_data_with_distance_to_boundary(
all_recordings, df_units, df_fields, measures,
FieldSize.experiment_ids, verbose=verbose
)
df = df.loc[df['environment'] == 'D'].copy()
df.dropna(inplace=True)
ValueByBinnedDistancePlot.bin_distance_values(df, binned_measure,
FieldDensity.bin_size)
df.drop(index=df.loc[df[binned_measure] > FieldDensity.max_bin_center].index, inplace=True)
df = ValueByBinnedDistancePlot.filter_to_middle_third(df)
df = df[['animal', 'environment', binned_measure] + measures]
agg_columns = ['animal', 'environment', binned_measure]
df = df.groupby(agg_columns).mean().reset_index()
FieldWidth.plot_width_in_one_orientation_to_wall(df, 'orthogonal', axs[0], stat_axs[0])
FieldWidth.plot_width_in_one_orientation_to_wall(df, 'parallel', axs[1], stat_axs[1])
ylim = get_max_ylim(axs)
axs[0].set_ylim(ylim)
axs[1].set_ylim(ylim)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'FieldWidth'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FieldWidth.make_figure(all_recordings, df_units, df_fields, verbose=verbose)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class AverageActivity:
@staticmethod
def plot_one_value_binned_by_distance_to_wall_in_one_environment(df, values, ylabel, ax, stat_ax):
ValueByBinnedDistancePlot.bin_distance_values(df, 'distance to wall (cm)',
FieldDensity.bin_size)
df.drop(index=df.loc[df['distance to wall (cm)'] > FieldDensity.max_bin_center].index, inplace=True)
df = df.loc[df['environment'] == 'D', ['animal', 'environment', 'distance to wall (cm)', values]].copy()
agg_columns = ['animal', 'environment', 'distance to wall (cm)']
df = df.groupby(agg_columns).mean().reset_index()
colors_dict = {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)}
distance_bins = sorted(np.unique(df['distance to wall (cm)']))
bin_edges = np.arange(0, df['distance to wall (cm)'].max() + FieldDensity.bin_size, FieldDensity.bin_size)
BinnedByDistancePlots.plot_value_binned_by_distance_as_scatter_for_one_environment(
df, 'distance to wall (cm)', bin_edges,
values, 'D', 0.1 * np.max(np.diff(distance_bins)), ax, stat_ax, colors_dict,
filter_to_middle_third=False,
filter_to_first_bin_from_wall=False,
orientation_rule=None, direction_rule=None
)
ax.set_xlabel('distance to wall (cm)')
ax.set_ylabel(ylabel)
@staticmethod
def make_figure(all_recordings):
fig, axs = plt.subplots(1, 2, figsize=(6, 4))
plt.subplots_adjust(left=0.13, bottom=0.16, right=0.95, top=0.8, wspace=0.5)
stat_fig, stat_axs = plt.subplots(1, 2, figsize=(20, 15))
plt.tight_layout(pad=1.5)
for stat_ax in stat_axs.flatten():
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
AverageActivity.plot_one_value_binned_by_distance_to_wall_in_one_environment(
FieldCoverage.get_mean_ratemap_measure_for_each_position(
all_recordings, 'active_unit_count', mean_per_field=False
),
'value', 'proportion of cells co-active', axs[0], stat_axs[0]
)
axs[0].set_ylim(0, 0.4)
AverageActivity.plot_one_value_binned_by_distance_to_wall_in_one_environment(
FieldCoverage.get_mean_ratemap_measure_for_each_position(
all_recordings, 'firing_rate', mean_per_field=False,
),
'value', 'mean spike rate (Hz)', axs[1], stat_axs[1]
)
axs[1].set_ylim(0, 1.2)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, prefix='', verbose=True):
figure_name = prefix + 'AverageActivity'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = AverageActivity.make_figure(all_recordings)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class InterneuronMeanRate(object):
@staticmethod
def get_dataframe(all_recordings, min_speed: float = 10, unit_category: str = 'interneuron',
experiment_id: str = 'exp_scales_d', verbose: bool = True):
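        """Return one row per position sample (above `min_speed`, inside the environment) with
        the mean smoothed spike rate across all units of `unit_category`, the sample
        coordinates, distance to the nearest wall and movement direction.
        """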
active_firing_rate = []
x_coords = []
y_coords = []
distances = []
animals = []
experiment_ids = []
environments = []
movement_directions = []
for recordings in all_recordings:
for i_recording, recording in enumerate(recordings[:4]):
if experiment_id is not None and recording.info['experiment_id'] != experiment_id:
continue
if verbose:
print('Computing {} mean rate for animal {} experiment {}'.format(
unit_category, recording.info['animal'], recording.info['experiment_id']
))
idx_samples_in_environment = (
np.all(recording.position['xy'] > 0, axis=1)
& (recording.position['xy'][:, 0] <= recording.info['arena_size'][0])
& (recording.position['xy'][:, 1] <= recording.info['arena_size'][1])
)
                idx_samples_with_sufficient_speed = recording.position['speed'] > min_speed
                idx_position_samples_to_use = idx_samples_in_environment & idx_samples_with_sufficient_speed
# Compute population vector
population_vectors = []
for i_unit, recordings_unit in enumerate(recordings.units):
if recordings.first_available_recording_unit(i_unit)['analysis']['category'] != unit_category:
continue
unit = recordings_unit[i_recording]
if unit is None:
timestamps = np.array([1])
else:
timestamps = unit['timestamps']
spike_histogram = count_spikes_in_sample_bins(
timestamps, recording.position['sampling_rate'],
0, recording.position['xy'].shape[0] - 1,
sum_samples=9,
sum_samples_kernel='gaussian'
)
spike_histogram *= 0 if unit is None else recording.position['sampling_rate']
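                    # Scale the smoothed per-sample spike counts by the position sampling rate to
                    # approximate a rate in Hz; units absent from this recording contribute zeros.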
population_vectors.append(spike_histogram)
population_vectors = np.stack(population_vectors, axis=1)
population_vectors = population_vectors[idx_position_samples_to_use, :]
sample_xy = recording.position['xy'][idx_position_samples_to_use, :]
# Compute movement direction
movement_direction = Recording.compute_movement_direction(sample_xy)
# Append to list across animals and recordings
active_firing_rate.append(np.mean(population_vectors, axis=1))
x_coords.append(sample_xy[:, 0])
y_coords.append(sample_xy[:, 1])
distances.append(np.array([
snippets.compute_distance_to_nearest_wall(one_sample_xy, recording.info['arena_size'])
for one_sample_xy in list(sample_xy)
]))
animals.append(np.array([recording.info['animal']] * sample_xy.shape[0]))
experiment_ids.append(np.array([recording.info['experiment_id']] * sample_xy.shape[0]))
environments.append(
np.array([experiment_id_substitutes[recording.info['experiment_id']]] * sample_xy.shape[0]))
movement_directions.append(movement_direction)
df = pd.DataFrame({
'animal': np.concatenate(animals),
'environment': np.concatenate(environments),
'experiment_id': np.concatenate(experiment_ids),
'x_coord': np.concatenate(x_coords),
'y_coord': np.concatenate(y_coords),
'distance to wall (cm)': np.concatenate(distances),
'mean rate (Hz)': np.concatenate(active_firing_rate),
'direction': np.concatenate(movement_directions)
})
compute_distances_to_landmarks(df, np.stack((df['x_coord'].values, df['y_coord'].values), axis=1))
return df
@staticmethod
def plot_all(all_recordings, ax, stat_ax):
df = InterneuronMeanRate.get_dataframe(all_recordings)
AverageActivity.plot_one_value_binned_by_distance_to_wall_in_one_environment(
df, 'mean rate (Hz)', 'interneuron mean spike rate (Hz)', ax, stat_ax
)
ax.set_ylim(0, 40)
@staticmethod
def make_figure(all_recordings):
fig, ax = plt.subplots(1, 1, figsize=(3.5, 4))
plt.subplots_adjust(left=0.25, bottom=0.16, right=0.9, top=0.8, wspace=0.5)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(10, 10))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
InterneuronMeanRate.plot_all(all_recordings, ax, stat_ax)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, prefix='', verbose=True):
figure_name = prefix + 'InterneuronMeanRate'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = InterneuronMeanRate.make_figure(all_recordings)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldWidthAll(object):
experiment_ids = ('exp_scales_a', 'exp_scales_b', 'exp_scales_c', 'exp_scales_d')
@staticmethod
def plot_field_size_by_distance_to_wall(all_recordings, df_units, df_fields, measure, binned_measure,
orientation_rule, xlabel, ylabel, ax, stat_ax, verbose=False):
df = get_field_data_with_distance_to_boundary(
all_recordings, df_units, df_fields, measure,
FieldWidthAll.experiment_ids, verbose=verbose
)
ValueByBinnedDistancePlot.bin_distance_values(df, binned_measure, FieldDensity.bin_size)
df.drop(index=df.loc[df[binned_measure] > FieldDensity.max_bin_center].index, inplace=True)
df = ValueByBinnedDistancePlot.filter_to_middle_third(df)
df = df[['animal', 'environment', binned_measure, 'value']]
df = df.groupby(['animal', 'environment', binned_measure]).mean().reset_index()
bin_edges = np.arange(0, df[binned_measure].max() + FieldDensity.bin_size, FieldDensity.bin_size)
return BinnedByDistancePlots.plot_value_binned_by_measure_as_scatter_by_environment(
df, ax, stat_ax, binned_measure, bin_edges, 'value', xlabel, ylabel,
filter_to_middle_third=True, filter_to_first_bin_from_wall=False,
orientation_rule=orientation_rule, direction_rule=None,
plot_first_bin_comparison_between_environments=False
)
@staticmethod
def make_figure(all_recordings, df_units, df_fields, verbose=False):
fig, axs = plt.subplots(1, 2, figsize=(10, 4))
plt.subplots_adjust(left=0.08, bottom=0.16, right=0.98, top=0.86, wspace=0.25, hspace=0.4)
stat_fig, stat_axs = plt.subplots(1, 2, figsize=(40, 15))
plt.tight_layout(pad=1.5)
for stat_ax in stat_axs:
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
sub_axs_1 = FieldWidthAll.plot_field_size_by_distance_to_wall(
all_recordings, df_units, df_fields, 'width_parallel_to_long_wall',
'distance to short wall (cm)', 'orthogonal_to_short_wall',
'distance to short wall (cm)', r'field width $\bf\bot$ to short wall (cm)',
axs[0], stat_axs[0], verbose=verbose
)
sub_axs_2 = FieldWidthAll.plot_field_size_by_distance_to_wall(
all_recordings, df_units, df_fields, 'width_parallel_to_short_wall',
'distance to short wall (cm)', 'parallel_to_short_wall',
'distance to short wall (cm)', r'field width $\bf\parallel$ to short wall (cm)',
axs[1], stat_axs[1], verbose=verbose
)
all_axs = sub_axs_1 + sub_axs_2
ylim = get_max_ylim(all_axs)
for ax in all_axs:
ax.set_ylim(ylim)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'FieldWidthAll'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FieldWidthAll.make_figure(all_recordings, df_units, df_fields, verbose=verbose)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class AverageActivityAll(object):
experiment_ids = ('exp_scales_a', 'exp_scales_b', 'exp_scales_c', 'exp_scales_d')
@staticmethod
def plot_activity_by_distance_to_wall(all_recordings, df_units, df_fields, measure, binned_measure,
xlabel, ylabel, ax, stat_ax, verbose=False):
df = FieldCoverage.get_mean_ratemap_measure_for_each_position(all_recordings, measure, mean_per_field=False)
ValueByBinnedDistancePlot.bin_distance_values(df, binned_measure, FieldDensity.bin_size)
df.drop(index=df.loc[df[binned_measure] > FieldDensity.max_bin_center].index, inplace=True)
df = ValueByBinnedDistancePlot.filter_to_middle_third(df)
df = df[['animal', 'environment', binned_measure, 'value']]
df = df.groupby(['animal', 'environment', binned_measure]).mean().reset_index()
bin_edges = np.arange(0, df[binned_measure].max() + FieldDensity.bin_size, FieldDensity.bin_size)
return BinnedByDistancePlots.plot_value_binned_by_measure_as_scatter_by_environment(
df, ax, stat_ax, binned_measure, bin_edges, 'value', xlabel, ylabel,
filter_to_middle_third=False, filter_to_first_bin_from_wall=False,
orientation_rule=None, direction_rule=None,
plot_first_bin_comparison_between_environments=True
)
@staticmethod
def make_figure(all_recordings, df_units, df_fields, verbose=False):
fig, axs = plt.subplots(1, 2, figsize=(12, 4))
plt.subplots_adjust(left=0.07, bottom=0.16, right=0.98, top=0.84, wspace=0.25, hspace=0.3)
stat_fig, stat_axs = plt.subplots(1, 2, figsize=(40, 15))
plt.tight_layout(pad=1.5)
for stat_ax in stat_axs:
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
sub_axs = AverageActivityAll.plot_activity_by_distance_to_wall(
all_recordings, df_units, df_fields, 'active_unit_count',
'distance to wall (cm)', 'distance to wall (cm)', 'proportion of cells co-active',
axs[0], stat_axs[0], verbose=verbose
)
for ax in sub_axs:
ax.set_ylim((0, 0.4))
sub_axs = AverageActivityAll.plot_activity_by_distance_to_wall(
all_recordings, df_units, df_fields, 'firing_rate',
'distance to wall (cm)', 'distance to wall (cm)', 'mean spike rate (Hz)',
axs[1], stat_axs[1], verbose=verbose
)
for ax in sub_axs:
ax.set_ylim((0, 1.2))
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'AverageActivityAll'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = AverageActivityAll.make_figure(all_recordings, df_units, df_fields, verbose=verbose)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FiringRateDistribution(object):
@staticmethod
def compute(all_recordings,
only_take_every_nth_sample: int = 15,
min_speed: float = 10,
min_spike_rate: float = 1.0,
verbose: bool = True):
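        # Build a per-sample dataframe of instantaneous place-cell spike rates in exp_scales_d
        # (Gaussian-smoothed over 15 position samples), keeping only samples above `min_speed`
        # and rates of at least `min_spike_rate`.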
smoothing_position_samples = 15
smoothing_method = 'gaussian'
x_coords = []
y_coords = []
distances = []
directions = []
spike_rates = []
animals = []
experiment_ids = []
environments = []
for recordings in all_recordings:
for i_recording, recording in enumerate(recordings[:4]):
if recording.info['experiment_id'] != 'exp_scales_d':
continue
if verbose:
print('Computing firing rates for animal {} experiment {}'.format(
recording.info['animal'], recording.info['experiment_id']
))
inds = None
if only_take_every_nth_sample is None:
xy = recording.position['xy']
speed = recording.position['speed']
movement_direction = Recording.compute_movement_direction(xy)
else:
inds = np.arange(0, recording.position['xy'].shape[0], only_take_every_nth_sample)
xy = recording.position['xy'][inds, :]
speed = recording.position['speed'][inds]
movement_direction = Recording.compute_movement_direction(recording.position['xy'])[inds]
idx_samples_in_environment = (
np.all(xy > 0, axis=1)
& (xy[:, 0] <= recording.info['arena_size'][0])
& (xy[:, 1] <= recording.info['arena_size'][1])
)
                idx_samples_with_sufficient_speed = speed > min_speed
                idx_position_samples_to_use = idx_samples_in_environment & idx_samples_with_sufficient_speed
xy = xy[idx_position_samples_to_use, :]
movement_direction = movement_direction[idx_position_samples_to_use]
distance = np.array([
snippets.compute_distance_to_nearest_wall(one_sample_xy, recording.info['arena_size'])
for one_sample_xy in list(xy)
])
animal = np.array([recording.info['animal']] * xy.shape[0])
experiment_id = np.array([recording.info['experiment_id']] * xy.shape[0])
environment = np.array([experiment_id_substitutes[recording.info['experiment_id']]] * xy.shape[0])
# Compute population vector
for i_unit, recordings_unit in enumerate(recordings.units):
if recordings.first_available_recording_unit(i_unit)['analysis']['category'] != 'place_cell':
continue
unit = recordings_unit[i_recording]
if unit is None:
timestamps = np.array([1])
else:
timestamps = unit['timestamps']
spike_histogram = count_spikes_in_sample_bins(
timestamps, recording.position['sampling_rate'],
0, recording.position['xy'].shape[0] - 1,
sum_samples=smoothing_position_samples,
sum_samples_kernel=smoothing_method
)
spike_histogram *= 0 if unit is None else recording.position['sampling_rate']
if only_take_every_nth_sample is None:
spike_rates.append(spike_histogram[idx_position_samples_to_use])
else:
spike_rates.append(spike_histogram[inds][idx_position_samples_to_use])
x_coords.append(xy[:, 0])
y_coords.append(xy[:, 1])
distances.append(distance)
directions.append(movement_direction)
animals.append(animal)
experiment_ids.append(experiment_id)
environments.append(environment)
df = pd.DataFrame({
'animal': np.concatenate(animals),
'environment': np.concatenate(environments),
'experiment_id': np.concatenate(experiment_ids),
'spike rate (Hz)': np.concatenate(spike_rates),
'x_coord': np.concatenate(x_coords),
'y_coord': np.concatenate(y_coords),
'distance to wall (cm)': np.concatenate(distances),
'direction': np.concatenate(directions)
})
df = df.loc[df['spike rate (Hz)'] >= min_spike_rate].copy()
df['log10( spike rate (Hz) )'] = np.log10(df['spike rate (Hz)'])
# compute_distances_to_landmarks(df, np.stack((df['x_coord'].values, df['y_coord'].values), axis=1))
return df
@staticmethod
def label_yaxis_as_log10(ax):
ax.yaxis.set_major_locator(MultipleLocator(1))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
        yticklabels = ['10$^{:d}$'.format(int(y)) for y in ax.get_yticks(minor=False)]
ax.set_yticklabels([], minor=True)
ax.set_yticklabels(yticklabels, minor=False)
@staticmethod
def plot(all_recordings, axs, stat_ax):
df = FiringRateDistribution.compute(all_recordings)
ValueByBinnedDistancePlot.bin_distance_values(df, 'distance to wall (cm)',
FieldDensity.bin_size)
df.drop(index=df.loc[df['distance to wall (cm)'] > FieldDensity.max_bin_center].index, inplace=True)
bin_edges = np.arange(0, df['distance to wall (cm)'].max() + FieldDensity.bin_size, FieldDensity.bin_size)
if np.all([x % 1 == 0 for x in bin_edges]):
bin_edges = bin_edges.astype(np.int32)
df['distance to wall (cm)'] = list(map(str, df['distance to wall (cm)'].astype(np.int16)))
distance_bins = sorted(df['distance to wall (cm)'].unique())
df['recording halves'] = ''
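        # Split each animal's samples within each distance bin into two equally sized halves by
        # row order and label them as recording halves.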
for animal in sorted(df['animal'].unique()):
for distance_to_wall in distance_bins:
indices = df.loc[(df['distance to wall (cm)'] == distance_to_wall) & (df['animal'] == animal)].index
first_half_index_count = int(round(indices.size / 2.))
df.loc[indices[:first_half_index_count], 'recording halves'] = '1st'
df.loc[indices[first_half_index_count:], 'recording halves'] = '2nd'
sns.violinplot(x='distance to wall (cm)', y='log10( spike rate (Hz) )', hue='recording halves', data=df,
split=False, scale='width', ax=axs[0],
palette=sns.color_palette(sns_other_colors[:2], n_colors=2))
xticks = axs[0].get_xticks()
xtick_spacing = xticks[1] - xticks[0]
xticks = xticks - xtick_spacing / 2.
xticks = np.append(xticks, xticks[-1] + xtick_spacing)
axs[0].set_xticks(xticks)
axs[0].set_xticklabels(list(map(str, bin_edges)))
axs[0].set_ylim((-0.25, 2.25))
FiringRateDistribution.label_yaxis_as_log10(axs[0])
axs[0].set_ylabel('active place cell\nspike rate (Hz)')
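        # Quantify distribution similarity with the Jensen-Shannon divergence: between the two
        # recording halves within each distance bin, and between different distance bins
        # (for every pairing of halves).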
distribution_bin_edges = np.linspace(0, df['log10( spike rate (Hz) )'].max(), 100)
groups = []
distances = []
for distance_to_wall in distance_bins:
groups.append('different recording halves')
first_half_distribution, _ = np.histogram(
df.loc[(df['distance to wall (cm)'] == distance_to_wall)
& (df['recording halves'] == '1st'), 'log10( spike rate (Hz) )'],
distribution_bin_edges
)
first_half_distribution = first_half_distribution / np.sum(first_half_distribution)
second_half_distribution, _ = np.histogram(
df.loc[(df['distance to wall (cm)'] == distance_to_wall)
& (df['recording halves'] == '2nd'), 'log10( spike rate (Hz) )'],
distribution_bin_edges
)
second_half_distribution = second_half_distribution / np.sum(second_half_distribution)
distances.append(
jensenshannon(first_half_distribution, second_half_distribution)
)
for i, bin_i in enumerate(distance_bins[:-1]):
for bin_j in distance_bins[i + 1:]:
                for halves in [('1st', '1st'), ('1st', '2nd'), ('2nd', '1st'), ('2nd', '2nd')]:
groups.append('different distances to wall')
first_distribution, _ = np.histogram(
df.loc[(df['distance to wall (cm)'] == bin_i) & (df['recording halves'] == halves[0]),
'log10( spike rate (Hz) )'],
distribution_bin_edges
)
first_distribution = first_distribution / np.sum(first_distribution)
second_distribution, _ = np.histogram(
df.loc[(df['distance to wall (cm)'] == bin_j) & (df['recording halves'] == halves[1]),
'log10( spike rate (Hz) )'],
distribution_bin_edges
)
second_distribution = second_distribution / np.sum(second_distribution)
distances.append(
jensenshannon(first_distribution, second_distribution)
)
df_distances = pd.DataFrame({'': groups, 'Jensen–Shannon divergence': distances})
sns.swarmplot(x='Jensen–Shannon divergence', y='', data=df_distances, ax=axs[1],
palette=sns.color_palette(sns_other_colors[2:4], n_colors=2))
statistic, pvalue = mannwhitneyu(
df_distances.loc[df_distances[''] == 'different recording halves', 'Jensen–Shannon divergence'],
df_distances.loc[df_distances[''] == 'different distances to wall', 'Jensen–Shannon divergence']
)
axs[1].text(
(df_distances['Jensen–Shannon divergence'].max() + axs[1].get_xlim()[1]) / 2., 0.5,
'ns' if pvalue > 0.05 else 'p = {:.3f}'.format(pvalue),
va='center', ha='center'
)
table_cell_text = [['Mann-Whitney', 'U statistic', 'p-value'],
['', '{:.3e}'.format(statistic), '{:.3e}'.format(pvalue)]]
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
@staticmethod
def make_figure(all_recordings):
fig, axs = plt.subplots(2, 1, figsize=(6, 4), gridspec_kw={'height_ratios': [4, 1]})
plt.subplots_adjust(left=0.16, bottom=0.13, right=0.97, top=0.95, hspace=0.5)
axs[1].axis('off')
gs = GridSpecFromSubplotSpec(1, 2, axs[1], width_ratios=[1, 3])
ax_bottom = fig.add_subplot(gs[1])
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(10, 10))
plt.tight_layout(pad=1.5)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
FiringRateDistribution.plot(all_recordings, [axs[0], ax_bottom], stat_ax)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, prefix='', verbose=True):
figure_name = prefix + 'FiringRateDistribution'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FiringRateDistribution.make_figure(all_recordings)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FieldAreaDistribution(object):
experiment_ids = ('exp_scales_a', 'exp_scales_b', 'exp_scales_c', 'exp_scales_d')
@staticmethod
def compute_field_area_distribution(df_units, df_fields):
# Create a copy of df_fields with only the relevant columns
df = df_fields[['animal', 'animal_unit', 'experiment_id', 'area']].copy(deep=True)
df = df.merge(df_units[['animal', 'animal_unit', 'category']].copy(deep=True),
how='left', on=['animal', 'animal_unit'])
df = df[df['category'] == 'place_cell'] # Only keep place cell fields
df = df[['experiment_id', 'animal', 'area']]
# Drop fields in exp_scales_a2
df = df[df['experiment_id'] != 'exp_scales_a2']
# Get total field area per animal
total_field_area = {}
for animal in sorted(df['animal'].unique()):
total_field_area[animal] = df.loc[df['animal'] == animal, 'area'].sum()
# Replace experiment_id values for plotting and rename the column
df.replace(to_replace={'experiment_id': experiment_id_substitutes}, inplace=True)
df.rename(columns={'experiment_id': 'environment'}, inplace=True)
# Compute total field area in each environment for each animal
df = df.groupby(['animal', 'environment']).sum()
df = df.sort_values('animal')
df.reset_index(inplace=True)
# Compute total field area in each environment as percentage of total field area per animal
df['area'] = df['area'] / df.groupby(['animal'])['area'].transform('sum')
# Convert environment labels to environment area values
df['environment'] = np.array([arena_areas_meters_short_env[x] for x in df['environment']])
# Compute field area as percentage per square metre
df['area per m2'] = df['area'] / df['environment']
return df
@staticmethod
def plot_field_area_distribution(df_units, df_fields, ax, stat_ax):
df = FieldAreaDistribution.compute_field_area_distribution(df_units, df_fields)
# Create variables for plotting
environments = sorted(np.unique(df['environment']))
colors_dict = {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)}
# Plot field area distribution
ax.scatter(df['environment'] + np.random.uniform(-0.3, 0.3, df['environment'].size),
df['area'], s=50, c=[colors_dict[x] for x in df['animal']],
linewidth=1, edgecolors='black', zorder=1, alpha=0.75)
# Fit and plot linear model to data
fit_x_vals = np.linspace(0, 9, 100)
line_slope, line_intercept, line_r_value, line_p_value, line_std_err = \
linregress(df['environment'], df['area'])
ax.plot(fit_x_vals, line_intercept + line_slope * fit_x_vals, color='black', linestyle=':', zorder=-1)
# Plot linear model r value
        ax.text(0.05, 0.95, r'$\it{r}$' + ' = {:.{prec}f}'.format(line_r_value, prec=3),
ha='left', va='top', transform=ax.transAxes)
# Plot place field density
ax_inset_height = (line_intercept + line_slope * environments[-1] * 0.6) / ax.get_ylim()[1] * 100 * 0.60
ax_inset = inset_axes(ax, width='40%', height='{:.0f}%'.format(ax_inset_height), loc='lower right')
ax_inset.xaxis.tick_top()
ax_inset.scatter(df['environment'] + np.random.uniform(-0.25, 0.25, df['environment'].size),
df['area per m2'], s=25, c=[colors_dict[x] for x in df['animal']],
linewidth=1, edgecolors='black', zorder=1, alpha=0.75)
# Adjust axes parameters
ax.set_ylim((0, ax.get_ylim()[1]))
ax.set_xticks(experiment_ids_with_areas_ticks['ticks'])
ax.set_xticklabels(experiment_ids_with_areas_ticks['ticklabels'])
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
ax.set_xlabel('environment, size (m$^2$)')
ax.set_ylabel('proportion of field area in environment')
ax_inset.set_ylim((0, ax_inset.get_ylim()[1] * 2))
ax_inset.set_xlim((0, ax_inset.get_xlim()[1]))
ax_inset.set_xticks(experiment_ids_with_areas_ticks['ticks'])
ax_inset.set_xlabel('size (m$^2$)')
ax_inset.set_ylabel('proportional\narea / m$^2$')
plt.setp(ax_inset.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor')
ax_inset.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax_inset.xaxis.set_label_position('top')
ax_inset.xaxis.set_ticks_position('top')
ax_inset.yaxis.labelpad = 0
# Fit and plot inset linear model to data
inset_line_slope, inset_line_intercept, inset_line_r_value, inset_line_p_value, inset_line_std_err = \
linregress(df['environment'], df['area per m2'])
ax_inset.plot(fit_x_vals, inset_line_intercept + inset_line_slope * fit_x_vals,
color='black', linestyle=':', zorder=-1)
# Compute stats
kruskal_h_value, kruskal_pvalue = \
kruskal(*[df[df['environment'] == group]['area'] for group in environments])
kruskal_h_value_density, kruskal_pvalue_density = \
kruskal(*[df[df['environment'] == group]['area per m2'] for group in environments])
df_sorted = df.sort_values('animal')
friedman_chisq_value, friedman_pvalue = \
friedmanchisquare(*[df_sorted[df_sorted['environment'] == group]['area']
for group in environments])
friedman_chisq_value_density, friedman_pvalue_density = \
friedmanchisquare(*[df_sorted[df_sorted['environment'] == group]['area per m2']
for group in environments])
# Plot stats to stat_ax
stat_ax.set_title('Place field formation')
table_cell_text = [['Field areas', 'H-value', 'p-value'],
['Kruskal-Wallis test',
'{:.2e}'.format(kruskal_h_value), '{:.2e}'.format(kruskal_pvalue)],
['', '', ''],
['Field areas per m2', 'H-value', 'p-value'],
['Kruskal-Wallis test',
'{:.2e}'.format(kruskal_h_value_density), '{:.2e}'.format(kruskal_pvalue_density)],
['', '', ''],
['Field areas', 'chi-square statistic', 'p-value'],
['Friedman test',
'{:.2e}'.format(friedman_chisq_value), '{:.2e}'.format(friedman_pvalue)],
['', '', ''],
['Field areas per m2', 'chi-square statistic', 'p-value'],
['Friedman test',
'{:.2e}'.format(friedman_chisq_value_density), '{:.2e}'.format(friedman_pvalue_density)],
['', '', ''],
['fitted linear model', 'parameters', ''],
['', 'line_slope', '{:.3f}'.format(line_slope)],
['', 'line_intercept', '{:.3f}'.format(line_intercept)],
['', 'line_r_value', '{:.3f}'.format(line_r_value)],
['', 'line_p_value', '{:.3e}'.format(line_p_value)],
['', 'line_std_err', '{:.3f}'.format(line_std_err)],
['', '', ''],
['Inset:', '', ''],
['fitted linear model', 'parameters', ''],
['', 'line_slope', '{:.3f}'.format(inset_line_slope)],
['', 'line_intercept', '{:.3f}'.format(inset_line_intercept)],
['', 'line_r_value', '{:.3f}'.format(inset_line_r_value)],
['', 'line_p_value', '{:.3e}'.format(inset_line_p_value)],
['', 'line_std_err', '{:.3f}'.format(inset_line_std_err)]]
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
@staticmethod
def make_figure(df_units, df_fields):
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
plt.subplots_adjust(left=0.15, bottom=0.2, right=0.993, top=0.95, hspace=0.45)
stat_fig, stat_ax = plt.subplots(1, 1, figsize=(10, 10))
plt.tight_layout(pad=1)
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
FieldAreaDistribution.plot_field_area_distribution(df_units, df_fields, ax, stat_ax)
return fig, stat_fig
@staticmethod
def write(fpath, df_units, df_fields, prefix='', verbose=True):
figure_name = prefix + 'FieldAreaDistribution'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FieldAreaDistribution.make_figure(df_units, df_fields)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FiringRateChange(object):
@staticmethod
def plot_change_in_runs_in_one_direction_to_wall(df, value, filtering_rule, ax, stat_ax):
if filtering_rule == 'alonglongwall':
direction_rule = 'orthogonal_to_short_wall'
ylabel = 'population vector change\n' + r'in runs $\bf\bot$ to short wall (Hz/cm)'
elif filtering_rule == 'alongshortwall':
direction_rule = 'parallel_to_short_wall'
ylabel = 'population vector change\n' + r'in runs $\bf\parallel$ to short wall (Hz/cm)'
else:
raise ValueError('Unknown orientation: {}'.format(filtering_rule))
colors_dict = {animal: color for animal, color in zip(sorted(df['animal'].unique()), sns_animal_colors)}
distance_bins = sorted(np.unique(df['distance to short wall (cm)']))
bin_edges = np.arange(0, df['distance to short wall (cm)'].max() + FieldDensity.bin_size, FieldDensity.bin_size)
BinnedByDistancePlots.plot_value_binned_by_distance_as_scatter_for_one_environment(
df, 'distance to short wall (cm)', bin_edges,
value, 'D', 0.1 * np.max(np.diff(distance_bins)), ax, stat_ax, colors_dict,
filter_to_middle_third=False,
filter_to_first_bin_from_wall=False,
orientation_rule=None, direction_rule=direction_rule
)
ax.set_xlabel('distance to short wall (cm)')
ax.set_ylabel(ylabel)
@staticmethod
def plot_stats_comparing_orthogonal_and_parallel_runs(df_orthogonal, df_parallel, binned_measure, value, stat_ax):
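        # For each animal, compare the change in `value` between the first and second distance bin
        # for orthogonal vs parallel runs (Mann-Whitney U), and also compare the two run types
        # within each distance bin.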
agg_columns = ['animal', 'environment', binned_measure]
df_orthogonal = df_orthogonal[['animal', 'environment', binned_measure, value]]
df_orthogonal = df_orthogonal.groupby(agg_columns).mean().reset_index()
df_parallel = df_parallel[['animal', 'environment', binned_measure, value]]
df_parallel = df_parallel.groupby(agg_columns).mean().reset_index()
bins = sorted(df_orthogonal[binned_measure].unique())
diff = {}
for name, df in zip(('orthogonal', 'parallel'), (df_orthogonal, df_parallel)):
df_first = df.loc[df[binned_measure] == bins[0], ['animal', value]].rename(columns={value: '1st'})
df_second = df.loc[df[binned_measure] == bins[1], ['animal', value]].rename(columns={value: '2nd'})
df = df_first.merge(df_second, on='animal', how='inner')
diff[name] = df['2nd'] - df['1st']
diff_statistic, diff_pvalue = mannwhitneyu(diff['orthogonal'], diff['parallel'])
bin_stats = []
for bin in bins:
bin_stats.append(
mannwhitneyu(df_orthogonal.loc[df_orthogonal[binned_measure] == bin, value],
df_parallel.loc[df_parallel[binned_measure] == bin, value])
)
table_cell_text = [['diff', 'stat', 'p-value'],
['', '{:.4f}'.format(diff_statistic), '{:.4f}'.format(diff_pvalue)],
['', '', ''],
['diff name', 'diff value', '']]
for name, diff_value in diff.items():
table_cell_text.append([name, '{:.3f}'.format(np.mean(diff[name])), ''])
table_cell_text.append(['', '', ''])
table_cell_text.append(['bin', 'stat', 'pvalue'])
for bin, stats in zip(bins, bin_stats):
table_cell_text.append([str(bin), '{:.4f}'.format(stats[0]), '{:.4f}'.format(stats[1])])
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
@staticmethod
def add_theta_frequency_column(all_recordings, df, df_units):
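        # Interpolate the LFP theta frequency onto the timestamps in df, separately for each
        # animal and experiment.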
df['theta_frequency'] = np.nan
for animal in df['animal'].unique():
for experiment_id in df['experiment_id'].unique():
animal_and_experiment_done = False
for recordings in all_recordings:
for recording in recordings:
if animal_and_experiment_done:
continue
if recording.info['animal'] != animal or recording.info['experiment_id'] != experiment_id:
continue
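                        # Use the theta frequency of the tetrode (channel group) that carries
                        # the most place cells for this animal.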
place_cell_count_per_hemisphere = \
df_units.loc[
(df_units['animal'] == animal) & (df_units['category'] == 'place_cell')
, 'channel_group'
].value_counts()
theta_frequency = FrequencyBandFrequency(recording, 'theta_frequency')
tetrode_index = \
theta_frequency.data['channel_labels'].index(place_cell_count_per_hemisphere.idxmax())
idx = (df['animal'] == animal) & (df['experiment_id'] == experiment_id)
df.loc[idx, 'theta_frequency'] = \
theta_frequency.get_values_interpolated_to_timestamps(
df.loc[idx, 'timestamp'].values
)[:, tetrode_index]
animal_and_experiment_done = True
@staticmethod
def add_smoothed_speed_column(all_recordings, df):
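        # Interpolate the smoothed running speed onto the timestamps in df for each animal and experiment.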
df['running_speed'] = np.nan
for animal in df['animal'].unique():
for experiment_id in df['experiment_id'].unique():
animal_and_experiment_done = False
for recordings in all_recordings:
for recording in recordings:
if animal_and_experiment_done:
continue
if recording.info['animal'] != animal or recording.info['experiment_id'] != experiment_id:
continue
idx = (df['animal'] == animal) & (df['experiment_id'] == experiment_id)
speed = recording.get_smoothed_speed(Params.xy_masking['speed_smoothing_window'])
df.loc[idx, 'running_speed'] = \
np.interp(df.loc[idx, 'timestamp'].values, recording.position['timestamps'], speed)
animal_and_experiment_done = True
@staticmethod
def make_figure(fpath, all_recordings, df_units, verbose=False):
fig, axs = plt.subplots(1, 2, figsize=(7, 4))
plt.subplots_adjust(left=0.13, bottom=0.16, right=0.95, top=0.8, wspace=0.65)
stat_fig, stat_axs = plt.subplots(1, 3, figsize=(30, 20))
plt.tight_layout(pad=1.5)
for stat_ax in stat_axs.flatten():
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
binned_measure = 'distance to short wall (cm)'
value = 'rate change\n(euclidean)'
df = PopulationVectorChangeRate.get_dataframe(all_recordings)
df.dropna(inplace=True)
df = df.loc[df['environment'] == 'D'].copy()
FiringRateChange.add_theta_frequency_column(all_recordings, df, df_units)
FiringRateChange.add_smoothed_speed_column(all_recordings, df)
# Write DataFrame to disk for use in other analyses
population_vector_change_file_path = construct_df_population_vector_change_file_path(fpath)
df.to_pickle(population_vector_change_file_path)
print('Population vector change values written to {}'.format(population_vector_change_file_path))
ValueByBinnedDistancePlot.bin_distance_values(df, binned_measure,
FieldDensity.bin_size)
df.drop(index=df.loc[df[binned_measure] > FieldDensity.max_bin_center].index, inplace=True)
df_alonglongwall = filter_dataframe_by_direction(
df.copy(deep=True), 'alonglongwall', section_width='quadrants'
)
df_alongshortwall = filter_dataframe_by_direction(
df.copy(deep=True), 'alongshortwall', section_width='quadrants'
)
for df_tmp, filtering_rule, ax, stat_ax in zip((df_alonglongwall, df_alongshortwall),
('alonglongwall', 'alongshortwall'),
axs, stat_axs[:2]):
df_tmp = df_tmp[['animal', 'environment', binned_measure, value]]
agg_columns = ['animal', 'environment', binned_measure]
df_tmp = df_tmp.groupby(agg_columns).mean().reset_index()
FiringRateChange.plot_change_in_runs_in_one_direction_to_wall(df_tmp, value, filtering_rule, ax, stat_ax)
ylim = get_max_ylim(axs)
axs[0].set_ylim(ylim)
axs[1].set_ylim(ylim)
FiringRateChange.plot_stats_comparing_orthogonal_and_parallel_runs(
df_alonglongwall, df_alongshortwall, binned_measure, value, stat_axs[2]
)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, df_units, prefix='', verbose=True):
figure_name = prefix + 'FiringRateChange'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FiringRateChange.make_figure(fpath, all_recordings, df_units, verbose=verbose)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FiringRateChangeAll(object):
experiment_ids = ('exp_scales_a', 'exp_scales_b', 'exp_scales_c', 'exp_scales_d')
@staticmethod
def plot_field_change_by_distance_to_wall(df, value, binned_measure, direction_rule,
ylabel, ax, stat_ax):
df = df[['animal', 'environment', binned_measure, value]].dropna()
df = df.groupby(['animal', 'environment', binned_measure]).mean().reset_index()
bin_edges = np.arange(0, df[binned_measure].max() + FieldDensity.bin_size, FieldDensity.bin_size)
return BinnedByDistancePlots.plot_value_binned_by_measure_as_scatter_by_environment(
df, ax, stat_ax, binned_measure, bin_edges, value,
'distance to short wall (cm)', ylabel,
filter_to_middle_third=False, filter_to_first_bin_from_wall=False,
orientation_rule=None, direction_rule=direction_rule,
plot_first_bin_comparison_between_environments=False
)
@staticmethod
def make_figure(all_recordings):
fig, axs = plt.subplots(1, 2, figsize=(11, 4))
plt.subplots_adjust(left=0.1, bottom=0.16, right=0.98, top=0.86, wspace=0.4)
stat_fig, stat_axs = plt.subplots(1, 2, figsize=(40, 15))
plt.tight_layout(pad=1.5)
for stat_ax in stat_axs:
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
binned_measure = 'distance to short wall (cm)'
value = 'rate change\n(euclidean)'
df = PopulationVectorChangeRate.get_dataframe(all_recordings)
ValueByBinnedDistancePlot.bin_distance_values(df, binned_measure,
FieldDensity.bin_size)
df.drop(index=df.loc[df[binned_measure] > FieldDensity.max_bin_center].index, inplace=True)
df_alonglongwall = filter_dataframe_by_direction(
df.copy(deep=True), 'alonglongwall', section_width='quadrants'
)
df_alongshortwall = filter_dataframe_by_direction(
df.copy(deep=True), 'alongshortwall', section_width='quadrants'
)
ylabels = ('population vector change\n' + r'in runs $\bf\bot$ to short wall (Hz/cm)',
'population vector change\n' + r'in runs $\bf\parallel$ to short wall (Hz/cm)')
direction_rules = ('orthogonal_to_short_wall', 'parallel_to_short_wall')
all_axs = []
for df_tmp, direction_rule, ylabel, ax, stat_ax in zip((df_alonglongwall, df_alongshortwall),
direction_rules,
ylabels, axs, stat_axs):
all_axs += FiringRateChangeAll.plot_field_change_by_distance_to_wall(
df_tmp, value, binned_measure, direction_rule, ylabel, ax, stat_ax
)
ylim = get_max_ylim(all_axs)
for ax in all_axs:
ax.set_ylim(ylim)
return fig, stat_fig
@staticmethod
def write(fpath, all_recordings, prefix='', verbose=True):
figure_name = prefix + 'FiringRateChangeAll'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FiringRateChangeAll.make_figure(all_recordings)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.svg'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
class FiringRateChangeAndTheta(object):
@staticmethod
    def binned_values(values, quantiles=10):
        """Replaces each value with the midpoint of the quantile bin it falls into (default: deciles)."""
        return np.array(list(map(lambda c: float(c.mid.mean()), pd.qcut(values, quantiles))))
@staticmethod
def plot_single_relation(df, x_axis_column, y_axis_column, covar_column, ax, stat_ax):
df = df.copy(deep=True)
df[x_axis_column] = FiringRateChangeAndTheta.binned_values(df[x_axis_column])
df.loc[df[x_axis_column] == df[x_axis_column].min(), x_axis_column] = np.nan
df.loc[df[x_axis_column] == df[x_axis_column].max(), x_axis_column] = np.nan
df = df.dropna()
dfg = df.groupby(['animal', x_axis_column])[[y_axis_column, covar_column]].mean().reset_index()
sns.scatterplot(
data=dfg,
x=x_axis_column,
y=y_axis_column,
hue='animal',
ax=ax)
pcorr_stats = partial_corr(
data=dfg,
x=x_axis_column,
y=y_axis_column,
covar=covar_column
)
ax.set_title('r = {:.3f} | p = {:e}'.format(pcorr_stats.loc['pearson', 'r'],
pcorr_stats.loc['pearson', 'p-val']))
table_cell_text = []
table_cell_text.append(['x', x_axis_column])
table_cell_text.append(['', ''])
table_cell_text.append(['', ''])
table_cell_text.append(['y', y_axis_column])
table_cell_text.append(['', ''])
table_cell_text.append(['', ''])
table_cell_text.append(['covar', covar_column])
table_cell_text.append(['', ''])
table_cell_text.append(['', ''])
for stat_column in pcorr_stats.columns:
table_cell_text.append([stat_column, str(pcorr_stats.loc['pearson', stat_column])])
table_cell_text.append(['', ''])
stat_ax.table(cellText=table_cell_text, cellLoc='left', loc='upper left', edges='open')
@staticmethod
def normalise_values_per_animal(df, columns):
for column in columns:
for animal in df['animal'].unique():
idx = df['animal'] == animal
df.loc[idx, column] = zscore(df.loc[idx, column])
@staticmethod
def plot(df, axs, stat_axs):
df.rename(columns={'rate change\n(euclidean)': 'population activity change rate (z score)',
'theta_frequency': 'theta frequency (z score)',
'running_speed': 'running speed (z score)'},
inplace=True)
FiringRateChangeAndTheta.normalise_values_per_animal(df, ['theta frequency (z score)', 'running speed (z score)',
'population activity change rate (z score)'])
FiringRateChangeAndTheta.plot_single_relation(df, 'theta frequency (z score)', 'running speed (z score)',
'population activity change rate (z score)',
axs[0], stat_axs[0])
FiringRateChangeAndTheta.plot_single_relation(df, 'population activity change rate (z score)',
'theta frequency (z score)', 'running speed (z score)',
axs[1], stat_axs[1])
@staticmethod
def make_figure(fpath):
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
plt.subplots_adjust(left=0.15, bottom=0.15, right=0.98, top=0.92, wspace=0.4, hspace=0.4)
stat_fig, stat_axs = plt.subplots(1, 2, figsize=(14, 8))
plt.tight_layout(pad=1.5)
for stat_ax in stat_axs.flatten():
stat_ax.set_xticks([], [])
stat_ax.set_yticks([], [])
FiringRateChangeAndTheta.plot(
pd.read_pickle(construct_df_population_vector_change_file_path(fpath)), axs, stat_axs
)
return fig, stat_fig
@staticmethod
def write(fpath, prefix='', verbose=True):
figure_name = prefix + 'FiringRateChangeAndTheta'
if verbose:
print('Writing Figure {}'.format(figure_name))
sns.set(context='paper', style='ticks', palette='muted', font_scale=seaborn_font_scale)
fig, stat_fig = FiringRateChangeAndTheta.make_figure(fpath)
fig.savefig(os.path.join(paper_figures_path(fpath), '{}.png'.format(figure_name)))
stat_fig.savefig(os.path.join(paper_figures_path(fpath), '{}_stats.png'.format(figure_name)))
plt.close(fig)
plt.close(stat_fig)
if verbose:
print('Writing Figure {} Done.'.format(figure_name))
def print_field_count_per_cell_correlation_with_clustering_quality(df_units, df_fields):
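    # Correlates each place cell's field count and mean field area in exp_scales_d with the
    # cluster quality measures (isolation_distance, l_ratio), across animals and per animal.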
df = df_fields.loc[df_fields['experiment_id'] == 'exp_scales_d',
['animal', 'animal_unit', 'area']].copy(deep=True)
df = df.merge(df_units[['animal', 'animal_unit', 'category']].copy(deep=True),
how='left', on=['animal', 'animal_unit'])
df = df.loc[df['category'] == 'place_cell', ['animal', 'animal_unit', 'area']] # Only keep place cell fields
df['count'] = 1
    df = df.groupby(['animal', 'animal_unit'])[['count', 'area']].sum().reset_index()
df['mean_area'] = df['area'] / df['count']
df = df.merge(df_units[['animal', 'animal_unit', 'isolation_distance', 'l_ratio']],
on=['animal', 'animal_unit'],
how='left')
df = df.dropna()
for correlate in ('count', 'mean_area'):
print()
print('Comparing {} correlation with clustering quality measures'.format(correlate))
for measure in ('isolation_distance', 'l_ratio'):
print()
print('Clustering quality measure: {}'.format(measure))
print('Across animals correlation of {} to field {}: r={:.3f} p={:.6f}'.format(measure, correlate,
*pearsonr(df[measure],
df[correlate])))
for animal in df['animal'].unique():
idx = df['animal'] == animal
print(
'Animal {} correlation of {} to field {}: r={:.3f} p={:.6f} | total units = {} | {} per unit {:.2f}'.format(
animal, measure, correlate, *pearsonr(df.loc[idx, measure], df.loc[idx, correlate]), np.sum(idx),
correlate, np.mean(df.loc[idx, correlate])))
def load_data_preprocessed_if_available(fpath, recompute=False, verbose=False):
# This ensures all possible pre-processing is completed before loading data
# If pre-processing has not been run on the data yet, this step is very slow
# and requires large amounts of CPU memory to run.
# Preferably, the file barrylab_ephys_analysis/scripts/exp_scales/paper_preprocess.py
# would be run as a script with the same input as this file on a powerful machine.
# The script will compute and save the computationally expensive parts of the analysis
# to the NWB files. If this is done before launching barrylab_ephys_analysis/scripts/exp_scales/paper_figures.py
# then the following line will purely do some verification that computations have completed.
preprocess_and_save_all_animals(fpath, recompute=recompute, verbose=verbose)
print('Preprocessing complete for all animals.')
# Load data from all animals into memory
all_recordings = load.load_recordings_of_all_animals(
fpath, Params.animal_ids, continuous_data_type=None, no_waveforms=True,
clustering_name=Params.clustering_name, verbose=verbose
)
# Load pre-processing results to memory
for recordings in all_recordings:
recordings.load_analysis(ignore=('waveform_properties',))
return all_recordings
def get_full_df_units(all_recordings):
return pd.concat([recordings.df_units for recordings in all_recordings], ignore_index=True)
def get_full_df_fields(all_recordings):
return pd.concat([recordings.df_fields for recordings in all_recordings], ignore_index=True)
def link_df_units_and_df_fields_with_common_unit(df_units, df_fields):
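    # Assigns a running 'unit' index to each row of df_units and propagates it to df_fields
    # by matching on (animal, animal_unit).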
df_units_tmp = df_units[['animal', 'animal_unit']].copy()
df_units_tmp['unit'] = list(range(df_units_tmp.shape[0]))
df_merge = pd.merge(left=df_units_tmp, right=df_fields[['animal', 'animal_unit']],
on=['animal', 'animal_unit'])
df_fields.insert(0, 'unit', df_merge['unit'])
def main(fpath):
all_recordings = load_data_preprocessed_if_available(fpath, recompute=False, verbose=True)
    # Rename the experiment in the last recording so it does not share a name with the first
for recordings in all_recordings:
snippets.rename_last_recording_a2(recordings)
for recordings in all_recordings:
create_df_fields_for_recordings(recordings)
for recordings in all_recordings:
create_unit_data_frames_for_recordings(recordings)
df_fields = get_full_df_fields(all_recordings)
df_units = get_full_df_units(all_recordings)
link_df_units_and_df_fields_with_common_unit(df_units, df_fields)
df_fields.to_pickle(os.path.join(fpath, Params.analysis_path, 'df_fields.p'))
df_units.to_pickle(os.path.join(fpath, Params.analysis_path, 'df_units.p'))
with open(os.path.join(fpath, 'Analysis', 'all_recordings.p'), 'wb') as pfile:
pickle.dump(all_recordings, pfile)
# # Use this instead if data has already been loaded once
# with open(os.path.join(fpath, 'Analysis', 'all_recordings.p'), 'rb') as pfile:
# all_recordings = pickle.load(pfile)
#
# df_units = pd.read_pickle(os.path.join(fpath, 'Analysis', 'df_units.p'))
# df_fields = pd.read_pickle(os.path.join(fpath, 'Analysis', 'df_fields.p'))
# Compute and write figures
ExampleUnit.write(fpath, all_recordings, df_units, prefix='Figure_1_')
FieldDetectionMethod.write(fpath, all_recordings, df_units, prefix='Figure_1_sup_2_')
IntraTrialCorrelations.write(fpath, all_recordings, df_units, df_fields, prefix='Figure_1_sup_3_')
PlaceCellAndFieldCounts.write(fpath, df_units, df_fields, prefix='Figure_2AB_')
FieldsPerCellAcrossEnvironmentsSimple.write(fpath, df_units, df_fields, prefix='Figure_2C_')
Remapping.write(fpath, all_recordings, prefix='Figure_2_sup_1_')
environment_field_density_model_parameters = \
FieldsDetectedAcrossEnvironments.write(fpath, df_units, df_fields, prefix='Figure_2E_')
ConservationOfFieldFormationPropensity.write(fpath, df_units, df_fields,
environment_field_density_model_parameters, prefix='Figure_2_sup_2_')
gamma_model_fit = \
FieldsPerCellAcrossEnvironments.write(fpath, df_units, df_fields, environment_field_density_model_parameters,
prefix='Figure_2_sup_3_')
PlaceCellsDetectedAcrossEnvironments.write(fpath, df_units, df_fields,
environment_field_density_model_parameters, gamma_model_fit,
prefix='Figure_2D_')
FieldDensity.write(fpath, df_units, df_fields, prefix='Figure_3A_')
FieldSize.write(fpath, all_recordings, df_units, df_fields, prefix='Figure_3B_')
FieldWidth.write(fpath, all_recordings, df_units, df_fields, prefix='Figure_3CD_')
AverageActivity.write(fpath, all_recordings, prefix='Figure_4AB_')
FiringRateDistribution.write(fpath, all_recordings, prefix='Figure_4C_')
FieldAreaDistribution.write(fpath, df_units, df_fields, prefix='Figure_4D_')
FieldDensityByDwell.write(fpath, all_recordings, df_units, df_fields, prefix='Figure_3_sup_1_')
FieldWidthAll.write(fpath, all_recordings, df_units, df_fields, prefix='Figure_3_sup_2_')
AverageActivityAll.write(fpath, all_recordings, df_units, df_fields, prefix='Figure_4_sup_1_')
InterneuronMeanRate.write(fpath, all_recordings, prefix='Figure_4_sup_2_')
FiringRateChange.write(fpath, all_recordings, df_units, prefix='Figure_5AB_')
FiringRateChangeAll.write(fpath, all_recordings, prefix='Figure_5_sup_1_')
FiringRateChangeAndTheta.write(fpath, prefix='Figure_R1_')
print_field_count_per_cell_correlation_with_clustering_quality(df_units, df_fields)
if __name__ == '__main__':
main(sys.argv[1])
| 46.053519 | 128 | 0.623237 |
51522e8d4ee8f3c8fbfad8d0a1b2f0b9bb3a221d | 3,602 | py | Python | src/interactive_conditional_samples.py | MasayukiTanaka0412/SpeechGen | 41d443fb5cdec907c26a7ccc44e8b846110904aa | ["MIT"] | null | null | null | src/interactive_conditional_samples.py | MasayukiTanaka0412/SpeechGen | 41d443fb5cdec907c26a7ccc44e8b846110904aa | ["MIT"] | null | null | null | src/interactive_conditional_samples.py | MasayukiTanaka0412/SpeechGen | 41d443fb5cdec907c26a7ccc44e8b846110904aa | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
"""
def interact_model(
model_name='124M',
seed=None,
nsamples=1,
batch_size=1,
length=None,
temperature=1,
top_k=0,
top_p=1,
models_dir='models',
):
"""
def interact_model(
model_name='345M',
seed=None,
nsamples=3,
batch_size=3,
length=100,
temperature=1,
top_k=40,
top_p=1,
models_dir='models',
):
"""
Interactively run the model
:model_name=124M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
:nsamples=1 : Number of samples to return total
:batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:models_dir : path to parent folder containing model subfolders
(i.e. contains the <model_name> folder)
"""
models_dir = os.path.expanduser(os.path.expandvars(models_dir))
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name, models_dir)
hparams = model.default_hparams()
with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
saver.restore(sess, ckpt)
while True:
raw_text = input("Model prompt >>> ")
while not raw_text:
print('Prompt should not be empty!')
raw_text = input("Model prompt >>> ")
context_tokens = enc.encode(raw_text)
generated = 0
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
print(text)
print("=" * 80)
if __name__ == '__main__':
fire.Fire(interact_model)
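# Illustrative invocation (assumes the 345M model files have been downloaded into ./models):
#
#   python src/interactive_conditional_samples.py --temperature=1 --top_k=40 --length=100
#
# fire.Fire exposes every keyword argument of interact_model as a command-line flag.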
| 33.981132 | 89 | 0.631594 |
8444465215a99aa596cfc8ea819c21383948184d | 26,395 | py | Python | partialflow/manager.py | jakob-bauer/partialflow | bea1b46ca66fb5c10aefbcd1570aff922a903118 | ["MIT"] | 3 | 2017-02-03T15:59:10.000Z | 2020-05-23T07:26:10.000Z | partialflow/manager.py | jakob-bauer/partialflow | bea1b46ca66fb5c10aefbcd1570aff922a903118 | ["MIT"] | null | null | null | partialflow/manager.py | jakob-bauer/partialflow | bea1b46ca66fb5c10aefbcd1570aff922a903118 | ["MIT"] | 1 | 2018-08-02T02:16:34.000Z | 2018-08-02T02:16:34.000Z |
import tensorflow as tf
from .sections import GraphSection, section_from_name
from tensorflow.python.client.session import _FetchMapper
from .utils import VerboseTimer
from contextlib import ExitStack
def _flatten_list(nested_list):
"""
Given a nested list, returns flat list of all its elements
:param nested_list: nested list
:return: flat list
"""
if not isinstance(nested_list, list):
return [nested_list]
res = []
for sub_list in nested_list:
res += _flatten_list(sub_list)
return res
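# Example: _flatten_list([1, [2, [3, 4]], 5]) returns [1, 2, 3, 4, 5].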
class GraphSectionManager(object):
"""
Optimizes a graph with GraphSections by running partial backward passes.
This reduces the memory consumption at the expense of an additional forward pass and multiple data transfers
between GPU and main memory.
"""
def __init__(self, graph=None):
"""
Constructor
:param graph: graph that is split into sections, defaults to tf.get_default_graph()
"""
self._graph = tf.get_default_graph() if graph is None else graph
self._sections = []
self._cache = {}
# which tensors to cache during full forward pass
self._tensors_to_cache_in_fwd = []
# caches for request op information
self._req_input_tensors = {} # requested op -> [list of input tensors]
self._req_eval_sections = {} # requested op -> section to evaluate in (or None for forward pass)
self._req_reduced_inputs = {} # requested op -> [list of input tensors minus those from eval_section]
####################################################################################################################
# Section handling
####################################################################################################################
def new_section(self):
"""
Constructs a new graph section and adds it to the manager.
:return: newly created section
"""
section = GraphSection(graph=self._graph, manager=self)
return section
def add_section(self, section):
"""
Adds a GraphSection to the list of managed sections.
:param section: instance of GraphSection
:return: index of added section
"""
self._sections.append(section)
return len(self._sections) - 1
def get_sections(self):
"""
Returns a list of the sections
Sections are ordered in the way there were added.
:return: list of sections
"""
return self._sections
####################################################################################################################
# Methods to prepare the graph for training
####################################################################################################################
def add_training_ops(self, optimizer, loss=None, var_list=None, grads=None, global_step=None, verbose=False,
summaries=None):
"""
Constructs a training operation for each section. If `grads` is not given, it is computed by
grads = optimizer.compute_gradients(loss, var_list)
Each section's training operations applies the gradients of the section's variables and runs all operations in
the section's GraphKeys.UPDATE_OPS collection. All variables are assumed to be contained in the section's
GLOBAL_VARIABLES collection.
:param optimizer: tensorflow optimizer to use
:param loss: loss tensor to optimize
:param var_list: variable list to compute gradients on
:param grads: gradients as returned by optimizer.compute_gradients, alternative to loss and var_list
:param global_step: global step tensor to increment after full backward pass
:param verbose: if True, adds tf.Print operations to log backward passes over sections
:param summaries: optional list of collections to add gradient histogram summaries to. Defaults to None
"""
# add gradient computation nodes for all trainable variables
if grads is None:
assert loss is not None, 'Either gradients or loss have to be given.'
grads = optimizer.compute_gradients(loss, var_list=var_list)
# store references to gradient ops for simple access
grad_dict = {v: g for g, v in grads}
for s, section in reversed(list(enumerate(self.get_sections()))):
# construct gradient application
xs = section.get_collection(tf.GraphKeys.GLOBAL_VARIABLES).intersection(grad_dict.keys())
apply_op = None
if len(xs) > 0:
cur_grads = [(grad_dict[v], v) for v in xs]
if summaries is not None:
for v in xs:
                        tf.summary.histogram('gradients/%s' % v.name, grad_dict[v], collections=summaries)
# if we should be verbose, log backward passes
if verbose:
cur_grads[0] = (tf.Print(cur_grads[0][0], [cur_grads[0][0]],
'Running backward pass on section %d' % s), cur_grads[0][1])
# only increment global step in last partial backward pass
apply_op = optimizer.apply_gradients(cur_grads, global_step=global_step if s == 0 else None)
print("Found %d gradient application operations in section %d. Adding to training op."
% (len(cur_grads), s))
# no gradients to apply
else:
print("Section %d does not contain gradient application operations." % s)
# in last section's backward pass -> need to increment global step separately
if s == 0 and global_step is not None:
apply_op = tf.assign_add(global_step, 1)
# group update operations
update_op = None
update_ops = section.get_collection(tf.GraphKeys.UPDATE_OPS)
if len(update_ops) > 0:
print("Found %d update operations in section %d. Adding to training op." % (len(update_ops), s))
update_op = tf.group(*update_ops)
else:
print("Section %d does not contain update operations." % s)
# construct final training operation
if apply_op is not None and update_op is not None:
train_op = tf.group(apply_op, update_op)
elif apply_op is not None:
train_op = apply_op
elif update_op is not None:
train_op = update_op
else:
train_op = tf.no_op()
section.set_training_op(train_op)
def prepare_training(self):
"""
Prepares the partial training by computing metadata about sections and creating training operations.
Should be run after the full construction of the graph, including training ops.
"""
assert len(self.get_sections()) > 0, 'There has to be at least one GraphSection in this graph.'
# compute list of unique tensors to cache in forward pass
self._tensors_to_cache_in_fwd = list(set([t for s in self.get_sections() for t in s.get_incoming()]))
# compute tensors to feed into backward passes
all_tensors_to_feed = set()
for s, section in reversed(list(enumerate(self.get_sections()))):
# find list of tensors to feed into training operation
tensors_to_feed = self._find_feeds_from_other_sections(section.get_training_op(),
ignore=[section], given=section.get_incoming())
# store info in section
section.set_tensors_to_feed([t[0] for t in tensors_to_feed])
all_tensors_to_feed.update(tensors_to_feed)
# tell sections to cache those tensors needed by other sections
self._mark_tensors_for_caching(all_tensors_to_feed, only_next_run=False)
def _mark_tensors_for_caching(self, tensors, only_next_run=False):
"""
Stores which tensors to cache in each section's backward pass.
:param tensors: list of tuples as output by _find_feeds_from_other_sections
:param only_next_run: if True, only store these temporarily for the next run
"""
for s, section in list(enumerate(self.get_sections())):
to_cache = [t[0] for t in tensors if t[1] == section]
section.add_tensors_to_cache(to_cache, only_next_run)
####################################################################################################################
# Methods for running the graph forward or backward, full or section-wise
####################################################################################################################
def run_forward(self, sess, fetches=None, basic_feed=None):
"""
Runs a forward pass over all sections, clears cache, and caches intermediate results for backward passes.
Should not be used to request tensors for which gradients need to be computed! Otherwise potential OOM
:param sess: session to run in
:param fetches: list of tensors to fetch during forward pass, e.g. loss
:param basic_feed:
:return results for fetched tensors
"""
if fetches is None:
fetches = []
if basic_feed is None:
basic_feed = {}
cache_values, results = sess.run([self._tensors_to_cache_in_fwd, fetches], basic_feed)
# clear cache and store intermediate results of forward pass
self._cache = basic_feed.copy()
for v, k in zip(cache_values, self._tensors_to_cache_in_fwd):
self._cache[k] = v
return results
def run_backward(self, sess, fetches=None, verbose_timing=False):
"""
Runs section-wise training pass over the graph, caches intermediate results as defined by sections.
:param sess: session to run in
:param fetches: list of fetches for each section.
Tensors in i-th sub-list are fetched in backward pass of i-th section
:param verbose_timing: if True, time forward and backward passes verbosely
:return: list of results, same structure as `fetches`
"""
results = []
if fetches is None:
fetches = [[] for _ in self.get_sections()]
timer = VerboseTimer if verbose_timing else lambda _: ExitStack()
for s, section in reversed(list(enumerate(self.get_sections()))):
# cache intermediate results to be used in other sections
tensors_to_cache = list(section.get_tensors_to_cache())
# construct feed dictionary
feed = {}
for t in list(section.get_tensors_to_feed()):
feed[t] = self._cache[t]
request = fetches[s] if len(fetches) > s else []
tensors_to_compute = [section.get_training_op(), tensors_to_cache, request]
with timer('backward on section %d' % s):
_, cache_vals, result = sess.run(tensors_to_compute, feed)
results.append(result)
# store all computed values in cache
for i in range(len(tensors_to_cache)):
self._cache[tensors_to_cache[i]] = cache_vals[i]
results.reverse()
return results
def run_full_cycle(self, sess, fetches=None, basic_feed=None, verbose_timing=False):
"""
Runs forward and backward pass through the graph and fetches results, similar to session.run().
Mimics tensorflow's session.run for structure of fetches and returned values.
:param sess: session to run in
:param fetches: arbitrarily nested structure of graph elements to fetch
:param basic_feed: dictionary of tensors/placeholders and values to feed into the graph
:param verbose_timing: if True, time forward and backward passes verbosely
:return: resulting values for fetches
"""
if fetches is None:
fetches = []
if basic_feed is None:
basic_feed = {}
timer = VerboseTimer if verbose_timing else lambda _: ExitStack()
# multiple GraphSections -> train step-wise
if len(self.get_sections()) > 1:
# for all requested tensors, find sections in which they are computed
with timer('split fetches'):
unique_fetches, fwd_requests, bwd_requests, fetch_mapper = self._split_requests_for_sections(fetches)
# run cycle
with timer('forward'):
fwd_values = self.run_forward(sess, fwd_requests, basic_feed=basic_feed)
with timer('backward'):
bwd_values = self.run_backward(sess, bwd_requests, verbose_timing=verbose_timing)
with timer('post cycle'):
# reconstruct output
flat_requests = _flatten_list([fwd_requests, bwd_requests])
flat_values = _flatten_list([fwd_values, bwd_values])
req_val = list(zip(flat_requests, flat_values))
values = [e[1] for fetch in unique_fetches for e in req_val if e[0] == fetch]
results = fetch_mapper.build_results(values)
# clean intermediate cache fetches
for section in self.get_sections():
section.cleanup_after_cycle()
# only a single GraphSection (no real splits) -> fall back to default training in one go
else:
train_op = self.get_sections()[0].get_training_op()
_, results = sess.run([train_op, fetches], basic_feed)
return results
def _split_requests_for_sections(self, fetches):
"""
Internal helper function that assigns each fetch to a specific part of the graph evaluation.
:param fetches: arbitrarily nested structure of graph elements
:return: list of unique fetches, list of requests for forward pass,
list ob sub-lists with requests for backward passes, fetch_mapper to reconstruct result
"""
# TODO: avoid using a tf core class here
fetch_mapper = _FetchMapper.for_fetch(fetches)
unique_fetches = fetch_mapper.unique_fetches()
sections = []
all_input_tensors = set()
forward_fetches = []
backward_fetches = [[] for _ in self.get_sections()]
for fetch in unique_fetches:
fetch_op = fetch if isinstance(fetch, tf.Operation) else fetch.op
section = self._get_op_section(fetch_op)
sections.append(section)
# check cache for pre-computed input tensors
if fetch_op in self._req_input_tensors:
input_tensors = self._req_input_tensors[fetch_op]
# not cached, compute and store
else:
input_tensors = self._find_feeds_from_other_sections(fetch_op)
self._req_input_tensors[fetch_op] = input_tensors
# fetch independent from all sections? -> evaluate in first general forward pass
if section is None and len(input_tensors) == 0:
forward_fetches.append(fetch)
# print('will evaluate ', fetch, 'in forward pass')
# fetch depends on at least one section
else:
# not yet cached
if fetch_op not in self._req_eval_sections:
# fetch is part of a section? -> evaluate in backward pass of this section (includes forward pass)
if section is not None:
eval_section = section
# fetch is not part of any section
elif section is None:
# select the last section this fetch depends on, in order of backward pass
eval_section = min([a for t, a in input_tensors], key=lambda a: a.get_index())
# remove input tensors inside this section, since they are evaluated anyway
input_tensors = self._find_feeds_from_other_sections(fetch_op, ignore=[eval_section])
# print('will evaluate ', fetch, 'in backward pass of section %d' % eval_section.get_index())
# cache infos
self._req_eval_sections[fetch_op] = eval_section
self._req_reduced_inputs[fetch_op] = input_tensors
# load data from cache
else:
eval_section = self._req_eval_sections[fetch_op]
input_tensors = self._req_reduced_inputs[fetch_op]
# fetch from backward pass of determined section
backward_fetches[eval_section.get_index()].append(fetch)
# needs input from other sections
if len(input_tensors) > 0:
eval_section.add_tensors_to_feed([t[0] for t in input_tensors], only_next_run=True)
all_input_tensors.update(input_tensors)
# store which tensors need to be cached
# print('will cache', all_input_tensors)
self._mark_tensors_for_caching(all_input_tensors, only_next_run=True)
return unique_fetches, forward_fetches, backward_fetches, fetch_mapper
####################################################################################################################
# Methods for analyzing the graph
####################################################################################################################
def _get_op_section(self, op):
"""
Returns the section which a given op resides in
:param op: tf.Operation in question
:return: GraphSection object or None if not in a section
"""
if hasattr(op, 'graph_section'):
return op.graph_section
# backward pass ops are tracked using name scopes
m = section_from_name.match(op.name)
if not m:
return None
else:
return self.get_sections()[int(m.group(2))]
def _find_feeds_from_other_sections(self, op_or_t, ignore=None, given=None):
"""
Traverses the graph to find tensors that need to be fed into an evaluation of a given operation or tensor.
Only considers tensors that are computed by operations outside of `op_or_t`'s section (if any) and
outside of the `ignore`d sections. Assumes that operations outside all sections are not cache-able.
Example:
We want to know which tensors are needed by a training operation of section S. Because
the training includes a forward and backward pass of S anyway, we can ignore all tensors computed in S
and only need to consider inputs from other sections. Since the training operation is typically not
contained in the section though, one needs to specify which section to ignore.
In a simple feed-forward network, this method would then return the output tensors of the predecessor
section and the gradient tensors of the successor section.
Since gradient information is often aggregated outside of sections, caching the immediate (non-aggregated)
outputs of a section may be suboptimal. The method therefore enriches the graph by permanently merging such
aggregation operations into the corresponding section if possible. Note that this is a heuristic based on the
assumption that the output of an operation is usually smaller than the set of its inputs. It is therefore not
guaranteed to always yield optimal results, i.e. smallest cache volumes.
:param op_or_t: operation or tensor to evaluate
:param ignore: list of sections whose operations are assumed to be non-cache-able, i.e. are computed anyway
:param given: list of tensors that are assumed to be given to be fed in the evaluation
:return: list of (tensor, section) tuples:
- tensor is the tensor to be cached
- section is the GraphSection whose backward pass it can be cached from, or -1 if tensor is `given`
"""
def _remove_tensors_by_section(tensors, section):
return filter(lambda t: t[1] != section, tensors)
def _clear_graph_info():
for op in self._graph.get_operations():
if hasattr(op, 'section_deps'):
del op.section_deps
# buffer initialization
if given is None:
given = set()
if ignore is None:
ignore = []
ignore += [None] # ops outside of sections are never cached
def _recursion_helper(op_or_t):
"""
This method enriches the graph with temporary information in the `section_deps` attribute of operations.
The graph should be cleaned using _clean_graph_info() before running again, otherwise the results
might be incorrect.
:param op_or_t: current operation or tensor in question
:return: tuple of two lists. First list is in same format as parent method output, second list contains all
sections the op_or_t depends on (projection of first list to second argument in each tuple)
"""
# if tensor is given, convert to corresponding op
op = op_or_t.op if isinstance(op_or_t, tf.Tensor) else op_or_t
origin_section = self._get_op_section(op)
depends_on_tensors = set()
depends_on_sections = set()
# we have already been here before -> just return cached results
if hasattr(op, 'section_deps') and set(op.section_deps[1]).isdisjoint(ignore):
return op.section_deps
# mark this node as visited to avoid loops
op.section_deps = ([], [])
# check all the input tensors to this op
for t in op.inputs:
is_leaf = False
input_dep_tensors = set()
input_dep_sections = set()
# which section does the current tensor belong to?
cur_section = self._get_op_section(t.op)
# this is a variable? -> no dependency
if t.op.op_def is not None and t.op.op_def.name == 'Variable':
continue
# this tensor is given -> add to dependencies and continue to next
if t in given:
is_leaf = True
cur_section = -1 # section dependency unclear since expected to be given
# this tensor is computed in a different section that is not ignored
# i.e., we are at a leaf -> add to results and stop recursion
elif cur_section != origin_section and cur_section not in ignore:
is_leaf = True
# this tensor is computed in same section or in ignored section -> recursion
else:
# compute all dependencies for the current input tensor
input_dep_tensors, input_dep_sections = _recursion_helper(t.op)
# we do in no case depend on our own section (since this is assumed anyway)
input_dep_tensors = _remove_tensors_by_section(input_dep_tensors, origin_section)
if origin_section in input_dep_sections:
input_dep_sections.remove(origin_section)
# if all dependencies belong to the same section and the current op is outside all sections,
# we might be able to aggregate the inputs and only cache the output tensor
if len(input_dep_sections) == 1 and cur_section is None \
and -1 not in input_dep_sections: # do not merge dependencies on given inputs
# check if we are at the border of some ignored section
# in this case, we may not count our current tensor as belonging to the section it depends on,
# since we traversed some ignored section on the way.
immediate_input_sections = set([self._get_op_section(o)
for o in list(t.op.inputs) + t.op.control_inputs])
traversed_other_section = not immediate_input_sections.issubset(input_dep_sections)
# we can merge this tensor's op into its input section
if not traversed_other_section:
cur_section = list(input_dep_sections)[0]
t.op.graph_section = cur_section
# ignore all other ops in the tensor's subtree
is_leaf = True
# this tensor is a leaf (or should be handled as one)
# add to dependencies for current op
if is_leaf:
depends_on_tensors.add((t, cur_section))
depends_on_sections.add(cur_section)
# other dependencies (i.e. either multiple sections or None)
# -> we inherit those
else:
depends_on_tensors.update(input_dep_tensors)
depends_on_sections.update(input_dep_sections)
# recursion on control inputs
for dep in op.control_inputs:
input_dep_tensors, input_dep_sections = _recursion_helper(dep)
input_dep_tensors = _remove_tensors_by_section(input_dep_tensors, origin_section)
if origin_section in input_dep_sections:
input_dep_sections.remove(origin_section)
depends_on_tensors.update(input_dep_tensors)
depends_on_sections.update(input_dep_sections)
# store which tensors this op depends on
op.section_deps = (list(depends_on_tensors), list(depends_on_sections))
return op.section_deps
# compute result and clear temporary graph information afterwards
result = _recursion_helper(op_or_t)
_clear_graph_info()
return result[0]
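# Minimal usage sketch (illustrative only). How individual sections are declared lives in
# sections.py, which is not shown here, so graph construction is left abstract; `inputs`,
# `labels`, `x_batch` and `y_batch` are hypothetical placeholder names.
#
#   manager = GraphSectionManager()
#   # ... build the model, wrapping memory-heavy parts in sections from manager.new_section() ...
#   manager.add_training_ops(tf.train.AdamOptimizer(1e-3), loss=loss)
#   manager.prepare_training()
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       loss_value = manager.run_full_cycle(sess, fetches=loss,
#                                           basic_feed={inputs: x_batch, labels: y_batch})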
| 46.064572 | 120 | 0.60072 |
9a5e1c33def23c24c13beb24e49e268792b2e90e | 2,634 | py | Python | src/rpm2json/main.py | paul-blankenbaker/rpm2json | 85af7ad818a599fb8cbcb30150d33dea26ad4944 | ["MIT"] | null | null | null | src/rpm2json/main.py | paul-blankenbaker/rpm2json | 85af7ad818a599fb8cbcb30150d33dea26ad4944 | ["MIT"] | null | null | null | src/rpm2json/main.py | paul-blankenbaker/rpm2json | 85af7ad818a599fb8cbcb30150d33dea26ad4944 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
This is entry point for the console script. To enable/package this script,
add the following lines in the [options.entry_points] section in setup.cfg:
console_scripts =
rpm2json = rpm2json.main:run
Then run `python setup.py install` which will install the command `rpm2json`
inside your current environment.
"""
import argparse
import os
import rpm
import sys
import logging
from rpm2json import __version__
from rpm2json import rpmList
__author__ = "Paul Blankenbaker"
__copyright__ = "Paul Blankenbaker"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Generates JSON file(s) of information from RPMs")
parser.add_argument(
"--version",
action="version",
version="rpm2json {ver}".format(ver=__version__))
parser.add_argument(
"--dir",
required=True,
help="Directory where the RPM repository lives")
parser.add_argument(
"--outdir",
help="If you want the JSON files written to a different directory")
parser.add_argument(
"-v",
"--verbose",
dest="loglevel",
help="set loglevel to INFO",
action="store_const",
const=logging.INFO)
parser.add_argument(
"-vv",
"--very-verbose",
dest="loglevel",
help="set loglevel to DEBUG",
action="store_const",
const=logging.DEBUG)
return parser.parse_args(args)
def setup_logging(loglevel):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
setup_logging(args.loglevel)
#_logger.debug("Starting crazy calculations...")
if (args.dir != None):
#rpm.addMacro('_dpath', args.dir)
outdir = args.outdir
if outdir == None:
outdir = os.path.join(args.dir, "json")
rpmList(args.dir, outdir)
def run():
"""Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
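# Illustrative invocation once installed as the `rpm2json` console script (paths are placeholders):
#
#   rpm2json --dir /path/to/rpm/repo --outdir /tmp/rpm-json -v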
| 25.572816 | 76 | 0.636295 |
e2187b9173206955e0f19de4f1e22eba8fa05926 | 4,291 | py | Python | StorageManager.py | TLIsolator/AppDevelopmentProject | 4708e6eff315bf1a84cf98f8e32e2c36ca75c56d | ["MIT"] | null | null | null | StorageManager.py | TLIsolator/AppDevelopmentProject | 4708e6eff315bf1a84cf98f8e32e2c36ca75c56d | ["MIT"] | null | null | null | StorageManager.py | TLIsolator/AppDevelopmentProject | 4708e6eff315bf1a84cf98f8e32e2c36ca75c56d | ["MIT"] | null | null | null |
# Please read the instruction before using
# This will control all persistent storage (eg. Add, Delete, Modify)
# Note: Do not modify this before asking me (HF)
# HF
import shelve
class StorageManager():
def __init__(self):
# error checking
try:
self.__db = shelve.open('storage.db', 'r')
self.__db.close()
except Exception:
print("Storage not found")
self.delete_storage('TEMP')
def is_key_found(self, name):
keys = self.__db.keys()
if name in keys:
return True
else:
return False
def reset(self):
self.__db = shelve.open('storage.db', 'c')
keys = list(self.__db.keys())
for p in keys:
del self.__db[p]
self.__db.close()
def create_new_storage(self, name, items=None, dict=True):
self.__db = shelve.open('storage.db', 'c')
# items must be a dictionary or list
if(self.is_key_found(name) == False):
if items == None:
if dict == True:
self.__db[name] = {}
print("Created dictionary")
elif dict == False:
self.__db[name] = []
print("Created list")
else:
self.__db[name] = items
print("Created storage")
else:
print("existing name of storage found")
self.__db.close()
def delete_storage(self, name):
self.__db = shelve.open('storage.db', 'c')
if(self.is_key_found(name) == True):
del self.__db[name]
print("Deleted storage")
else:
print("no keys found with the given name")
self.__db.close()
def set_storage(self, name, item):
self.__db = shelve.open('storage.db', 'c')
if(self.is_key_found(name) == True):
self.__db[name] = item
print("modified storage")
else:
print("Unable to set item due to storage name not found")
self.__db.close()
def add_item(self, storage_name, key_to_use, item):
self.__db = shelve.open('storage.db', 'c')
if(self.is_key_found(storage_name) == True):
print("storage name found")
print(self.__db[storage_name])
if key_to_use in self.__db[storage_name].keys():
print("Key is in used")
print("ALL USERS: ")
print(self.__db[storage_name].keys())
else:
temp = self.__db[storage_name]
print("Key is not in used")
temp[key_to_use] = item
self.__db[storage_name] = temp
print("ALL USERS: ")
print(self.__db[storage_name].keys())
else:
print("Unable to set item due to storage name not found")
self.__db.close()
def get_storage(self, name, create=False, dict=False):
self.__db = shelve.open('storage.db', 'c')
if (self.is_key_found(name) == True):
temp = self.__db[name]
self.__db.close()
print("Storage found")
return temp
else:
print("storage name not found")
if create == True:
print("proceeds to create a new one")
if dict == True:
self.__db[name] = {}
print("Created dictionary")
else:
self.__db[name] = []
print("Created List")
self.__db.close()
def check_exist(self, name):
self.__db = shelve.open('storage.db', 'c')
if (self.is_key_found(name) == True):
self.__db.close()
return True
else:
self.__db.close()
return False
# TEST USE ONLY
def return_keys(self, name = None):
self.__db = shelve.open('storage.db', 'c')
if(name == None):
temp = list(self.__db.keys())
self.__db.close()
return temp
elif(name in list(self.__db.keys())):
temp = list(self.__db[name].keys())
self.__db.close()
return temp
else:
return None
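# Illustrative usage sketch (not part of the original module); the storage
# name 'Users' and the dict payload are made-up examples:
#
#   sm = StorageManager()
#   sm.create_new_storage('Users', dict=True)
#   sm.add_item('Users', 'alice', {'email': 'alice@example.com'})
#   users = sm.get_storage('Users')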
| 28.606667
| 69
| 0.509205
|
affddf27700bee4e8cf132b94eb1c3cdfe6530a7
| 584
|
py
|
Python
|
rdmo/projects/migrations/0020_data_migration.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 77
|
2016-08-09T11:40:20.000Z
|
2022-03-06T11:03:26.000Z
|
rdmo/projects/migrations/0020_data_migration.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 377
|
2016-07-01T13:59:36.000Z
|
2022-03-30T13:53:19.000Z
|
rdmo/projects/migrations/0020_data_migration.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 47
|
2016-06-23T11:32:19.000Z
|
2022-03-01T11:34:37.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def set_questionentity_is_collection(apps, schema_editor):
Value = apps.get_model('projects', 'Value')
for value in Value.objects.all():
if value.value_type == 'options':
value.value_type = 'option'
value.save()
class Migration(migrations.Migration):
dependencies = [
('projects', '0019_option'),
('questions', '0023_option'),
]
operations = [
migrations.RunPython(set_questionentity_is_collection),
]
| 21.62963
| 63
| 0.652397
|
d0bef93560c919012b6db27b208d172d9d5bfba2
| 84,646
|
py
|
Python
|
charmhelpers/contrib/openstack/utils.py
|
slashdd/charm-helpers
|
dc8b7ea0ecb6df30cac1e940f0e1b603c09c8906
|
[
"Apache-2.0"
] | null | null | null |
charmhelpers/contrib/openstack/utils.py
|
slashdd/charm-helpers
|
dc8b7ea0ecb6df30cac1e940f0e1b603c09c8906
|
[
"Apache-2.0"
] | null | null | null |
charmhelpers/contrib/openstack/utils.py
|
slashdd/charm-helpers
|
dc8b7ea0ecb6df30cac1e940f0e1b603c09c8906
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict, namedtuple
from functools import wraps
import subprocess
import json
import operator
import os
import sys
import re
import itertools
import functools
import six
import traceback
import uuid
import yaml
from charmhelpers import deprecate
from charmhelpers.contrib.network import ip
from charmhelpers.core import decorators, unitdata
from charmhelpers.core.hookenv import (
WORKLOAD_STATES,
action_fail,
action_set,
config,
expected_peer_units,
expected_related_units,
log as juju_log,
charm_dir,
INFO,
ERROR,
metadata,
related_units,
relation_get,
relation_id,
relation_ids,
relation_set,
status_set,
hook_name,
application_version_set,
cached,
leader_set,
leader_get,
local_unit,
)
from charmhelpers.core.strutils import (
BasicStringComparator,
bool_from_string,
)
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
)
from charmhelpers.contrib.network.ip import (
get_ipv6_addr,
is_ipv6,
port_has_listener,
)
from charmhelpers.core.host import (
lsb_release,
mounts,
umount,
service_running,
service_pause,
service_resume,
service_stop,
service_start,
restart_on_change_helper,
)
from charmhelpers.fetch import (
apt_cache,
apt_install,
import_key as fetch_import_key,
add_source as fetch_add_source,
SourceConfigError,
GPGKeyError,
get_upstream_version,
filter_installed_packages,
filter_missing_packages,
ubuntu_apt_pkg as apt,
)
from charmhelpers.fetch.snap import (
snap_install,
snap_refresh,
valid_snap_channel,
)
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
from charmhelpers.contrib.openstack.exceptions import OSContextError
from charmhelpers.contrib.openstack.policyd import (
policyd_status_message_prefix,
POLICYD_CONFIG_NAME,
)
from charmhelpers.contrib.openstack.ha.utils import (
expect_ha,
)
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
'queens',
'rocky',
'stein',
'train',
'ussuri',
'victoria',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
])
OPENSTACK_CODENAMES = OrderedDict([
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
('2013.1', 'grizzly'),
('2013.2', 'havana'),
('2014.1', 'icehouse'),
('2014.2', 'juno'),
('2015.1', 'kilo'),
('2015.2', 'liberty'),
('2016.1', 'mitaka'),
('2016.2', 'newton'),
('2017.1', 'ocata'),
('2017.2', 'pike'),
('2018.1', 'queens'),
('2018.2', 'rocky'),
('2019.1', 'stein'),
('2019.2', 'train'),
('2020.1', 'ussuri'),
('2020.2', 'victoria'),
])
# The ugly duckling - must list releases oldest to newest
SWIFT_CODENAMES = OrderedDict([
('diablo',
['1.4.3']),
('essex',
['1.4.8']),
('folsom',
['1.7.4']),
('grizzly',
['1.7.6', '1.7.7', '1.8.0']),
('havana',
['1.9.0', '1.9.1', '1.10.0']),
('icehouse',
['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
('juno',
['2.0.0', '2.1.0', '2.2.0']),
('kilo',
['2.2.1', '2.2.2']),
('liberty',
['2.3.0', '2.4.0', '2.5.0']),
('mitaka',
['2.5.0', '2.6.0', '2.7.0']),
('newton',
['2.8.0', '2.9.0', '2.10.0']),
('ocata',
['2.11.0', '2.12.0', '2.13.0']),
('pike',
['2.13.0', '2.15.0']),
('queens',
['2.16.0', '2.17.0']),
('rocky',
['2.18.0', '2.19.0']),
('stein',
['2.20.0', '2.21.0']),
('train',
['2.22.0', '2.23.0']),
('ussuri',
['2.24.0', '2.25.0']),
('victoria',
['2.25.0', '2.26.0']),
])
# >= Liberty version->codename mapping
PACKAGE_CODENAMES = {
'nova-common': OrderedDict([
('12', 'liberty'),
('13', 'mitaka'),
('14', 'newton'),
('15', 'ocata'),
('16', 'pike'),
('17', 'queens'),
('18', 'rocky'),
('19', 'stein'),
('20', 'train'),
('21', 'ussuri'),
('22', 'victoria'),
]),
'neutron-common': OrderedDict([
('7', 'liberty'),
('8', 'mitaka'),
('9', 'newton'),
('10', 'ocata'),
('11', 'pike'),
('12', 'queens'),
('13', 'rocky'),
('14', 'stein'),
('15', 'train'),
('16', 'ussuri'),
('17', 'victoria'),
]),
'cinder-common': OrderedDict([
('7', 'liberty'),
('8', 'mitaka'),
('9', 'newton'),
('10', 'ocata'),
('11', 'pike'),
('12', 'queens'),
('13', 'rocky'),
('14', 'stein'),
('15', 'train'),
('16', 'ussuri'),
('17', 'victoria'),
]),
'keystone': OrderedDict([
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
('12', 'pike'),
('13', 'queens'),
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
('17', 'ussuri'),
('18', 'victoria'),
]),
'horizon-common': OrderedDict([
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
('12', 'pike'),
('13', 'queens'),
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
('18', 'ussuri'),
('19', 'victoria'),
]),
'ceilometer-common': OrderedDict([
('5', 'liberty'),
('6', 'mitaka'),
('7', 'newton'),
('8', 'ocata'),
('9', 'pike'),
('10', 'queens'),
('11', 'rocky'),
('12', 'stein'),
('13', 'train'),
('14', 'ussuri'),
('15', 'victoria'),
]),
'heat-common': OrderedDict([
('5', 'liberty'),
('6', 'mitaka'),
('7', 'newton'),
('8', 'ocata'),
('9', 'pike'),
('10', 'queens'),
('11', 'rocky'),
('12', 'stein'),
('13', 'train'),
('14', 'ussuri'),
('15', 'victoria'),
]),
'glance-common': OrderedDict([
('11', 'liberty'),
('12', 'mitaka'),
('13', 'newton'),
('14', 'ocata'),
('15', 'pike'),
('16', 'queens'),
('17', 'rocky'),
('18', 'stein'),
('19', 'train'),
('20', 'ussuri'),
('21', 'victoria'),
]),
'openstack-dashboard': OrderedDict([
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
('12', 'pike'),
('13', 'queens'),
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
('18', 'ussuri'),
('19', 'victoria'),
]),
}
DEFAULT_LOOPBACK_SIZE = '5G'
DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading'
DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY]
class CompareOpenStackReleases(BasicStringComparator):
"""Provide comparisons of OpenStack releases.
Use in the form of
if CompareOpenStackReleases(release) > 'mitaka':
# do something with mitaka
"""
_list = OPENSTACK_RELEASES
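# Illustrative comparison sketch (not part of the original module); 'rocky'
# is just an example codename:
#
#   if CompareOpenStackReleases('rocky') >= 'queens':
#       # enable behaviour introduced in Queens or later
#       pass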
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
sys.exit(1)
def get_installed_semantic_versioned_packages():
'''Get a list of installed packages which have OpenStack semantic versioning
:returns List of installed packages
:rtype: [pkg1, pkg2, ...]
'''
return filter_missing_packages(PACKAGE_CODENAMES.keys())
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src is None:
return rel
if src in ['distro', 'distro-proposed', 'proposed']:
try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError:
e = 'Could not derive openstack release for '\
'this Ubuntu release: %s' % ubuntu_rel
error_out(e)
return rel
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('-')[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
if (src.startswith('deb') or
src.startswith('ppa') or
src.startswith('snap')):
for v in OPENSTACK_CODENAMES.values():
if v in src:
return v
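# Illustrative sketch (not part of the original module) of the mapping this
# function performs; the source strings are example values only:
#
#   get_os_codename_install_source('distro')
#       # -> codename paired with the running Ubuntu series
#   get_os_codename_install_source('cloud:bionic-rocky')
#       # -> 'rocky' (parsed from the cloud archive pocket)
#   get_os_codename_install_source('ppa:example/ussuri-staging')
#       # -> 'ussuri' (best-guess substring match)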
def get_os_version_install_source(src):
codename = get_os_codename_install_source(src)
return get_os_version_codename(codename)
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
'''Determine OpenStack version number from codename.'''
for k, v in six.iteritems(version_map):
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
'codename: %s' % codename
error_out(e)
def get_os_version_codename_swift(codename):
'''Determine OpenStack version number of swift from codename.'''
for k, v in six.iteritems(SWIFT_CODENAMES):
if k == codename:
return v[-1]
e = 'Could not derive swift version for '\
'codename: %s' % codename
error_out(e)
def get_swift_codename(version):
'''Determine OpenStack codename that corresponds to swift version.'''
codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
if len(codenames) > 1:
# If more than one release codename contains this version we determine
# the actual codename based on the highest available install source.
for codename in reversed(codenames):
releases = UBUNTU_OPENSTACK_RELEASE
release = [k for k, v in six.iteritems(releases) if codename in v]
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
if six.PY3:
ret = ret.decode('UTF-8')
if codename in ret or release[0] in ret:
return codename
elif len(codenames) == 1:
return codenames[0]
# NOTE: fallback - attempt to match with just major.minor version
match = re.match(r'^(\d+)\.(\d+)', version)
if match:
major_minor_version = match.group(0)
for codename, versions in six.iteritems(SWIFT_CODENAMES):
for release_version in versions:
if release_version.startswith(major_minor_version):
return codename
return None
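# Illustrative sketch (not part of the original module); the version strings
# are examples drawn from SWIFT_CODENAMES above:
#
#   get_swift_codename('2.18.0')   # -> 'rocky' (unique match)
#   get_swift_codename('2.25.0')   # listed under both 'ussuri' and
#                                  # 'victoria', so apt policy is consulted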
def get_os_codename_package(package, fatal=True):
"""Derive OpenStack release codename from an installed package.
Initially, see if the openstack-release pkg is available (by trying to
install it) and use it instead.
If it isn't then it falls back to the existing method of checking the
version of the package passed and then resolving the version from that
using lookup tables.
Note: if possible, charms should use get_installed_os_version() to
determine the version of the "openstack-release" pkg.
:param package: the package to test for version information.
:type package: str
:param fatal: If True (default), then die via error_out()
:type fatal: bool
:returns: the OpenStack release codename (e.g. ussuri)
:rtype: str
"""
codename = get_installed_os_version()
if codename:
return codename
if snap_install_requested():
cmd = ['snap', 'list', package]
try:
out = subprocess.check_output(cmd)
if six.PY3:
out = out.decode('UTF-8')
except subprocess.CalledProcessError:
return None
lines = out.split('\n')
for line in lines:
if package in line:
# Second item in list is Version
return line.split()[1]
cache = apt_cache()
try:
pkg = cache[package]
except Exception:
if not fatal:
return None
# the package is unknown to the current apt cache.
e = 'Could not determine version of package with no installation '\
'candidate: %s' % package
error_out(e)
if not pkg.current_ver:
if not fatal:
return None
# package is known, but no version is currently installed.
e = 'Could not determine version of uninstalled package: %s' % package
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
if 'swift' in pkg.name:
# Fully x.y.z match for swift versions
match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers)
else:
# x.y match only for 20XX.X
# and ignore patch level for other packages
match = re.match(r'^(\d+)\.(\d+)', vers)
if match:
vers = match.group(0)
# Generate a major version number for newer semantic
# versions of openstack projects
major_vers = vers.split('.')[0]
# >= Liberty independent project versions
if (package in PACKAGE_CODENAMES and
major_vers in PACKAGE_CODENAMES[package]):
return PACKAGE_CODENAMES[package][major_vers]
else:
# < Liberty co-ordinated project versions
try:
if 'swift' in pkg.name:
return get_swift_codename(vers)
else:
return OPENSTACK_CODENAMES[vers]
except KeyError:
if not fatal:
return None
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg, fatal=True):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg, fatal=fatal)
if not codename:
return None
if 'swift' in pkg:
vers_map = SWIFT_CODENAMES
for cname, version in six.iteritems(vers_map):
if cname == codename:
return version[-1]
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version
# e = "Could not determine OpenStack version for package: %s" % pkg
# error_out(e)
def get_installed_os_version():
"""Determine the OpenStack release code name from openstack-release pkg.
This uses the "openstack-release" pkg (if it exists) to return the
    OpenStack release codename (e.g. ussuri, mitaka, ocata, etc.)
Note, it caches the result so that it is only done once per hook.
:returns: the OpenStack release codename, if available
:rtype: Optional[str]
"""
@cached
def _do_install():
apt_install(filter_installed_packages(['openstack-release']),
fatal=False, quiet=True)
_do_install()
return openstack_release().get('OPENSTACK_CODENAME')
@cached
def openstack_release():
"""Return /etc/os-release in a dict."""
d = {}
try:
with open('/etc/openstack-release', 'r') as lsb:
for l in lsb:
s = l.split('=')
if len(s) != 2:
continue
d[s[0].strip()] = s[1].strip()
except FileNotFoundError:
pass
return d
# Module local cache variable for the os_release.
_os_rel = None
def reset_os_release():
'''Unset the cached os_release version'''
global _os_rel
_os_rel = None
def os_release(package, base=None, reset_cache=False, source_key=None):
"""Returns OpenStack release codename from a cached global.
If reset_cache then unset the cached os_release version and return the
freshly determined version.
If the codename can not be determined from either an installed package or
the installation source, the earliest release supported by the charm should
be returned.
:param package: Name of package to determine release from
:type package: str
    :param base: Fallback codename if endeavours to determine from package fail
:type base: Optional[str]
:param reset_cache: Reset any cached codename value
:type reset_cache: bool
:param source_key: Name of source configuration option
(default: 'openstack-origin')
:type source_key: Optional[str]
:returns: OpenStack release codename
:rtype: str
"""
source_key = source_key or 'openstack-origin'
if not base:
base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']]
global _os_rel
if reset_cache:
reset_os_release()
if _os_rel:
return _os_rel
_os_rel = (
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config(source_key)) or
base)
return _os_rel
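# Illustrative sketch (not part of the original module); 'keystone' and the
# fallback 'queens' are example arguments a charm might pass:
#
#   release = os_release('keystone', base='queens')
#   if CompareOpenStackReleases(release) >= 'ussuri':
#       # apply Ussuri-or-later configuration
#       pass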
@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log)
def import_key(keyid):
"""Import a key, either ASCII armored, or a GPG key id.
@param keyid: the key in ASCII armor format, or a GPG key id.
@raises SystemExit() via sys.exit() on failure.
"""
try:
return fetch_import_key(keyid)
except GPGKeyError as e:
error_out("Could not import key: {}".format(str(e)))
def get_source_and_pgp_key(source_and_key):
"""Look for a pgp key ID or ascii-armor key in the given input.
    :param source_and_key: String, "source_spec|keyid" where '|keyid' is
optional.
:returns (source_spec, key_id OR None) as a tuple. Returns None for key_id
if there was no '|' in the source_and_key string.
"""
try:
source, key = source_and_key.split('|', 2)
return source, key or None
except ValueError:
return source_and_key, None
@deprecate("use charmhelpers.fetch.add_source() instead.",
"2017-07", log=juju_log)
def configure_installation_source(source_plus_key):
"""Configure an installation source.
The functionality is provided by charmhelpers.fetch.add_source()
The difference between the two functions is that add_source() signature
requires the key to be passed directly, whereas this function passes an
    optional key by appending '|<key>' to the end of the source specification
'source'.
Another difference from add_source() is that the function calls sys.exit(1)
if the configuration fails, whereas add_source() raises
    SourceConfigError(). Another difference is that add_source()
silently fails (with a juju_log command) if there is no matching source to
configure, whereas this function fails with a sys.exit(1)
    :param source_plus_key: source spec with optional '|<key>' suffix -- see above for details.
Note that the behaviour on error is to log the error to the juju log and
then call sys.exit(1).
"""
if source_plus_key.startswith('snap'):
# Do nothing for snap installs
return
# extract the key if there is one, denoted by a '|' in the rel
source, key = get_source_and_pgp_key(source_plus_key)
# handle the ordinary sources via add_source
try:
fetch_add_source(source, key, fail_invalid=True)
except SourceConfigError as se:
error_out(str(se))
def config_value_changed(option):
"""
Determine if config value changed since last call to this function.
"""
hook_data = unitdata.HookData()
with hook_data():
db = unitdata.kv()
current = config(option)
saved = db.get(option)
db.set(option, current)
if saved is None:
return False
return current != saved
def get_endpoint_key(service_name, relation_id, unit_name):
"""Return the key used to refer to an ep changed notification from a unit.
:param service_name: Service name eg nova, neutron, placement etc
:type service_name: str
:param relation_id: The id of the relation the unit is on.
:type relation_id: str
:param unit_name: The name of the unit publishing the notification.
:type unit_name: str
:returns: The key used to refer to an ep changed notification from a unit
:rtype: str
"""
return '{}-{}-{}'.format(
service_name,
relation_id.replace(':', '_'),
unit_name.replace('/', '_'))
def get_endpoint_notifications(service_names, rel_name='identity-service'):
"""Return all notifications for the given services.
:param service_names: List of service name.
:type service_name: List
:param rel_name: Name of the relation to query
:type rel_name: str
:returns: A dict containing the source of the notification and its nonce.
:rtype: Dict[str, str]
"""
notifications = {}
for rid in relation_ids(rel_name):
for unit in related_units(relid=rid):
ep_changed_json = relation_get(
rid=rid,
unit=unit,
attribute='ep_changed')
if ep_changed_json:
ep_changed = json.loads(ep_changed_json)
for service in service_names:
if ep_changed.get(service):
key = get_endpoint_key(service, rid, unit)
notifications[key] = ep_changed[service]
return notifications
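# Illustrative sketch (not part of the original module) of the key/nonce
# structure returned above; the service, relation, unit and nonce values are
# made-up examples:
#
#   {'placement-identity-service_0-keystone_0': 'some-nonce-uuid'}
#
# i.e. get_endpoint_key('placement', 'identity-service:0', 'keystone/0')
# mapped to the nonce that unit published as 'ep_changed'.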
def endpoint_changed(service_name, rel_name='identity-service'):
"""Whether a new notification has been recieved for an endpoint.
:param service_name: Service name eg nova, neutron, placement etc
:type service_name: str
:param rel_name: Name of the relation to query
:type rel_name: str
:returns: Whether endpoint has changed
:rtype: bool
"""
changed = False
with unitdata.HookData()() as t:
db = t[0]
notifications = get_endpoint_notifications(
[service_name],
rel_name=rel_name)
for key, nonce in notifications.items():
if db.get(key) != nonce:
juju_log(('New endpoint change notification found: '
'{}={}').format(key, nonce),
'INFO')
changed = True
break
return changed
def save_endpoint_changed_triggers(service_names, rel_name='identity-service'):
"""Save the enpoint triggers in db so it can be tracked if they changed.
:param service_names: List of service name.
:type service_name: List
:param rel_name: Name of the relation to query
:type rel_name: str
"""
with unitdata.HookData()() as t:
db = t[0]
notifications = get_endpoint_notifications(
service_names,
rel_name=rel_name)
for key, nonce in notifications.items():
db.set(key, nonce)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
exported environment variables provided by env_vars. Any charm scripts run
outside the juju hook environment can source this scriptrc to obtain
updated config information necessary to perform health checks or
service changes.
"""
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
if not os.path.exists(os.path.dirname(juju_rc_path)):
os.mkdir(os.path.dirname(juju_rc_path))
with open(juju_rc_path, 'wt') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in six.iteritems(env_vars) if u != "script_path"]
def openstack_upgrade_available(package):
"""
Determines if an OpenStack upgrade is available from installation
source, based on version of installed package.
:param package: str: Name of installed package.
:returns: bool: : Returns True if configured installation source offers
a newer version of package.
"""
src = config('openstack-origin')
cur_vers = get_os_version_package(package)
if not cur_vers:
# The package has not been installed yet do not attempt upgrade
return False
if "swift" in package:
codename = get_os_codename_install_source(src)
avail_vers = get_os_version_codename_swift(codename)
else:
try:
avail_vers = get_os_version_install_source(src)
except Exception:
avail_vers = cur_vers
apt.init()
return apt.version_compare(avail_vers, cur_vers) >= 1
def ensure_block_device(block_device):
'''
Confirm block_device, create as loopback if necessary.
:param block_device: str: Full path of block device to ensure.
:returns: str: Full path of ensured block device.
'''
_none = ['None', 'none', None]
if (block_device in _none):
error_out('prepare_storage(): Missing required input: block_device=%s.'
% block_device)
if block_device.startswith('/dev/'):
bdev = block_device
elif block_device.startswith('/'):
_bd = block_device.split('|')
if len(_bd) == 2:
bdev, size = _bd
else:
bdev = block_device
size = DEFAULT_LOOPBACK_SIZE
bdev = ensure_loopback_device(bdev, size)
else:
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
error_out('Failed to locate valid block device at %s' % bdev)
return bdev
def clean_storage(block_device):
'''
Ensures a block device is clean. That is:
- unmounted
- any lvm volume groups are deactivated
- any lvm physical device signatures removed
- partition table wiped
:param block_device: str: Full path to block device to clean.
'''
for mp, d in mounts():
if d == block_device:
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
(d, mp), level=INFO)
umount(mp, persist=True)
if is_lvm_physical_volume(block_device):
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)
else:
zap_disk(block_device)
is_ip = ip.is_ip
ns_query = ip.ns_query
get_host_ip = ip.get_host_ip
get_hostname = ip.get_hostname
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
mm_map = {}
if os.path.isfile(mm_file):
with open(mm_file, 'r') as f:
mm_map = json.load(f)
return mm_map
def sync_db_with_multi_ipv6_addresses(database, database_user,
relation_prefix=None):
hosts = get_ipv6_addr(dynamic_only=False)
if config('vip'):
vips = config('vip').split()
for vip in vips:
if vip and is_ipv6(vip):
hosts.append(vip)
kwargs = {'database': database,
'username': database_user,
'hostname': json.dumps(hosts)}
if relation_prefix:
for key in list(kwargs.keys()):
kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
del kwargs[key]
for rid in relation_ids('shared-db'):
relation_set(relation_id=rid, **kwargs)
def os_requires_version(ostack_release, pkg):
"""
Decorator for hook to specify minimum supported release
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
return wrapped_f
return wrap
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
# Run the original function first
f(*args, **kwargs)
# Set workload status now that contexts have been
# acted on
set_os_workload_status(configs, required_interfaces, charm_func)
return wrapped_f
return wrap
def set_os_workload_status(configs, required_interfaces, charm_func=None,
services=None, ports=None):
"""Set the state of the workload status for the charm.
This calls _determine_os_workload_status() to get the new state, message
and sets the status using status_set()
@param configs: a templating.OSConfigRenderer() object
@param required_interfaces: {generic: [specific, specific2, ...]}
@param charm_func: a callable function that returns state, message. The
signature is charm_func(configs) -> (state, message)
@param services: list of strings OR dictionary specifying services/ports
@param ports: OPTIONAL list of port numbers.
@returns state, message: the new workload status, user message
"""
state, message = _determine_os_workload_status(
configs, required_interfaces, charm_func, services, ports)
status_set(state, message)
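# Illustrative call sketch (not part of the original module); the interface
# map mirrors the example used by incomplete_relation_data() below, and
# my_charm_check is a hypothetical per-charm status callback:
#
#   required_interfaces = {
#       'database': ['shared-db', 'pgsql-db'],
#       'identity': ['identity-service'],
#   }
#   set_os_workload_status(configs, required_interfaces,
#                          charm_func=my_charm_check)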
def _determine_os_workload_status(
configs, required_interfaces, charm_func=None,
services=None, ports=None):
"""Determine the state of the workload status for the charm.
This function returns the new workload status for the charm based
on the state of the interfaces, the paused state and whether the
services are actually running and any specified ports are open.
This checks:
1. if the unit should be paused, that it is actually paused. If so the
state is 'maintenance' + message, else 'broken'.
2. that the interfaces/relations are complete. If they are not then
it sets the state to either 'broken' or 'waiting' and an appropriate
message.
3. If all the relation data is set, then it checks that the actual
services really are running. If not it sets the state to 'broken'.
If everything is okay then the state returns 'active'.
@param configs: a templating.OSConfigRenderer() object
@param required_interfaces: {generic: [specific, specific2, ...]}
@param charm_func: a callable function that returns state, message. The
signature is charm_func(configs) -> (state, message)
@param services: list of strings OR dictionary specifying services/ports
@param ports: OPTIONAL list of port numbers.
@returns state, message: the new workload status, user message
"""
state, message = _ows_check_if_paused(services, ports)
if state is None:
state, message = _ows_check_generic_interfaces(
configs, required_interfaces)
if state != 'maintenance' and charm_func:
# _ows_check_charm_func() may modify the state, message
state, message = _ows_check_charm_func(
state, message, lambda: charm_func(configs))
if state is None:
state, message = _ows_check_services_running(services, ports)
if state is None:
state = 'active'
message = "Unit is ready"
juju_log(message, 'INFO')
try:
if config(POLICYD_CONFIG_NAME):
message = "{} {}".format(policyd_status_message_prefix(), message)
except Exception:
pass
return state, message
def _ows_check_if_paused(services=None, ports=None):
"""Check if the unit is supposed to be paused, and if so check that the
services/ports (if passed) are actually stopped/not being listened to.
If the unit isn't supposed to be paused, just return None, None
If the unit is performing a series upgrade, return a message indicating
this.
@param services: OPTIONAL services spec or list of service names.
@param ports: OPTIONAL list of port numbers.
@returns state, message or None, None
"""
if is_unit_upgrading_set():
state, message = check_actually_paused(services=services,
ports=ports)
if state is None:
# we're paused okay, so set maintenance and return
state = "blocked"
message = ("Ready for do-release-upgrade and reboot. "
"Set complete when finished.")
return state, message
if is_unit_paused_set():
state, message = check_actually_paused(services=services,
ports=ports)
if state is None:
# we're paused okay, so set maintenance and return
state = "maintenance"
message = "Paused. Use 'resume' action to resume normal service."
return state, message
return None, None
def _ows_check_generic_interfaces(configs, required_interfaces):
"""Check the complete contexts to determine the workload status.
- Checks for missing or incomplete contexts
- juju log details of missing required data.
- determines the correct workload status
- creates an appropriate message for status_set(...)
if there are no problems then the function returns None, None
@param configs: a templating.OSConfigRenderer() object
@params required_interfaces: {generic_interface: [specific_interface], }
@returns state, message or None, None
"""
incomplete_rel_data = incomplete_relation_data(configs,
required_interfaces)
state = None
message = None
missing_relations = set()
incomplete_relations = set()
for generic_interface, relations_states in incomplete_rel_data.items():
related_interface = None
missing_data = {}
# Related or not?
for interface, relation_state in relations_states.items():
if relation_state.get('related'):
related_interface = interface
missing_data = relation_state.get('missing_data')
break
# No relation ID for the generic_interface?
if not related_interface:
juju_log("{} relation is missing and must be related for "
"functionality. ".format(generic_interface), 'WARN')
state = 'blocked'
missing_relations.add(generic_interface)
else:
            # Relation ID exists but no related unit
if not missing_data:
                # Edge case - relation ID exists but the unit is departing
_hook_name = hook_name()
if (('departed' in _hook_name or 'broken' in _hook_name) and
related_interface in _hook_name):
state = 'blocked'
missing_relations.add(generic_interface)
juju_log("{} relation's interface, {}, "
"relationship is departed or broken "
"and is required for functionality."
"".format(generic_interface, related_interface),
"WARN")
# Normal case relation ID exists but no related unit
# (joining)
else:
juju_log("{} relations's interface, {}, is related but has"
" no units in the relation."
"".format(generic_interface, related_interface),
"INFO")
# Related unit exists and data missing on the relation
else:
juju_log("{} relation's interface, {}, is related awaiting "
"the following data from the relationship: {}. "
"".format(generic_interface, related_interface,
", ".join(missing_data)), "INFO")
if state != 'blocked':
state = 'waiting'
if generic_interface not in missing_relations:
incomplete_relations.add(generic_interface)
if missing_relations:
message = "Missing relations: {}".format(", ".join(missing_relations))
if incomplete_relations:
message += "; incomplete relations: {}" \
"".format(", ".join(incomplete_relations))
state = 'blocked'
elif incomplete_relations:
message = "Incomplete relations: {}" \
"".format(", ".join(incomplete_relations))
state = 'waiting'
return state, message
def _ows_check_charm_func(state, message, charm_func_with_configs):
"""Run a custom check function for the charm to see if it wants to
change the state. This is only run if not in 'maintenance' and
    tests to see if the new state is more important than the previous
one determined by the interfaces/relations check.
@param state: the previously determined state so far.
@param message: the user orientated message so far.
@param charm_func: a callable function that returns state, message
@returns state, message strings.
"""
if charm_func_with_configs:
charm_state, charm_message = charm_func_with_configs()
if (charm_state != 'active' and
charm_state != 'unknown' and
charm_state is not None):
state = workload_state_compare(state, charm_state)
if message:
charm_message = charm_message.replace("Incomplete relations: ",
"")
message = "{}, {}".format(message, charm_message)
else:
message = charm_message
return state, message
def _ows_check_services_running(services, ports):
"""Check that the services that should be running are actually running
and that any ports specified are being listened to.
@param services: list of strings OR dictionary specifying services/ports
@param ports: list of ports
@returns state, message: strings or None, None
"""
messages = []
state = None
if services is not None:
services = _extract_services_list_helper(services)
services_running, running = _check_running_services(services)
if not all(running):
messages.append(
"Services not running that should be: {}"
.format(", ".join(_filter_tuples(services_running, False))))
state = 'blocked'
# also verify that the ports that should be open are open
# NB, that ServiceManager objects only OPTIONALLY have ports
map_not_open, ports_open = (
_check_listening_on_services_ports(services))
if not all(ports_open):
# find which service has missing ports. They are in service
# order which makes it a bit easier.
message_parts = {service: ", ".join([str(v) for v in open_ports])
for service, open_ports in map_not_open.items()}
message = ", ".join(
["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
messages.append(
"Services with ports not open that should be: {}"
.format(message))
state = 'blocked'
if ports is not None:
# and we can also check ports which we don't know the service for
ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
if not all(ports_open_bools):
messages.append(
"Ports which should be open, but are not: {}"
.format(", ".join([str(p) for p, v in ports_open
if not v])))
state = 'blocked'
if state is not None:
message = "; ".join(messages)
return state, message
return None, None
def _extract_services_list_helper(services):
"""Extract a OrderedDict of {service: [ports]} of the supplied services
for use by the other functions.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param services: see above
@returns OrderedDict(service: [ports], ...)
"""
if services is None:
return {}
if isinstance(services, dict):
services = services.values()
# either extract the list of services from the dictionary, or if
# it is a simple string, use that. i.e. works with mixed lists.
_s = OrderedDict()
for s in services:
if isinstance(s, dict) and 'service' in s:
_s[s['service']] = s.get('ports', [])
if isinstance(s, str):
_s[s] = []
return _s
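# Illustrative sketch (not part of the original module) of the accepted
# service specs; the service names and ports are examples only:
#
#   _extract_services_list_helper(['apache2', 'haproxy'])
#       # -> OrderedDict([('apache2', []), ('haproxy', [])])
#   _extract_services_list_helper([{'service': 'haproxy', 'ports': [80, 443]}])
#       # -> OrderedDict([('haproxy', [80, 443])])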
def _check_running_services(services):
"""Check that the services dict provided is actually running and provide
a list of (service, boolean) tuples for each service.
Returns both a zipped list of (service, boolean) and a list of booleans
in the same order as the services.
@param services: OrderedDict of strings: [ports], one for each service to
check.
@returns [(service, boolean), ...], : results for checks
[boolean] : just the result of the service checks
"""
services_running = [service_running(s) for s in services]
return list(zip(services, services_running)), services_running
def _check_listening_on_services_ports(services, test=False):
"""Check that the unit is actually listening (has the port open) on the
ports that the service specifies are open. If test is True then the
function returns the services with ports that are open rather than
closed.
Returns an OrderedDict of service: ports and a list of booleans
@param services: OrderedDict(service: [port, ...], ...)
@param test: default=False, if False, test for closed, otherwise open.
@returns OrderedDict(service: [port-not-open, ...]...), [boolean]
"""
test = not(not(test)) # ensure test is True or False
all_ports = list(itertools.chain(*services.values()))
ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
map_ports = OrderedDict()
matched_ports = [p for p, opened in zip(all_ports, ports_states)
if opened == test] # essentially opened xor test
for service, ports in services.items():
set_ports = set(ports).intersection(matched_ports)
if set_ports:
map_ports[service] = set_ports
return map_ports, ports_states
def _check_listening_on_ports_list(ports):
"""Check that the ports list given are being listened to
Returns a list of ports being listened to and a list of the
booleans.
@param ports: LIST of port numbers.
@returns [(port_num, boolean), ...], [boolean]
"""
ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
return zip(ports, ports_open), ports_open
def _filter_tuples(services_states, state):
"""Return a simple list from a list of tuples according to the condition
@param services_states: LIST of (string, boolean): service and running
state.
@param state: Boolean to match the tuple against.
@returns [LIST of strings] that matched the tuple RHS.
"""
return [s for s, b in services_states if b == state]
def workload_state_compare(current_workload_state, workload_state):
""" Return highest priority of two states"""
hierarchy = {'unknown': -1,
'active': 0,
'maintenance': 1,
'waiting': 2,
'blocked': 3,
}
if hierarchy.get(workload_state) is None:
workload_state = 'unknown'
if hierarchy.get(current_workload_state) is None:
current_workload_state = 'unknown'
# Set workload_state based on hierarchy of statuses
if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
return current_workload_state
else:
return workload_state
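# Illustrative sketch (not part of the original module); the inputs are
# example workload states:
#
#   workload_state_compare('waiting', 'blocked')      # -> 'blocked'
#   workload_state_compare('active', 'maintenance')   # -> 'maintenance'
#   workload_state_compare('bogus', 'active')         # unknown ranks below
#                                                     # active, so -> 'active'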
def incomplete_relation_data(configs, required_interfaces):
"""Check complete contexts against required_interfaces
Return dictionary of incomplete relation data.
configs is an OSConfigRenderer object with configs registered
required_interfaces is a dictionary of required general interfaces
with dictionary values of possible specific interfaces.
Example:
required_interfaces = {'database': ['shared-db', 'pgsql-db']}
The interface is said to be satisfied if anyone of the interfaces in the
list has a complete context.
Return dictionary of incomplete or missing required contexts with relation
status of interfaces and any missing data points. Example:
{'message':
{'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
'zeromq-configuration': {'related': False}},
'identity':
{'identity-service': {'related': False}},
'database':
{'pgsql-db': {'related': False},
'shared-db': {'related': True}}}
"""
complete_ctxts = configs.complete_contexts()
incomplete_relations = [
svc_type
for svc_type, interfaces in required_interfaces.items()
if not set(interfaces).intersection(complete_ctxts)]
return {
i: configs.get_incomplete_context_data(required_interfaces[i])
for i in incomplete_relations}
def do_action_openstack_upgrade(package, upgrade_callback, configs):
"""Perform action-managed OpenStack upgrade.
Upgrades packages to the configured openstack-origin version and sets
the corresponding action status as a result.
If the charm was installed from source we cannot upgrade it.
For backwards compatibility a config flag (action-managed-upgrade) must
be set for this code to run, otherwise a full service level upgrade will
fire on config-changed.
@param package: package name for determining if upgrade available
@param upgrade_callback: function callback to charm's upgrade function
@param configs: templating object derived from OSConfigRenderer class
@return: True if upgrade successful; False if upgrade failed or skipped
"""
ret = False
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
else:
action_set({'outcome': 'no upgrade available.'})
return ret
def remote_restart(rel_name, remote_service=None):
trigger = {
'restart-trigger': str(uuid.uuid4()),
}
if remote_service:
trigger['remote-service'] = remote_service
for rid in relation_ids(rel_name):
        # This subordinate can be related to two separate services using
        # different subordinate relations so only issue the restart if
        # the principal is connected down the relation we think it is
if related_units(relid=rid):
relation_set(relation_id=rid,
relation_settings=trigger,
)
def check_actually_paused(services=None, ports=None):
"""Check that services listed in the services object and ports
are actually closed (not listened to), to verify that the unit is
properly paused.
@param services: See _extract_services_list_helper
@returns status, : string for status (None if okay)
message : string for problem for status_set
"""
state = None
message = None
messages = []
if services is not None:
services = _extract_services_list_helper(services)
services_running, services_states = _check_running_services(services)
if any(services_states):
# there shouldn't be any running so this is a problem
messages.append("these services running: {}"
.format(", ".join(
_filter_tuples(services_running, True))))
state = "blocked"
ports_open, ports_open_bools = (
_check_listening_on_services_ports(services, True))
if any(ports_open_bools):
message_parts = {service: ", ".join([str(v) for v in open_ports])
for service, open_ports in ports_open.items()}
message = ", ".join(
["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
messages.append(
"these service:ports are open: {}".format(message))
state = 'blocked'
if ports is not None:
ports_open, bools = _check_listening_on_ports_list(ports)
if any(bools):
messages.append(
"these ports which should be closed, but are open: {}"
.format(", ".join([str(p) for p, v in ports_open if v])))
state = 'blocked'
if messages:
message = ("Services should be paused but {}"
.format(", ".join(messages)))
return state, message
def set_unit_paused():
"""Set the unit to a paused state in the local kv() store.
This does NOT actually pause the unit
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-paused', True)
def clear_unit_paused():
"""Clear the unit from a paused state in the local kv() store
This does NOT actually restart any services - it only clears the
local state.
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-paused', False)
def is_unit_paused_set():
"""Return the state of the kv().get('unit-paused').
This does NOT verify that the unit really is paused.
To help with units that don't have HookData() (testing)
if it excepts, return False
"""
try:
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-paused')))
except Exception:
return False
def manage_payload_services(action, services=None, charm_func=None):
"""Run an action against all services.
An optional charm_func() can be called. It should raise an Exception to
    indicate that the function failed. If it was successful it should return
None or an optional message.
The signature for charm_func is:
charm_func() -> message: str
charm_func() is executed after any services are stopped, if supplied.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
:param action: Action to run: pause, resume, start or stop.
:type action: str
:param services: See above
:type services: See above
:param charm_func: function to run for custom charm pausing.
:type charm_func: f()
:returns: Status boolean and list of messages
:rtype: (bool, [])
:raises: RuntimeError
"""
actions = {
'pause': service_pause,
'resume': service_resume,
'start': service_start,
'stop': service_stop}
action = action.lower()
if action not in actions.keys():
raise RuntimeError(
"action: {} must be one of: {}".format(action,
', '.join(actions.keys())))
services = _extract_services_list_helper(services)
messages = []
success = True
if services:
for service in services.keys():
rc = actions[action](service)
if not rc:
success = False
messages.append("{} didn't {} cleanly.".format(service,
action))
if charm_func:
try:
message = charm_func()
if message:
messages.append(message)
except Exception as e:
success = False
messages.append(str(e))
return success, messages
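# Illustrative call sketch (not part of the original module); the service
# names and the no-op callback are hypothetical:
#
#   ok, msgs = manage_payload_services(
#       'pause',
#       services=['apache2', 'haproxy'],
#       charm_func=lambda: None)
#   if not ok:
#       action_fail('; '.join(msgs))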
def make_wait_for_ports_barrier(ports, retry_count=5):
"""Make a function to wait for port shutdowns.
Create a function which closes over the provided ports. The function will
retry probing ports until they are closed or the retry count has been reached.
"""
@decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1)
def retry_port_check():
_, ports_states = _check_listening_on_ports_list(ports)
juju_log("Probe ports {}, result: {}".format(ports, ports_states), level="DEBUG")
return any(ports_states)
return retry_port_check
def pause_unit(assess_status_func, services=None, ports=None,
charm_func=None):
"""Pause a unit by stopping the services and setting 'unit-paused'
in the local kv() store.
Also checks that the services have stopped and ports are no longer
being listened to.
An optional charm_func() can be called that can either raise an
    Exception or return a non-None message to indicate that the unit
didn't pause cleanly.
The signature for charm_func is:
charm_func() -> message: string
charm_func() is executed after any services are stopped, if supplied.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param assess_status_func: (f() -> message: string | None) or None
@param services: OPTIONAL see above
@param ports: OPTIONAL list of port
@param charm_func: function to run for custom charm pausing.
@returns None
@raises Exception(message) on an error for action_fail().
"""
_, messages = manage_payload_services(
'pause',
services=services,
charm_func=charm_func)
set_unit_paused()
if assess_status_func:
message = assess_status_func()
if message:
messages.append(message)
if messages and not is_unit_upgrading_set():
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
def resume_unit(assess_status_func, services=None, ports=None,
charm_func=None):
"""Resume a unit by starting the services and clearning 'unit-paused'
in the local kv() store.
Also checks that the services have started and ports are being listened to.
An optional charm_func() can be called that can either raise an
    Exception or return a non-None message to indicate that the unit
didn't resume cleanly.
The signature for charm_func is:
charm_func() -> message: string
charm_func() is executed after any services are started, if supplied.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param assess_status_func: (f() -> message: string | None) or None
@param services: OPTIONAL see above
@param ports: OPTIONAL list of port
@param charm_func: function to run for custom charm resuming.
@returns None
@raises Exception(message) on an error for action_fail().
"""
_, messages = manage_payload_services(
'resume',
services=services,
charm_func=charm_func)
clear_unit_paused()
if assess_status_func:
message = assess_status_func()
if message:
messages.append(message)
if messages:
raise Exception("Couldn't resume: {}".format("; ".join(messages)))
def make_assess_status_func(*args, **kwargs):
"""Creates an assess_status_func() suitable for handing to pause_unit()
and resume_unit().
This uses the _determine_os_workload_status(...) function to determine
what the workload_status should be for the unit. If the unit is
not in maintenance or active states, then the message is returned to
the caller. This is so an action that doesn't result in either a
complete pause or complete resume can signal failure with an action_fail()
"""
def _assess_status_func():
state, message = _determine_os_workload_status(*args, **kwargs)
status_set(state, message)
if state not in ['maintenance', 'active']:
return message
return None
return _assess_status_func
def pausable_restart_on_change(restart_map, stopstart=False,
restart_functions=None,
can_restart_now_f=None,
post_svc_restart_f=None,
pre_restarts_wait_f=None):
"""A restart_on_change decorator that checks to see if the unit is
paused. If it is paused then the decorated function doesn't fire.
This is provided as a helper, as the @restart_on_change(...) decorator
is in core.host, yet the openstack specific helpers are in this file
(contrib.openstack.utils). Thus, this needs to be an optional feature
for openstack charms (or charms that wish to use the openstack
pause/resume type features).
It is used as follows:
from contrib.openstack.utils import (
pausable_restart_on_change as restart_on_change)
@restart_on_change(restart_map, stopstart=<boolean>)
def some_hook(...):
pass
see core.utils.restart_on_change() for more details.
Note restart_map can be a callable, in which case, restart_map is only
evaluated at runtime. This means that it is lazy and the underlying
function won't be called if the decorated function is never called. Note,
retains backwards compatibility for passing a non-callable dictionary.
:param f: function to decorate.
:type f: Callable
:param restart_map: Optionally callable, which then returns the restart_map or
the restart map {conf_file: [services]}
    :type restart_map: Union[Callable[[], Dict[str, List[str]]], Dict[str, List[str]]]
:param stopstart: whether to stop, start or restart a service
    :type stopstart: boolean
:param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
:type restart_functions: Dict[str, Callable[[str], None]]
:param can_restart_now_f: A function used to check if the restart is
permitted.
:type can_restart_now_f: Callable[[str, List[str]], boolean]
:param post_svc_restart_f: A function run after a service has
restarted.
:type post_svc_restart_f: Callable[[str], None]
    :param pre_restarts_wait_f: A function called before any restarts.
:type pre_restarts_wait_f: Callable[None, None]
:returns: decorator to use a restart_on_change with pausability
:rtype: decorator
"""
def wrap(f):
# py27 compatible nonlocal variable. When py3 only, replace with
# nonlocal keyword
__restart_map_cache = {'cache': None}
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
if is_unit_paused_set():
return f(*args, **kwargs)
if __restart_map_cache['cache'] is None:
__restart_map_cache['cache'] = restart_map() \
if callable(restart_map) else restart_map
# otherwise, normal restart_on_change functionality
return restart_on_change_helper(
(lambda: f(*args, **kwargs)),
__restart_map_cache['cache'],
stopstart,
restart_functions,
can_restart_now_f,
post_svc_restart_f,
pre_restarts_wait_f)
return wrapped_f
return wrap
def ordered(orderme):
"""Converts the provided dictionary into a collections.OrderedDict.
The items in the returned OrderedDict will be inserted based on the
natural sort order of the keys. Nested dictionaries will also be sorted
in order to ensure fully predictable ordering.
:param orderme: the dict to order
:return: collections.OrderedDict
:raises: ValueError: if `orderme` isn't a dict instance.
"""
if not isinstance(orderme, dict):
raise ValueError('argument must be a dict type')
result = OrderedDict()
for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]):
if isinstance(v, dict):
result[k] = ordered(v)
else:
result[k] = v
return result
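# Illustrative sketch (not part of the original module); nested dicts are
# sorted recursively by key:
#
#   ordered({'b': 2, 'a': {'d': 4, 'c': 3}})
#       # -> OrderedDict([('a', OrderedDict([('c', 3), ('d', 4)])), ('b', 2)])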
def config_flags_parser(config_flags):
"""Parses config flags string into dict.
This parsing method supports a few different formats for the config
flag values to be parsed:
1. A string in the simple format of key=value pairs, with the possibility
of specifying multiple key value pairs within the same string. For
example, a string in the format of 'key1=value1, key2=value2' will
return a dict of:
{'key1': 'value1', 'key2': 'value2'}.
2. A string in the above format, but supporting a comma-delimited list
of values for the same key. For example, a string in the format of
'key1=value1, key2=value3,value4,value5' will return a dict of:
             {'key1': 'value1', 'key2': 'value3,value4,value5'}
3. A string containing a colon character (:) prior to an equal
character (=) will be treated as yaml and parsed as such. This can be
used to specify more complex key value pairs. For example,
a string in the format of 'key1: subkey1=value1, subkey2=value2' will
return a dict of:
             {'key1': 'subkey1=value1, subkey2=value2'}
The provided config_flags string may be a list of comma-separated values
which themselves may be comma-separated list of values.
"""
# If we find a colon before an equals sign then treat it as yaml.
# Note: limit it to finding the colon first since this indicates assignment
# for inline yaml.
colon = config_flags.find(':')
equals = config_flags.find('=')
if colon > 0:
if colon < equals or equals < 0:
return ordered(yaml.safe_load(config_flags))
if config_flags.find('==') >= 0:
juju_log("config_flags is not in expected format (key=value)",
level=ERROR)
raise OSContextError
# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = OrderedDict()
for i in range(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]
if i == 0:
key = current
else:
# if this not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
juju_log("Invalid config value(s) at index %s" % (i),
level=ERROR)
raise OSContextError
key = current[index + 1:]
# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags
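# Hedged usage sketch covering the three documented formats; the input strings
# below are illustrative only.
def _config_flags_parser_examples():
    simple = config_flags_parser('key1=value1, key2=value2')
    # -> {'key1': 'value1', 'key2': 'value2'}
    multi = config_flags_parser('key1=value1, key2=value3,value4,value5')
    # -> {'key1': 'value1', 'key2': 'value3,value4,value5'}
    inline_yaml = config_flags_parser('key1: subkey1=value1, subkey2=value2')
    # -> {'key1': 'subkey1=value1, subkey2=value2'}
    return simple, multi, inline_yaml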
def os_application_version_set(package):
'''Set version of application for Juju 2.0 and later'''
application_version = get_upstream_version(package)
# NOTE(jamespage) if not able to figure out package version, fallback to
# openstack codename version detection.
if not application_version:
application_version_set(os_release(package))
else:
application_version_set(application_version)
def os_application_status_set(check_function):
"""Run the supplied function and set the application status accordingly.
:param check_function: Function to run to get app states and messages.
:type check_function: function
"""
state, message = check_function()
status_set(state, message, application=True)
def enable_memcache(source=None, release=None, package=None):
"""Determine if memcache should be enabled on the local unit
@param release: release of OpenStack currently deployed
@param package: package to derive OpenStack version deployed
@returns boolean Whether memcache should be enabled
"""
_release = None
if release:
_release = release
else:
_release = os_release(package)
if not _release:
_release = get_os_codename_install_source(source)
return CompareOpenStackReleases(_release) >= 'mitaka'
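# Minimal sketch under assumed release names: memcache is only enabled for
# mitaka or later, so an explicit pre-mitaka release returns False.
def _enable_memcache_examples():
    return (enable_memcache(release='liberty'),  # False
            enable_memcache(release='queens'))   # True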
def token_cache_pkgs(source=None, release=None):
"""Determine additional packages needed for token caching
@param source: source string for charm
@param release: release of OpenStack currently deployed
@returns List of package to enable token caching
"""
packages = []
if enable_memcache(source=source, release=release):
packages.extend(['memcached', 'python-memcache'])
return packages
def update_json_file(filename, items):
"""Updates the json `filename` with a given dict.
:param filename: path to json file (e.g. /etc/glance/policy.json)
:param items: dict of items to update
"""
if not items:
return
with open(filename) as fd:
policy = json.load(fd)
# Compare before and after and if nothing has changed don't write the file
# since that could cause unnecessary service restarts.
before = json.dumps(policy, indent=4, sort_keys=True)
policy.update(items)
after = json.dumps(policy, indent=4, sort_keys=True)
if before == after:
return
with open(filename, "w") as fd:
fd.write(after)
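# Hypothetical usage sketch: the policy path and rule below are placeholders,
# not values used elsewhere in this module.
def _update_json_file_example():
    update_json_file('/etc/glance/policy.json',
                     {'get_image_location': 'role:admin'})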
@cached
def snap_install_requested():
""" Determine if installing from snaps
If openstack-origin is of the form snap:track/channel[/branch]
and channel is in SNAPS_CHANNELS return True.
"""
origin = config('openstack-origin') or ""
if not origin.startswith('snap:'):
return False
_src = origin[5:]
if '/' in _src:
channel = _src.split('/')[1]
else:
# Handle snap:track with no channel
channel = 'stable'
return valid_snap_channel(channel)
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
"""Generate a dictionary of snap install information from origin
@param snaps: List of snaps
@param src: String of openstack-origin or source of the form
snap:track/channel
@param mode: String classic, devmode or jailmode
@returns: Dictionary of snaps with channels and modes
"""
if not src.startswith('snap:'):
juju_log("Snap source is not a snap origin", 'WARN')
return {}
_src = src[5:]
channel = '--channel={}'.format(_src)
return {snap: {'channel': channel, 'mode': mode}
for snap in snaps}
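# Illustrative sketch with an assumed snap origin string.
def _snap_origin_example():
    return get_snaps_install_info_from_origin(
        ['keystone'], 'snap:ocata/stable', mode='classic')
    # -> {'keystone': {'channel': '--channel=ocata/stable', 'mode': 'classic'}}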
def install_os_snaps(snaps, refresh=False):
"""Install OpenStack snaps from channel and with mode
@param snaps: Dictionary of snaps with channels and modes of the form:
{'snap_name': {'channel': 'snap_channel',
'mode': 'snap_mode'}}
Where channel is a snapstore channel and mode is --classic, --devmode
or --jailmode.
    @param refresh: If True, refresh snaps that are already installed instead
        of installing them
"""
def _ensure_flag(flag):
if flag.startswith('--'):
return flag
return '--{}'.format(flag)
if refresh:
for snap in snaps.keys():
snap_refresh(snap,
_ensure_flag(snaps[snap]['channel']),
_ensure_flag(snaps[snap]['mode']))
else:
for snap in snaps.keys():
snap_install(snap,
_ensure_flag(snaps[snap]['channel']),
_ensure_flag(snaps[snap]['mode']))
def set_unit_upgrading():
"""Set the unit to a upgrading state in the local kv() store.
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-upgrading', True)
def clear_unit_upgrading():
"""Clear the unit from a upgrading state in the local kv() store
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-upgrading', False)
def is_unit_upgrading_set():
"""Return the state of the kv().get('unit-upgrading').
    To help with units that don't have HookData() (e.g. during testing),
    return False if the lookup raises an exception.
"""
try:
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-upgrading')))
except Exception:
return False
def series_upgrade_prepare(pause_unit_helper=None, configs=None):
""" Run common series upgrade prepare tasks.
:param pause_unit_helper: function: Function to pause unit
:param configs: OSConfigRenderer object: Configurations
:returns None:
"""
set_unit_upgrading()
if pause_unit_helper and configs:
if not is_unit_paused_set():
pause_unit_helper(configs)
def series_upgrade_complete(resume_unit_helper=None, configs=None):
""" Run common series upgrade complete tasks.
:param resume_unit_helper: function: Function to resume unit
:param configs: OSConfigRenderer object: Configurations
:returns None:
"""
clear_unit_paused()
clear_unit_upgrading()
if configs:
configs.write_all()
if resume_unit_helper:
resume_unit_helper(configs)
def is_db_initialised():
"""Check leader storage to see if database has been initialised.
:returns: Whether DB has been initialised
:rtype: bool
"""
db_initialised = None
if leader_get('db-initialised') is None:
juju_log(
'db-initialised key missing, assuming db is not initialised',
'DEBUG')
db_initialised = False
else:
db_initialised = bool_from_string(leader_get('db-initialised'))
juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG')
return db_initialised
def set_db_initialised():
"""Add flag to leader storage to indicate database has been initialised.
"""
juju_log('Setting db-initialised to True', 'DEBUG')
leader_set({'db-initialised': True})
def is_db_maintenance_mode(relid=None):
"""Check relation data from notifications of db in maintenance mode.
:returns: Whether db has notified it is in maintenance mode.
:rtype: bool
"""
juju_log('Checking for maintenance notifications', 'DEBUG')
if relid:
r_ids = [relid]
else:
r_ids = relation_ids('shared-db')
rids_units = [(r, u) for r in r_ids for u in related_units(r)]
notifications = []
for r_id, unit in rids_units:
settings = relation_get(unit=unit, rid=r_id)
for key, value in settings.items():
if value and key in DB_MAINTENANCE_KEYS:
juju_log(
'Unit: {}, Key: {}, Value: {}'.format(unit, key, value),
'DEBUG')
try:
notifications.append(bool_from_string(value))
except ValueError:
juju_log(
'Could not discern bool from {}'.format(value),
'WARN')
pass
return True in notifications
@cached
def container_scoped_relations():
"""Get all the container scoped relations
:returns: List of relation names
:rtype: List
"""
md = metadata()
relations = []
for relation_type in ('provides', 'requires', 'peers'):
for relation in md.get(relation_type, []):
if md[relation_type][relation].get('scope') == 'container':
relations.append(relation)
return relations
def is_db_ready(use_current_context=False, rel_name=None):
"""Check remote database is ready to be used.
Database relations are expected to provide a list of 'allowed' units to
confirm that the database is ready for use by those units.
If db relation has provided this information and local unit is a member,
returns True otherwise False.
:param use_current_context: Whether to limit checks to current hook
context.
:type use_current_context: bool
:param rel_name: Name of relation to check
:type rel_name: string
:returns: Whether remote db is ready.
:rtype: bool
:raises: Exception
"""
key = 'allowed_units'
rel_name = rel_name or 'shared-db'
this_unit = local_unit()
if use_current_context:
if relation_id() in relation_ids(rel_name):
rids_units = [(None, None)]
else:
raise Exception("use_current_context=True but not in {} "
"rel hook contexts (currently in {})."
.format(rel_name, relation_id()))
else:
rids_units = [(r_id, u)
for r_id in relation_ids(rel_name)
for u in related_units(r_id)]
for rid, unit in rids_units:
allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
if allowed_units and this_unit in allowed_units.split():
juju_log("This unit ({}) is in allowed unit list from {}".format(
this_unit,
unit), 'DEBUG')
return True
juju_log("This unit was not found in any allowed unit list")
return False
def is_expected_scale(peer_relation_name='cluster'):
"""Query juju goal-state to determine whether our peer- and dependency-
relations are at the expected scale.
Useful for deferring per unit per relation housekeeping work until we are
    ready to complete it successfully and without unnecessary repetition.
Always returns True if version of juju used does not support goal-state.
:param peer_relation_name: Name of peer relation
:type rel_name: string
:returns: True or False
:rtype: bool
"""
def _get_relation_id(rel_type):
return next((rid for rid in relation_ids(reltype=rel_type)), None)
Relation = namedtuple('Relation', 'rel_type rel_id')
peer_rid = _get_relation_id(peer_relation_name)
# Units with no peers should still have a peer relation.
if not peer_rid:
juju_log('Not at expected scale, no peer relation found', 'DEBUG')
return False
expected_relations = [
Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))]
if expect_ha():
expected_relations.append(
Relation(
rel_type='ha',
rel_id=_get_relation_id('ha')))
juju_log(
'Checking scale of {} relations'.format(
','.join([r.rel_type for r in expected_relations])),
'DEBUG')
try:
if (len(related_units(relid=peer_rid)) <
len(list(expected_peer_units()))):
return False
for rel in expected_relations:
if not rel.rel_id:
juju_log(
'Expected to find {} relation, but it is missing'.format(
rel.rel_type),
'DEBUG')
return False
# Goal state returns every unit even for container scoped
# relations but the charm only ever has a relation with
# the local unit.
if rel.rel_type in container_scoped_relations():
expected_count = 1
else:
expected_count = len(
list(expected_related_units(reltype=rel.rel_type)))
if len(related_units(relid=rel.rel_id)) < expected_count:
juju_log(
('Not at expected scale, not enough units on {} '
'relation'.format(rel.rel_type)),
'DEBUG')
return False
except NotImplementedError:
return True
juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
return True
def get_peer_key(unit_name):
"""Get the peer key for this unit.
The peer key is the key a unit uses to publish its status down the peer
relation
:param unit_name: Name of unit
:type unit_name: string
:returns: Peer key for given unit
:rtype: string
"""
return 'unit-state-{}'.format(unit_name.replace('/', '-'))
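# For example (hypothetical unit name): get_peer_key('keystone/0') returns
# 'unit-state-keystone-0'.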
UNIT_READY = 'READY'
UNIT_NOTREADY = 'NOTREADY'
UNIT_UNKNOWN = 'UNKNOWN'
UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
def inform_peers_unit_state(state, relation_name='cluster'):
"""Inform peers of the state of this unit.
:param state: State of unit to publish
:type state: string
:param relation_name: Name of relation to publish state on
:type relation_name: string
"""
if state not in UNIT_STATES:
raise ValueError(
"Setting invalid state {} for unit".format(state))
this_unit = local_unit()
for r_id in relation_ids(relation_name):
juju_log('Telling peer behind relation {} that {} is {}'.format(
r_id, this_unit, state), 'DEBUG')
relation_set(relation_id=r_id,
relation_settings={
get_peer_key(this_unit): state})
def get_peers_unit_state(relation_name='cluster'):
"""Get the state of all peers.
:param relation_name: Name of relation to check peers on.
:type relation_name: string
:returns: Unit states keyed on unit name.
:rtype: dict
:raises: ValueError
"""
r_ids = relation_ids(relation_name)
rids_units = [(r, u) for r in r_ids for u in related_units(r)]
unit_states = {}
for r_id, unit in rids_units:
settings = relation_get(unit=unit, rid=r_id)
unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN)
if unit_states[unit] not in UNIT_STATES:
raise ValueError(
"Unit in unknown state {}".format(unit_states[unit]))
return unit_states
def are_peers_ready(relation_name='cluster'):
"""Check if all peers are ready.
:param relation_name: Name of relation to check peers on.
:type relation_name: string
:returns: Whether all units are ready.
:rtype: bool
"""
unit_states = get_peers_unit_state(relation_name).values()
juju_log('{} peers are in the following states: {}'.format(
relation_name, unit_states), 'DEBUG')
return all(state == UNIT_READY for state in unit_states)
def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'):
"""Inform peers if this unit is ready.
    The check function should return a tuple (ready, message), where a truthy
    `ready` value indicates the unit is READY.
:param check_unit_ready_func: Function to run to check readiness
:type check_unit_ready_func: function
:param relation_name: Name of relation to check peers on.
:type relation_name: string
"""
unit_ready, msg = check_unit_ready_func()
if unit_ready:
state = UNIT_READY
else:
state = UNIT_NOTREADY
juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG')
inform_peers_unit_state(state, relation_name)
def check_api_unit_ready(check_db_ready=True):
"""Check if this unit is ready.
:param check_db_ready: Include checks of database readiness.
:type check_db_ready: bool
:returns: Whether unit state is ready and status message
:rtype: (bool, str)
"""
unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready)
return unit_state == WORKLOAD_STATES.ACTIVE, msg
def get_api_unit_status(check_db_ready=True):
"""Return a workload status and message for this unit.
:param check_db_ready: Include checks of database readiness.
:type check_db_ready: bool
:returns: Workload state and message
    :rtype: (WORKLOAD_STATES, str)
"""
unit_state = WORKLOAD_STATES.ACTIVE
msg = 'Unit is ready'
if is_db_maintenance_mode():
unit_state = WORKLOAD_STATES.MAINTENANCE
msg = 'Database in maintenance mode.'
elif is_unit_paused_set():
unit_state = WORKLOAD_STATES.BLOCKED
msg = 'Unit paused.'
elif check_db_ready and not is_db_ready():
unit_state = WORKLOAD_STATES.WAITING
msg = 'Allowed_units list provided but this unit not present'
elif not is_db_initialised():
unit_state = WORKLOAD_STATES.WAITING
msg = 'Database not initialised'
elif not is_expected_scale():
unit_state = WORKLOAD_STATES.WAITING
msg = 'Charm and its dependencies not yet at expected scale'
juju_log(msg, 'DEBUG')
return unit_state, msg
def check_api_application_ready():
"""Check if this application is ready.
:returns: Whether application state is ready and status message
:rtype: (bool, str)
"""
app_state, msg = get_api_application_status()
return app_state == WORKLOAD_STATES.ACTIVE, msg
def get_api_application_status():
"""Return a workload status and message for this application.
:returns: Workload state and message
    :rtype: (WORKLOAD_STATES, str)
"""
app_state, msg = get_api_unit_status()
if app_state == WORKLOAD_STATES.ACTIVE:
if are_peers_ready():
msg = 'Application Ready'
else:
app_state = WORKLOAD_STATES.WAITING
msg = 'Some units are not ready'
juju_log(msg, 'DEBUG')
return app_state, msg
def sequence_status_check_functions(*functions):
"""Sequence the functions passed so that they all get a chance to run as
the charm status check functions.
    :param *functions: a list of functions that return (state, message)
    :type *functions: List[Callable[[OSConfigRenderer], (str, str)]]
    :returns: the Callable that takes configs and returns (state, message)
    :rtype: Callable[[OSConfigRenderer], (str, str)]
"""
def _inner_sequenced_functions(configs):
state, message = 'unknown', ''
for f in functions:
new_state, new_message = f(configs)
state = workload_state_compare(state, new_state)
if message:
message = "{}, {}".format(message, new_message)
else:
message = new_message
return state, message
return _inner_sequenced_functions
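# Hedged sketch: _check_db and _check_certs below are placeholder check
# functions, not charm-helpers APIs; the aggregated state is whichever of the
# individual states workload_state_compare() considers most significant.
def _sequenced_checks_example(configs):
    def _check_db(configs):
        return 'active', 'db ok'
    def _check_certs(configs):
        return 'blocked', 'certificates missing'
    check = sequence_status_check_functions(_check_db, _check_certs)
    return check(configs)  # e.g. ('blocked', 'db ok, certificates missing')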
| 33.8584
| 89
| 0.62433
|
b95f12d9317dd10382e698ff7e0e4cebb3ad9cfe
| 5,138
|
py
|
Python
|
great_expectations/expectations/metrics/column_map_metrics/column_values_decreasing.py
|
cbonilla20/great_expectations
|
e4f8c70ce1b137133e19eb73589fb1b1f422a380
|
[
"Apache-2.0"
] | 1
|
2021-10-02T06:59:48.000Z
|
2021-10-02T06:59:48.000Z
|
great_expectations/expectations/metrics/column_map_metrics/column_values_decreasing.py
|
chsigjan/great_expectations
|
c5a587a3b1bc5d72d433950aaceb4d09f199690a
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/expectations/metrics/column_map_metrics/column_values_decreasing.py
|
chsigjan/great_expectations
|
c5a587a3b1bc5d72d433950aaceb4d09f199690a
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Dict, Optional, Tuple
from great_expectations.core import ExpectationConfiguration
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.execution_engine import (
MetricDomainTypes,
MetricPartialFunctionTypes,
)
from great_expectations.expectations.metrics.import_manager import F, Window, sparktypes
from great_expectations.expectations.metrics.map_metric_provider import (
ColumnMapMetricProvider,
column_condition_partial,
)
from great_expectations.expectations.metrics.metric_provider import (
metric_partial,
metric_value,
)
from great_expectations.validator.metric_configuration import MetricConfiguration
class ColumnValuesDecreasing(ColumnMapMetricProvider):
condition_metric_name = "column_values.decreasing"
condition_value_keys = ("strictly",)
default_kwarg_values = {"strictly": False}
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, strictly, **kwargs):
series_diff = column.diff()
# The first element is null, so it gets a bye and is always treated as True
series_diff[series_diff.isnull()] = -1
if strictly:
return series_diff < 0
else:
return series_diff <= 0
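    # Worked illustration (assumed input): for a pandas Series [3, 2, 2, 1],
    # column.diff() gives [NaN, -1, 0, -1]; the NaN is replaced by -1, so with
    # strictly=False every row satisfies the condition, while strictly=True
    # treats the flat step (diff == 0) as unexpected.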
@metric_partial(
engine=SparkDFExecutionEngine,
partial_fn_type=MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
domain_type=MetricDomainTypes.COLUMN,
)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
# check if column is any type that could have na (numeric types)
column_name = metric_domain_kwargs["column"]
table_columns = metrics["table.column_types"]
column_metadata = [col for col in table_columns if col["name"] == column_name][
0
]
if isinstance(
column_metadata["type"],
(
sparktypes.LongType,
sparktypes.DoubleType,
sparktypes.IntegerType,
),
):
# if column is any type that could have NA values, remove them (not filtered by .isNotNull())
compute_domain_kwargs = execution_engine.add_column_row_condition(
metric_domain_kwargs,
filter_null=cls.filter_column_isnull,
filter_nan=True,
)
else:
compute_domain_kwargs = metric_domain_kwargs
(
df,
compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
compute_domain_kwargs, MetricDomainTypes.COLUMN
)
# NOTE: 20201105 - parse_strings_as_datetimes is not supported here;
# instead detect types naturally
column = F.col(column_name)
if isinstance(
column_metadata["type"], (sparktypes.TimestampType, sparktypes.DateType)
):
diff = F.datediff(
column, F.lag(column).over(Window.orderBy(F.lit("constant")))
)
else:
diff = column - F.lag(column).over(Window.orderBy(F.lit("constant")))
diff = F.when(diff.isNull(), -1).otherwise(diff)
# NOTE: because in spark we are implementing the window function directly,
# we have to return the *unexpected* condition
if metric_value_kwargs["strictly"]:
return (
F.when(diff >= 0, F.lit(True)).otherwise(F.lit(False)),
compute_domain_kwargs,
accessor_domain_kwargs,
)
        # If we expect values to be flat or decreasing then unexpected values
        # are those that are increasing
else:
return (
F.when(diff > 0, F.lit(True)).otherwise(F.lit(False)),
compute_domain_kwargs,
accessor_domain_kwargs,
)
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
table_domain_kwargs: dict = {
k: v for k, v in metric.metric_domain_kwargs.items() if k != "column"
}
dependencies["table.column_types"] = MetricConfiguration(
metric_name="table.column_types",
metric_domain_kwargs=table_domain_kwargs,
metric_value_kwargs={
"include_nested": True,
},
metric_dependencies=None,
)
return dependencies
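# Hedged usage note: this metric provider typically backs the
# `expect_column_values_to_be_decreasing` expectation, e.g. (illustrative):
#   validator.expect_column_values_to_be_decreasing("price", strictly=True)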
| 36.439716
| 105
| 0.640327
|
1b77e0737db130c9411e78551af340eb737d78a3
| 5,819
|
py
|
Python
|
src/Create.py
|
ytyaru/GitHub.Database.Create.Auto.Add.Repositories.201703230742
|
cfe68f881ca8d0eb85bb84aff98f479b4d1cf6ef
|
[
"CC0-1.0"
] | 1
|
2017-06-08T10:53:31.000Z
|
2017-06-08T10:53:31.000Z
|
src/Create.py
|
ytyaru/GitHub.Database.Create.Auto.Add.Repositories.201703230742
|
cfe68f881ca8d0eb85bb84aff98f479b4d1cf6ef
|
[
"CC0-1.0"
] | null | null | null |
src/Create.py
|
ytyaru/GitHub.Database.Create.Auto.Add.Repositories.201703230742
|
cfe68f881ca8d0eb85bb84aff98f479b4d1cf6ef
|
[
"CC0-1.0"
] | null | null | null |
#!python3
#encoding:utf-8
import subprocess
import shlex
import shutil
import os.path
import getpass
import language.insert.Main
import gnu_license.create.Main
import gnu_license.insert.main
import license.insert.Main
import other_repo.insert.Main
import account.Main
import repo.insert.Main
class InitializeMasterDbCreator:
def __init__(self, db_dir_path):
self.db_dir_path = db_dir_path
self.db_files = [
# 'GitHub.Languages.sqlite3': CreateLanguage,
# 'GitHub.Licenses.sqlite3': self.CreateLicenses,
{'FileName': 'GitHub.Languages.sqlite3', 'Creator': self.__CreateLanguages, 'Inserter': self.__InsertLanguages},
{'FileName': 'GNU.Licenses.sqlite3', 'Creator': self.__CreateGnuLicenses, 'Inserter': self.__InsertGnuLicenses},
{'FileName': 'GitHub.Licenses.sqlite3', 'Creator': self.__CreateLicenses, 'Inserter': self.__InsertLicenses},
{'FileName': 'GitHub.Repositories.__other__.sqlite3', 'Creator': self.__CreateOtherRepo, 'Inserter': self.__InsertOtherRepo},
{'FileName': 'GitHub.Accounts.sqlite3', 'Creator': self.__CreateAccounts, 'Inserter': self.__InsertAccounts},
{'FileName': 'GitHub.Repositories.{user}.sqlite3', 'Creator': self.__CreateRepo, 'Inserter': self.__InsertRepo},
# 'GitHub.Accounts.sqlite3': CreateAccounts,
# 'GitHub.Repositories.{user}.sqlite3': CreateRepositories,
# 'GitHub.Repositories.__other__.sqlite3': CreateOtherRepositories,
# 'GNU.Licenses.sqlite3': CreateGnuLicenses,
# 'GitHub.Api.sqlite3': CreateApi
]
self.__Setup()
def Run(self):
if not(os.path.isdir(self.db_dir_path)):
            print('Creating the DB directory ----------------')
os.mkdir(self.db_dir_path)
for db in self.db_files:
db_path = os.path.join(self.db_dir_path, db['FileName'])
if 'GitHub.Repositories.{user}.sqlite3' == db['FileName']:
if not(os.path.isfile(db_path)):
db['Creator'](db_path)
db_path_new = db_path.replace("{user}", self.github_user_name)
if not(os.path.isfile(db_path_new)):
shutil.copyfile(db_path, db_path_new)
db['Inserter'](db_path_new)
else:
if not(os.path.isfile(db_path)):
                    print('Creating DB file: {0} ----------------'.format(db_path))
db['Creator'](db_path)
db['Inserter'](db_path)
def __CreateRepo(self, db_path):
subprocess.call(shlex.split("bash ./repo/create/Create.sh \"{0}\"".format(db_path)))
def __InsertRepo(self, db_path):
m = repo.insert.Main.Main(self.github_user_name, self.path_db_account, db_path, self.path_db_license, self.path_db_api)
m.Initialize()
def __CreateAccounts(self, db_path):
a = account.Main.Main(db_path)
a.Run()
def __InsertAccounts(self, db_path):
pass
def __CreateOtherRepo(self, db_path):
subprocess.call(shlex.split("bash ./other_repo/create/Create.sh \"{0}\"".format(db_path)))
def __InsertOtherRepo(self, db_path):
path_db_license = os.path.join(self.db_dir_path, "GitHub.Licenses.sqlite3")
print(path_db_license)
main = other_repo.insert.Main.Main(self.github_user_name, self.path_db_account, db_path, path_db_license)
main.Initialize()
def __CreateLanguages(self, db_path):
subprocess.call(shlex.split("bash ./language/create/Create.sh \"{0}\"".format(db_path)))
def __InsertLanguages(self, db_path):
creator_language = language.insert.Main.Main(db_path)
creator_language.Run()
def __CreateGnuLicenses(self, db_path):
creator_language = gnu_license.create.Main.Main(db_path)
creator_language.Run()
def __InsertGnuLicenses(self, db_path):
creator_gnu_license = gnu_license.insert.main.GnuSite(db_path)
creator_gnu_license.GetAll()
def __CreateLicenses(self, db_path):
subprocess.call(shlex.split("bash ./license/create/Create.sh \"{0}\"".format(db_path)))
def __InsertLicenses(self, db_path):
# creator_license = self.__LicenseCreator(db_path)
creator_license = license.insert.Main.Main(self.github_user_name, self.path_db_account, self.path_db_repo, db_path)
creator_license.Initialize()
"""
def __LicenseCreator(self, db_path):
github_user_name = 'ytyaru'
os_user_name = getpass.getuser()
device_name = '85f78c06-a96e-4020-ac36-9419b7e456db'
path_db_base = 'mint/root/db/Account/GitHub'
path_db_account = '/media/{0}/{1}/{2}/private/v0/GitHub.Accounts.sqlite3'.format(os_user_name, device_name, path_db_base)
path_db_repo = '/media/{0}/{1}/{2}/public/v0/GitHub.Repositories.{3}.sqlite3'.format(os_user_name, device_name, path_db_base, github_user_name)
return license.insert.Main.Main(github_user_name, path_db_account, path_db_repo, db_path)
"""
def __Setup(self):
self.github_user_name = 'ytyaru'
os_user_name = getpass.getuser()
device_name = '85f78c06-a96e-4020-ac36-9419b7e456db'
path_db_base = 'mint/root/db/Account/GitHub'
self.path_db_account = '/media/{0}/{1}/{2}/private/v0/GitHub.Accounts.sqlite3'.format(os_user_name, device_name, path_db_base)
self.path_db_repo = '/media/{0}/{1}/{2}/public/v0/GitHub.Repositories.{3}.sqlite3'.format(os_user_name, device_name, path_db_base, self.github_user_name)
self.path_db_license = '/media/{0}/{1}/{2}/public/v0/GitHub.Licenses.sqlite3'.format(os_user_name, device_name, path_db_base)
self.path_db_api = '/media/{0}/{1}/{2}/public/v0/GitHub.Apis.sqlite3'.format(os_user_name, device_name, path_db_base)
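# Hedged usage sketch: the relative DB directory below is an assumption, not a
# path taken from the original script.
# if __name__ == '__main__':
#     InitializeMasterDbCreator('./db').Run()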
| 48.89916
| 161
| 0.668672
|
cc9e93a1f84396d55287e52667e29edba708d8c3
| 45,857
|
py
|
Python
|
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
|
Arenadata/ambari
|
4628267441121779113d98936dcdf5d9be60553c
|
[
"Apache-2.0"
] | 5
|
2017-07-20T11:15:10.000Z
|
2020-04-16T15:42:55.000Z
|
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
|
Arenadata/ambari
|
4628267441121779113d98936dcdf5d9be60553c
|
[
"Apache-2.0"
] | 8
|
2020-06-18T17:31:19.000Z
|
2022-03-02T08:32:03.000Z
|
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
|
Arenadata/ambari
|
4628267441121779113d98936dcdf5d9be60553c
|
[
"Apache-2.0"
] | 12
|
2017-05-17T09:48:01.000Z
|
2021-08-05T19:01:25.000Z
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import status_params
import ambari_simplejson as json  # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
import os
from urlparse import urlparse
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_check import OSCheck
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.copy_tarball import STACK_ROOT_PATTERN, STACK_NAME_PATTERN, STACK_VERSION_PATTERN
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions.expect import expect
from resource_management.libraries import functions
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
from resource_management.libraries.functions.get_architecture import get_architecture
from resource_management.libraries.functions.version import get_major_version
from resource_management.core.utils import PasswordString
from resource_management.core.exceptions import Fail
from resource_management.core.shell import checked_call
from ambari_commons.credential_store_helper import get_password_from_credential_store
# Default log4j version; put config files under /etc/hive/conf
log4j_version = '1'
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
architecture = get_architecture()
sudo = AMBARI_SUDO_BINARY
credential_store_enabled = False
if 'credentialStoreEnabled' in config:
credential_store_enabled = config['credentialStoreEnabled']
stack_root = status_params.stack_root
stack_name = status_params.stack_name
stack_name_uppercase = stack_name.upper()
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']
# node hostname
hostname = config["hostname"]
# This is expected to be of the form #.#.#.#
stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted_major = status_params.stack_version_formatted_major
# this is not available on INSTALL action because <stack-selector-tool> is not available
stack_version_formatted = functions.get_stack_version('hive-server2')
major_stack_version = get_major_version(stack_version_formatted_major)
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)
# When downgrading the 'version' is pointing to the downgrade-target version
# downgrade_from_version provides the source-version the downgrade is happening from
downgrade_from_version = upgrade_summary.get_downgrade_from_version("HIVE")
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
# Upgrade direction
upgrade_direction = default("/commandParams/upgrade_direction", None)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_hive_jdbc_url_change = check_stack_feature(StackFeature.RANGER_HIVE_PLUGIN_JDBC_URL, version_for_stack_feature_checks)
stack_supports_atlas_hook_for_hive_interactive = check_stack_feature(StackFeature.HIVE_INTERACTIVE_ATLAS_HOOK_REQUIRED, version_for_stack_feature_checks)
stack_supports_hive_interactive_ga = check_stack_feature(StackFeature.HIVE_INTERACTIVE_GA_SUPPORT, version_for_stack_feature_checks)
# component ROLE directory (like hive-metastore or hive-server2-hive2)
component_directory = status_params.component_directory
component_directory_interactive = status_params.component_directory_interactive
# used to render hadoop configurations, such as writing out its own mapreduce2 configs
hadoop_home = stack_select.get_hadoop_dir("home")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
hive_bin = format('{stack_root}/current/{component_directory}/bin')
hive_cmd = os.path.join(hive_bin, "hive")
hive_schematool_ver_bin = format('{stack_root}/{version}/hive/bin')
hive_schematool_bin = format('{stack_root}/current/{component_directory}/bin')
hive_lib = format('{stack_root}/current/{component_directory}/lib')
hive_version_lib = format('{stack_root}/{version}/hive/lib')
hive_var_lib = '/var/lib/hive'
hive_user_home_dir = "/home/hive"
# starting on stacks where HSI is supported, we need to begin using the 'hive2' schematool
hive_server2_hive2_dir = None
hive_server2_hive2_lib = None
if check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, version_for_stack_feature_checks):
# the name of the hiveserver2-hive2 component
hive_server2_hive2_component = status_params.SERVER_ROLE_DIRECTORY_MAP["HIVE_SERVER_INTERACTIVE"]
# when using the version, we can just specify the component as "hive2"
hive_schematool_ver_bin = format('{stack_root}/{version}/hive2/bin')
# use the schematool which ships with hive2
hive_schematool_bin = format('{stack_root}/current/{hive_server2_hive2_component}/bin')
# <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
hive_server2_hive2_dir = format('{stack_root}/current/{hive_server2_hive2_component}')
# <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
hive_server2_hive2_version_dir = format('{stack_root}/{version}/hive2')
# <stack-root>/current/hive-server2-hive2/lib -> <stack-root>/<version>/hive2/lib
hive_server2_hive2_lib = format('{hive_server2_hive2_dir}/lib')
# <stack-root>/<version>/hive2/lib
hive_server2_hive2_version_lib = format('{hive_server2_hive2_version_dir}/lib')
hive_interactive_bin = format('{stack_root}/current/{component_directory_interactive}/bin')
hive_interactive_lib = format('{stack_root}/current/{component_directory_interactive}/lib')
# Heap dump related
heap_dump_enabled = default('/configurations/hive-env/enable_heap_dump', None)
heap_dump_opts = "" # Empty if 'heap_dump_enabled' is False.
if heap_dump_enabled:
heap_dump_path = default('/configurations/hive-env/heap_dump_location', "/tmp")
heap_dump_opts = " -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="+heap_dump_path
# Hive Interactive related paths
hive_interactive_var_lib = '/var/lib/hive2'
# These tar folders were used in previous stack versions, e.g., HDP 2.1
hadoop_streaming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
pig_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/pig.tar.gz')
hive_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/hive.tar.gz')
sqoop_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/sqoop*.tar.gz')
hive_metastore_site_supported = False
hive_etc_dir_prefix = "/etc/hive"
hive_interactive_etc_dir_prefix = "/etc/hive2"
limits_conf_dir = "/etc/security/limits.d"
hive_user_nofile_limit = default("/configurations/hive-env/hive_user_nofile_limit", "32000")
hive_user_nproc_limit = default("/configurations/hive-env/hive_user_nproc_limit", "16000")
# use the directories from status_params as they are already calculated for
# the correct stack version
hadoop_conf_dir = status_params.hadoop_conf_dir
hadoop_bin_dir = status_params.hadoop_bin_dir
webhcat_conf_dir = status_params.webhcat_conf_dir
hive_conf_dir = status_params.hive_conf_dir
hive_home_dir = status_params.hive_home_dir
hive_config_dir = status_params.hive_config_dir
hive_client_conf_dir = status_params.hive_client_conf_dir
hive_server_conf_dir = status_params.hive_server_conf_dir
hcat_conf_dir = '/etc/hive-hcatalog/conf'
config_dir = '/etc/hive-webhcat/conf'
# there are no client versions of these, use server versions directly
hcat_lib = format('{stack_root}/current/hive-webhcat/share/hcatalog')
webhcat_bin_dir = format('{stack_root}/current/hive-webhcat/sbin')
# --- Tarballs ---
# DON'T CHANGE THESE VARIABLE NAMES
# Values don't change from those in copy_tarball.py
webhcat_apps_dir = "/apps/webhcat"
hive_tar_source = "{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
pig_tar_source = "{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
hive_tar_dest_file = "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
pig_tar_dest_file = "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
hadoop_streaming_tar_source = "{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
sqoop_tar_source = "{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
hadoop_streaming_tar_dest_dir = "/{0}/apps/{1}/mapreduce/".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
sqoop_tar_dest_dir = "/{0}/apps/{1}/sqoop/".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
tarballs_mode = 0444
purge_tables = "false"
# Starting from the stack version that supports the hive_purge_table feature, drop should be executed with purge
if check_stack_feature(StackFeature.HIVE_PURGE_TABLE, version_for_stack_feature_checks):
purge_tables = 'true'
if check_stack_feature(StackFeature.HIVE_WEBHCAT_SPECIFIC_CONFIGS, version_for_stack_feature_checks):
# this is NOT a typo. Configs for hcatalog/webhcat point to a
# specific directory which is NOT called 'conf'
hcat_conf_dir = format('{stack_root}/current/hive-webhcat/etc/hcatalog')
config_dir = format('{stack_root}/current/hive-webhcat/etc/webhcat')
if check_stack_feature(StackFeature.HIVE_METASTORE_SITE_SUPPORT, version_for_stack_feature_checks):
hive_metastore_site_supported = True
execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
jdk_location = config['hostLevelParams']['jdk_location']
if credential_store_enabled:
if 'hadoop.security.credential.provider.path' in config['configurations']['hive-site']:
cs_lib_path = config['configurations']['hive-site']['credentialStoreClassPath']
java_home = config['hostLevelParams']['java_home']
alias = 'javax.jdo.option.ConnectionPassword'
provider_path = config['configurations']['hive-site']['hadoop.security.credential.provider.path']
hive_metastore_user_passwd = PasswordString(get_password_from_credential_store(alias, provider_path, cs_lib_path, java_home, jdk_location))
else:
raise Exception("hadoop.security.credential.provider.path property should be set")
else:
hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
hive_metastore_user_passwd = unicode(hive_metastore_user_passwd) if not is_empty(hive_metastore_user_passwd) else hive_metastore_user_passwd
hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
#HACK Temporarily use dbType=azuredb while invoking schematool
if hive_metastore_db_type == "mssql":
hive_metastore_db_type = "azuredb"
#users
hive_user = config['configurations']['hive-env']['hive_user']
# is it a restart command
is_restart_command = False
if 'roleCommand' in config and 'CUSTOM_COMMAND' == config['roleCommand']:
if 'custom_command' in config['hostLevelParams'] and 'RESTART' == config['hostLevelParams']['custom_command']:
is_restart_command = True
#JDBC driver jar name
hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
java_share_dir = '/usr/share/java'
hive_database_name = config['configurations']['hive-env']['hive_database_name']
hive_database = config['configurations']['hive-env']['hive_database']
hive_use_existing_db = hive_database.startswith('Existing')
default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
"com.mysql.jdbc.Driver":"mysql-connector-java.jar",
"org.postgresql.Driver":"postgresql-jdbc.jar",
"oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
"sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
# NOT SURE THAT IT'S A GOOD IDEA TO USE PATH TO CLASS IN DRIVER, MAYBE IT WILL BE BETTER TO USE DB TYPE.
# BECAUSE PATH TO CLASSES COULD BE CHANGED
sqla_db_used = False
hive_previous_jdbc_jar_name = None
if hive_jdbc_driver == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
elif hive_jdbc_driver == "com.mysql.jdbc.Driver":
jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
elif hive_jdbc_driver == "org.postgresql.Driver":
jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
elif hive_jdbc_driver == "sap.jdbc4.sqlanywhere.IDriver":
jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
sqla_db_used = True
else: raise Fail(format("JDBC driver '{hive_jdbc_driver}' not supported."))
default_mysql_jar_name = "mysql-connector-java.jar"
default_mysql_target = format("{hive_lib}/{default_mysql_jar_name}")
hive_previous_jdbc_jar = format("{hive_lib}/{hive_previous_jdbc_jar_name}")
if not hive_use_existing_db:
jdbc_jar_name = default_mysql_jar_name
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
hive_jdbc_target = format("{hive_lib}/{jdbc_jar_name}")
hive2_jdbc_target = None
if hive_server2_hive2_dir:
hive2_jdbc_target = format("{hive_server2_hive2_lib}/{jdbc_jar_name}")
# during upgrade / downgrade, use the specific version to copy the JDBC JAR to
if upgrade_direction:
hive_jdbc_target = format("{hive_version_lib}/{jdbc_jar_name}")
hive2_jdbc_target = format("{hive_server2_hive2_version_lib}/{jdbc_jar_name}") if hive2_jdbc_target is not None else None
hive2_previous_jdbc_jar = format("{hive_server2_hive2_lib}/{hive_previous_jdbc_jar_name}") if hive_server2_hive2_lib is not None else None
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
# normally, the JDBC driver would be referenced by <stack-root>/current/.../foo.jar
# but in RU if <stack-selector-tool> is called and the restart fails, then this means that current pointer
# is now pointing to the upgraded version location; that's bad for the cp command
version_for_source_jdbc_file = upgrade_summary.get_source_version(default_version = version_for_stack_feature_checks)
source_jdbc_file = format("{stack_root}/{version_for_source_jdbc_file}/hive/lib/{jdbc_jar_name}")
check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver",
"org.postgresql.Driver","oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
prepackaged_jdbc_name = "ojdbc6.jar"
prepackaged_ojdbc_symlink = format("{hive_lib}/{prepackaged_jdbc_name}")
templeton_port = config['configurations']['webhcat-site']['templeton.port']
#constants for type2 jdbc
jdbc_libs_dir = format("{hive_lib}/native/lib64")
lib_dir_available = os.path.exists(jdbc_libs_dir)
if sqla_db_used:
jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
libs_in_hive_lib = format("{jdbc_libs_dir}/*")
# Start, Common Hosts and Ports
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
hive_metastore_hosts = default('/clusterHostInfo/hive_metastore_host', [])
hive_metastore_host = hive_metastore_hosts[0] if len(hive_metastore_hosts) > 0 else None
hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris'])
hive_server_hosts = default("/clusterHostInfo/hive_server_host", [])
hive_server_host = hive_server_hosts[0] if len(hive_server_hosts) > 0 else None
hive_server_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
hive_server_interactive_host = hive_server_interactive_hosts[0] if len(hive_server_interactive_hosts) > 0 else None
hive_server_interactive_ha = True if len(hive_server_interactive_hosts) > 1 else False
# End, Common Hosts and Ports
hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
if hive_transport_mode.lower() == "http":
hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
else:
hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
hive_http_endpoint = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
# ssl options
hive_ssl = default('/configurations/hive-site/hive.server2.use.SSL', False)
hive_ssl_keystore_path = default('/configurations/hive-site/hive.server2.keystore.path', None)
hive_interactive_ssl_keystore_path = default('/configurations/hive-interactive-site/hive.server2.keystore.path', None)
hive_ssl_keystore_password = default('/configurations/hive-site/hive.server2.keystore.password', None)
hive_interactive_ssl_keystore_password = default('/configurations/hive-interactive-site/hive.server2.keystore.password', None)
smokeuser = config['configurations']['cluster-env']['smokeuser']
smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
fs_root = config['configurations']['core-site']['fs.defaultFS']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
hive_metastore_principal = config['configurations']['hive-site']['hive.metastore.kerberos.principal']
hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
#hive_env
hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
hive_pid_dir = status_params.hive_pid_dir
hive_pid = status_params.hive_pid
hive_interactive_pid = status_params.hive_interactive_pid
#Default conf dir for client
hive_conf_dirs_list = [hive_client_conf_dir]
# These are the folders to which the configs will be written to.
ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER']
if status_params.role == "HIVE_METASTORE" and hive_metastore_hosts is not None and hostname in hive_metastore_hosts:
hive_conf_dirs_list.append(hive_server_conf_dir)
elif status_params.role == "HIVE_SERVER" and hive_server_hosts is not None and hostname in hive_server_hosts:
hive_conf_dirs_list.append(hive_server_conf_dir)
elif status_params.role == "HIVE_SERVER_INTERACTIVE" and hive_server_interactive_hosts is not None and hostname in hive_server_interactive_hosts:
hive_conf_dirs_list.append(status_params.hive_server_interactive_conf_dir)
ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER_INTERACTIVE']
# log4j version is 2 for hive2; put config files under /etc/hive2/conf
if status_params.role == "HIVE_SERVER_INTERACTIVE":
log4j_version = '2'
#Starting hiveserver2
start_hiveserver2_script = 'startHiveserver2.sh.j2'
##Starting metastore
start_metastore_script = 'startMetastore.sh'
hive_metastore_pid = status_params.hive_metastore_pid
# Hive Server Interactive
slider_am_container_mb = default("/configurations/hive-interactive-env/slider_am_container_mb", 341)
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
# Need this for yarn.nodemanager.recovery.dir in yarn-site
yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
target_hive_interactive = format("{hive_interactive_lib}/{jdbc_jar_name}")
hive_intaractive_previous_jdbc_jar = format("{hive_interactive_lib}/{hive_previous_jdbc_jar_name}")
jars_in_hive_lib = format("{hive_lib}/*.jar")
start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
start_metastore_path = format("{tmp_dir}/start_metastore_script")
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE", "HIVE_SERVER_INTERACTIVE"]:
if check_stack_feature(StackFeature.HIVE_ENV_HEAPSIZE, version_for_stack_feature_checks):
hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
else:
hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
else:
hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']
hive_metastore_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']
java64_home = config['hostLevelParams']['java_home']
java_exec = format("{java64_home}/bin/java")
java_version = expect("/hostLevelParams/java_version", int)
##### MYSQL
db_name = config['configurations']['hive-env']['hive_database_name']
mysql_group = 'mysql'
mysql_host = config['clusterHostInfo']['hive_mysql_host']
mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
#### Metastore
# initialize the schema only if not in an upgrade/downgrade
init_metastore_schema = upgrade_direction is None
########## HCAT
hcat_dbroot = hcat_lib
hcat_user = config['configurations']['hive-env']['hcat_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
hcat_pid_dir = status_params.hcat_pid_dir
hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
hcat_env_sh_template = config['configurations']['hcat-env']['content']
#Hive log4j properties
webhcat_log_maxfilesize = default("/configurations/webhcat-log4j/webhcat_log_maxfilesize", 256)
webhcat_log_maxbackupindex = default("/configurations/webhcat-log4j/webhcat_log_maxbackupindex", 20)
hive_log_maxfilesize = default("/configurations/hive-log4j/hive_log_maxfilesize", 256)
hive_log_maxbackupindex = default("/configurations/hive-log4j/hive_log_maxbackupindex", 30)
hive_log_level = default("/configurations/hive-env/hive.log.level", "INFO")
#hive-log4j.properties.template
if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
log4j_props = config['configurations']['hive-log4j']['content']
else:
log4j_props = None
#webhcat-log4j.properties.template
if (('webhcat-log4j' in config['configurations']) and ('content' in config['configurations']['webhcat-log4j'])):
log4j_webhcat_props = config['configurations']['webhcat-log4j']['content']
else:
log4j_webhcat_props = None
#hive-exec-log4j.properties.template
if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
else:
log4j_exec_props = None
# parquet-logging.properties
parquet_logging_properties = None
if 'parquet-logging' in config['configurations']:
parquet_logging_properties = config['configurations']['parquet-logging']['content']
process_name = status_params.process_name
hive_env_sh_template = config['configurations']['hive-env']['content']
hive_hdfs_user_dir = format("/user/{hive_user}")
hive_hdfs_user_mode = 0755
#Parameter for custom warehouse directory permissions. Permissions are in octal format and need to be converted to decimal
hive_apps_whs_mode = int(default('/configurations/hive-site/custom.hive.warehouse.mode', '0777'), 8)
hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
whs_dir_protocol = urlparse(hive_apps_whs_dir).scheme
hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
#for create_hdfs_directory
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
# Tez-related properties
tez_user = config['configurations']['tez-env']['tez_user']
# Tez jars
tez_local_api_jars = '/usr/lib/tez/tez*.jar'
tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
# Tez libraries
tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
if OSCheck.is_ubuntu_family():
mysql_configname = '/etc/mysql/my.cnf'
else:
mysql_configname = '/etc/my.cnf'
mysql_user = 'mysql'
# Hive security
hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
hive_site_config = dict(config['configurations']['hive-site'])
########################################################
############# AMS related params #####################
########################################################
set_instanceId = "false"
if 'cluster-env' in config['configurations'] and \
'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
set_instanceId = "true"
else:
ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
if 'cluster-env' in config['configurations'] and \
'metrics_collector_external_port' in config['configurations']['cluster-env']:
metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
else:
metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
if metric_collector_web_address.find(':') != -1:
metric_collector_port = metric_collector_web_address.split(':')[1]
else:
metric_collector_port = '6188'
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
metric_collector_protocol = 'https'
else:
metric_collector_protocol = 'http'
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
########################################################
############# Atlas related params #####################
########################################################
#region Atlas Hooks
hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
enable_atlas_hook = default('/configurations/hive-env/hive.atlas.hook', False)
atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
#endregion
########################################################
########### WebHCat related params #####################
########################################################
webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
templeton_pid_dir = status_params.hcat_pid_dir
webhcat_pid_file = status_params.webhcat_pid_file
templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
hcat_hdfs_user_dir = format("/user/{hcat_user}")
hcat_hdfs_user_mode = 0755
webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
webhcat_hdfs_user_mode = 0755
#for create_hdfs_directory
security_param = "true" if security_enabled else "false"
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
import functools
# create partial functions with common arguments for every HdfsResource call
# to create an hdfs directory we only need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user = hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
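# Added sketch (not from the original file): with the functools.partial above,
# later resource code only has to supply the per-call arguments; the shared
# keyword arguments (keytab, principal, hdfs_site, etc.) are filled in by the
# partial. The keyword names below follow the usual Ambari HdfsResource call
# pattern and are an assumption here, e.g.:
#   params.HdfsResource(params.hive_hdfs_user_dir,
#                       type="directory",
#                       action="create_on_execute",
#                       owner=params.hive_user,
#                       mode=params.hive_hdfs_user_mode)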
# Hive Interactive related
hive_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
has_hive_interactive = len(hive_interactive_hosts) > 0
#llap log4j properties
hive_llap_log_maxfilesize = default('/configurations/llap-daemon-log4j/hive_llap_log_maxfilesize', 256)
hive_llap_log_maxbackupindex = default('/configurations/llap-daemon-log4j/hive_llap_log_maxbackupindex', 240)
#hive log4j2 properties
hive2_log_maxfilesize = default('/configurations/hive-log4j2/hive2_log_maxfilesize', 256)
hive2_log_maxbackupindex = default('/configurations/hive-log4j2/hive2_log_maxbackupindex', 30)
#llap cli log4j2 properties
llap_cli_log_maxfilesize = default('/configurations/llap-cli-log4j2/llap_cli_log_maxfilesize', 256)
llap_cli_log_maxbackupindex = default('/configurations/llap-cli-log4j2/llap_cli_log_maxbackupindex', 30)
if has_hive_interactive:
llap_daemon_log4j = config['configurations']['llap-daemon-log4j']['content']
llap_cli_log4j2 = config['configurations']['llap-cli-log4j2']['content']
hive_log4j2 = config['configurations']['hive-log4j2']['content']
hive_exec_log4j2 = config['configurations']['hive-exec-log4j2']['content']
beeline_log4j2 = config['configurations']['beeline-log4j2']['content']
hive_server_interactive_conf_dir = status_params.hive_server_interactive_conf_dir
execute_path_hive_interactive = os.path.join(os.environ['PATH'], hive_interactive_bin, hadoop_bin_dir)
start_hiveserver2_interactive_script = 'startHiveserver2Interactive.sh.j2'
start_hiveserver2_interactive_path = format("{tmp_dir}/start_hiveserver2_interactive_script")
hive_interactive_env_sh_template = config['configurations']['hive-interactive-env']['content']
hive_interactive_enabled = default('/configurations/hive-interactive-env/enable_hive_interactive', False)
llap_app_java_opts = default('/configurations/hive-interactive-env/llap_java_opts', '-XX:+AlwaysPreTouch {% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}')
hive_interactive_heapsize = hive_heapsize
llap_app_name = config['configurations']['hive-interactive-env']['llap_app_name']
# Ambari upgrade may not add this config as it will force restart of HSI (stack upgrade should)
if 'hive_heapsize' in config['configurations']['hive-interactive-env']:
hive_interactive_heapsize = config['configurations']['hive-interactive-env']['hive_heapsize']
# Service check related
if hive_transport_mode.lower() == "http":
hive_server_interactive_port = config['configurations']['hive-interactive-site']['hive.server2.thrift.http.port']
else:
hive_server_interactive_port = default('/configurations/hive-interactive-site/hive.server2.thrift.port',"10500")
# Tez for Hive interactive related
tez_interactive_config_dir = "/etc/tez_hive2/conf"
tez_interactive_user = config['configurations']['tez-env']['tez_user']
num_retries_for_checking_llap_status = default('/configurations/hive-interactive-env/num_retries_for_checking_llap_status', 10)
# Used in LLAP slider package creation
yarn_nm_mem = config['configurations']['yarn-site']['yarn.nodemanager.resource.memory-mb']
if stack_supports_hive_interactive_ga:
num_llap_daemon_running_nodes = config['configurations']['hive-interactive-env']['num_llap_nodes_for_llap_daemons']
num_llap_nodes = config['configurations']['hive-interactive-env']['num_llap_nodes']
llap_daemon_container_size = config['configurations']['hive-interactive-site']['hive.llap.daemon.yarn.container.mb']
llap_log_level = config['configurations']['hive-interactive-env']['llap_log_level']
llap_logger = default('/configurations/hive-interactive-site/hive.llap.daemon.logger', 'query-routing')
hive_aux_jars = default('/configurations/hive-interactive-env/hive_aux_jars', '')
hive_llap_io_mem_size = config['configurations']['hive-interactive-site']['hive.llap.io.memory.size']
llap_heap_size = config['configurations']['hive-interactive-env']['llap_heap_size']
llap_app_name = config['configurations']['hive-interactive-env']['llap_app_name']
llap_extra_slider_opts = default('/configurations/hive-interactive-env/llap_extra_slider_opts', "")
hive_llap_principal = None
if security_enabled:
hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.daemon.keytab.file']
hive_llap_principal = (config['configurations']['hive-interactive-site']['hive.llap.daemon.service.principal']).replace('_HOST',hostname.lower())
pass
if len(hive_server_hosts) == 0 and len(hive_server_interactive_hosts) > 0:
hive_server2_zookeeper_namespace = config['configurations']['hive-interactive-site']['hive.server2.zookeeper.namespace']
else:
hive_server2_zookeeper_namespace = config['configurations']['hive-site']['hive.server2.zookeeper.namespace']
hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
if security_enabled:
hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
hive_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
hive_cluster_token_zkstore = default("/configurations/hive-site/hive.cluster.delegation.token.store.zookeeper.znode", None)
jaas_file = os.path.join(hive_config_dir, 'zkmigrator_jaas.conf')
hive_zk_namespace = default("/configurations/hive-site/hive.zookeeper.namespace", None)
# ranger hive plugin section start
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
# ranger hive plugin enabled property
enable_ranger_hive = config['configurations']['hive-env']['hive_security_authorization'].lower() == 'ranger'
# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
# get ranger hive properties if enable_ranger_hive is True
if enable_ranger_hive:
# get ranger policy url
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
if xml_configurations_supported:
policymgr_mgr_url = config['configurations']['ranger-hive-security']['ranger.plugin.hive.policy.rest.url']
if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
# ranger audit db user
xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
# ranger hive service name
repo_name = str(config['clusterName']) + '_hive'
repo_name_value = config['configurations']['ranger-hive-security']['ranger.plugin.hive.service.name']
if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
repo_name = repo_name_value
jdbc_driver_class_name = config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
common_name_for_certificate = config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
repo_config_username = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
# ranger-env config
ranger_env = config['configurations']['ranger-env']
# create ranger-env config having external ranger credential properties
if not has_ranger_admin and enable_ranger_hive:
external_admin_username = default('/configurations/ranger-hive-plugin-properties/external_admin_username', 'admin')
external_admin_password = default('/configurations/ranger-hive-plugin-properties/external_admin_password', 'admin')
external_ranger_admin_username = default('/configurations/ranger-hive-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
external_ranger_admin_password = default('/configurations/ranger-hive-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
ranger_env = {}
ranger_env['admin_username'] = external_admin_username
ranger_env['admin_password'] = external_admin_password
ranger_env['ranger_admin_username'] = external_ranger_admin_username
ranger_env['ranger_admin_password'] = external_ranger_admin_password
ranger_plugin_properties = config['configurations']['ranger-hive-plugin-properties']
policy_user = config['configurations']['ranger-hive-plugin-properties']['policy_user']
repo_config_password = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
ranger_downloaded_custom_connector = None
ranger_previous_jdbc_jar_name = None
ranger_driver_curl_source = None
ranger_driver_curl_target = None
ranger_previous_jdbc_jar = None
# to get db connector related properties
if has_ranger_admin and stack_supports_ranger_audit_db:
xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
ranger_jdbc_jar_name, ranger_previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}")
ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_jar_name}")
ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}")
ranger_previous_jdbc_jar = format("{hive_lib}/{ranger_previous_jdbc_jar_name}")
sql_connector_jar = ''
ranger_hive_url = format("{hive_url}/default;principal={hive_principal}") if security_enabled else hive_url
if stack_supports_ranger_hive_jdbc_url_change:
ranger_hive_url = format("jdbc:hive2://{hive_zookeeper_quorum}/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace={hive_server2_zookeeper_namespace}")
hive_ranger_plugin_config = {
'username': repo_config_username,
'password': repo_config_password,
'jdbc.driverClassName': jdbc_driver_class_name,
'jdbc.url': ranger_hive_url,
'commonNameForCertificate': common_name_for_certificate
}
hive_ranger_plugin_repo = {
'isActive': 'true',
'config': json.dumps(hive_ranger_plugin_config),
'description': 'hive repo',
'name': repo_name,
'repositoryType': 'hive',
'assetType': '3'
}
custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
if len(custom_ranger_service_config) > 0:
hive_ranger_plugin_config.update(custom_ranger_service_config)
if stack_supports_ranger_kerberos and security_enabled:
hive_ranger_plugin_config['policy.download.auth.users'] = hive_user
hive_ranger_plugin_config['tag.download.auth.users'] = hive_user
hive_ranger_plugin_config['policy.grantrevoke.auth.users'] = hive_user
if stack_supports_ranger_kerberos:
hive_ranger_plugin_config['ambari.service.check.user'] = policy_user
hive_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': hive_ranger_plugin_config,
'description': 'hive repo',
'name': repo_name,
'type': 'hive'
}
xa_audit_db_password = ''
if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
xa_audit_db_is_enabled = False
if xml_configurations_supported and stack_supports_ranger_audit_db:
xa_audit_db_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.db']
xa_audit_hdfs_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
ssl_keystore_password = config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
ssl_truststore_password = config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
# for SQLA explicitly disable audit to DB for Ranger
if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
xa_audit_db_is_enabled = False
# ranger hive plugin section end
# The property below is used for clusters deployed in a cloud environment to create the Ranger Hive service in Ranger Admin;
# it needs to be added as a custom property.
ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-properties/ranger.service.config.param.enable.hive.metastore.lookup', False)
if security_enabled:
hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
# For ldap - hive_check
hive_ldap_user= config['configurations']['hive-env'].get('alert_ldap_username','')
hive_ldap_passwd=config['configurations']['hive-env'].get('alert_ldap_password','')
# For druid metadata password
druid_metadata_password = ""
if 'druid-common' in config['configurations'] \
and 'druid.metadata.storage.connector.password' in config['configurations']['druid-common']:
druid_metadata_password = config['configurations']['druid-common']['druid.metadata.storage.connector.password']
# For druid storage directory, hive will write segments here
druid_storage_dir = ""
if 'druid-common' in config['configurations'] \
and 'druid.storage.storageDirectory' in config['configurations']['druid-common']:
druid_storage_dir = config['configurations']['druid-common']['druid.storage.storageDirectory']
manage_hive_fsroot = default('/configurations/cluster-env/manage_hive_fsroot', True)
# replication directories
hive_repl_cmrootdir = default('/configurations/hive-site/hive.repl.cmrootdir', None)
hive_repl_rootdir = default('/configurations/hive-site/hive.repl.rootdir', None)
| 52.408 | 381 | 0.793009 |
980c2c39bc74dbdf36b7d2f89a3c13908bb45a58 | 12,209 | py | Python | databricks_cli/jobs/cli.py | sweisdb/databricks-cli | 5444cb8b94ef481e1656845f588d8d118bc352db | ["Apache-2.0"] | null | null | null | databricks_cli/jobs/cli.py | sweisdb/databricks-cli | 5444cb8b94ef481e1656845f588d8d118bc352db | ["Apache-2.0"] | null | null | null | databricks_cli/jobs/cli.py | sweisdb/databricks-cli | 5444cb8b94ef481e1656845f588d8d118bc352db | ["Apache-2.0"] | null | null | null |
# Databricks CLI
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from json import loads as json_loads
import click
from tabulate import tabulate
from databricks_cli.click_types import OutputClickType, JsonClickType, JobIdClickType
from databricks_cli.jobs.api import JobsApi
from databricks_cli.utils import eat_exceptions, CONTEXT_SETTINGS, pretty_format, json_cli_base, \
truncate_string
from databricks_cli.configure.config import provide_api_client, profile_option, \
get_profile_from_context, debug_option, get_config, api_version_option
from databricks_cli.configure.provider import DatabricksConfig, update_and_persist_config, \
ProfileConfigProvider
from databricks_cli.version import print_version_callback, version as cli_version
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--json-file', default=None, type=click.Path(),
help='File containing JSON request to POST to /api/2.*/jobs/create.')
@click.option('--json', default=None, type=JsonClickType(),
help=JsonClickType.help('/api/2.*/jobs/create'))
@api_version_option
@debug_option
@profile_option
@eat_exceptions
@provide_api_client
def create_cli(api_client, json_file, json, version):
"""
Creates a job.
    The specification for the json option can be found at
https://docs.databricks.com/api/latest/jobs.html#create
"""
check_version(api_client, version)
json_cli_base(json_file, json,
lambda json: JobsApi(api_client).create_job(json, version=version))
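# Added usage note (hedged): assuming the standard `databricks` entry point this
# module is wired into (see the upgrade hint printed by check_version below), a
# create call might look like one of the following; the file name and JSON
# payload are illustrative only and must follow the linked jobs/create spec:
#   databricks jobs create --json-file create-job.json
#   databricks jobs create --json '{"name": "nightly-job"}'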
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--job-id', required=True, type=JobIdClickType(), help=JobIdClickType.help)
@click.option('--json-file', default=None, type=click.Path(),
help='File containing partial JSON request to POST to /api/2.*/jobs/reset. '
'For more, read full help message.')
@click.option('--json', default=None, type=JsonClickType(),
help='Partial JSON string to POST to /api/2.*/jobs/reset. '
'For more, read full help message.')
@api_version_option
@debug_option
@profile_option
@eat_exceptions
@provide_api_client
def reset_cli(api_client, json_file, json, job_id, version):
"""
Resets (edits) the definition of a job.
    The specification for the json option can be found at
https://docs.databricks.com/api/latest/jobs.html#jobsjobsettings
"""
check_version(api_client, version)
if not bool(json_file) ^ bool(json):
raise RuntimeError('Either --json-file or --json should be provided')
if json_file:
with open(json_file, 'r') as f:
json = f.read()
deser_json = json_loads(json)
request_body = {
'job_id': job_id,
'new_settings': deser_json
}
JobsApi(api_client).reset_job(request_body, version=version)
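# Added usage note (hedged): an illustrative reset invocation, again assuming
# the standard `databricks` entry point; the job id and file name are made up,
# and new-settings.json must hold the full new_settings object described at the
# URL in the docstring above:
#   databricks jobs reset --job-id 123 --json-file new-settings.json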
def _jobs_to_table(jobs_json):
ret = []
for j in jobs_json['jobs']:
ret.append((j['job_id'], truncate_string(j['settings']['name'])))
return sorted(ret, key=lambda t: t[1].lower())
@click.command(context_settings=CONTEXT_SETTINGS,
short_help='Lists the jobs in the Databricks Job Service.')
@click.option('--output', default=None, help=OutputClickType.help, type=OutputClickType())
@click.option('--type', 'job_type', default=None, help='The type of job to list', type=str)
@click.option('--expand-tasks', is_flag=True,
help='Expands the tasks array (only available in API 2.1).')
@click.option('--offset', default=None, type=int,
help='The offset to use when listing jobs (only available in API 2.1).')
@click.option('--limit', default=None, type=int,
help='The maximum number of jobs to fetch in a single call ' +
'(only available in API 2.1).')
@click.option('--all', '_all', is_flag=True,
help='Lists all jobs by executing sequential calls to the API ' +
'(only available in API 2.1).')
@api_version_option
@debug_option
@profile_option
@eat_exceptions
@provide_api_client
def list_cli(api_client, output, job_type, version, expand_tasks, offset, limit, _all):
"""
Lists the jobs in the Databricks Job Service.
By default the output format will be a human readable table with the following fields
- Job ID
- Job name
A JSON formatted output can also be requested by setting the --output parameter to "JSON"
In table mode, the jobs are sorted by their name.
"""
check_version(api_client, version)
api_version = version or api_client.jobs_api_version
if api_version != '2.1' and (expand_tasks or offset or limit or _all):
click.echo(click.style('ERROR', fg='red') + ': the options --expand-tasks, ' +
'--offset, --limit, and --all are only available in API 2.1', err=True)
return
jobs_api = JobsApi(api_client)
has_more = True
jobs = []
if _all:
offset = 0
limit = 20
while has_more:
jobs_json = jobs_api.list_jobs(job_type=job_type, expand_tasks=expand_tasks,
offset=offset, limit=limit, version=version)
jobs += jobs_json['jobs'] if 'jobs' in jobs_json else []
has_more = jobs_json.get('has_more', False) and _all
if has_more:
offset = offset + \
(len(jobs_json['jobs']) if 'jobs' in jobs_json else 20)
out = {'jobs': jobs}
if OutputClickType.is_json(output):
click.echo(pretty_format(out))
else:
click.echo(tabulate(_jobs_to_table(out),
tablefmt='plain', disable_numparse=True))
@click.command(context_settings=CONTEXT_SETTINGS,
short_help='Deletes the specified job.')
@click.option('--job-id', required=True, type=JobIdClickType(), help=JobIdClickType.help)
@api_version_option
@debug_option
@profile_option
@eat_exceptions
@provide_api_client
def delete_cli(api_client, job_id, version):
"""
Deletes the specified job.
"""
check_version(api_client, version)
JobsApi(api_client).delete_job(job_id, version=version)
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--job-id', required=True, type=JobIdClickType(), help=JobIdClickType.help)
@api_version_option
@debug_option
@profile_option
@eat_exceptions
@provide_api_client
def get_cli(api_client, job_id, version):
"""
Describes the metadata for a job.
"""
check_version(api_client, version)
click.echo(pretty_format(
JobsApi(api_client).get_job(job_id, version=version)))
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--job-id', required=True, type=JobIdClickType(), help=JobIdClickType.help)
@click.option('--jar-params', default=None, type=JsonClickType(),
help='JSON string specifying an array of parameters. i.e. ["param1", "param2"]')
@click.option('--notebook-params', default=None, type=JsonClickType(),
help='JSON string specifying a map of key-value pairs. '
'i.e. {"name": "john doe", "age": 35}')
@click.option('--python-params', default=None, type=JsonClickType(),
help='JSON string specifying an array of parameters. i.e. ["param1", "param2"]')
@click.option('--python-named-params', default=None, type=JsonClickType(),
help='JSON string specifying a map of key-value pairs. '
'i.e. {"name": "john doe", "age": 35}')
@click.option('--spark-submit-params', default=None, type=JsonClickType(),
help='JSON string specifying an array of parameters. i.e. '
'["--class", "org.apache.spark.examples.SparkPi"]')
@click.option('--idempotency-token', default=None,
help='If an active run with the provided token already exists, ' +
'the request does not create a new run, ' +
'but returns the ID of the existing run instead.')
@api_version_option
@debug_option
@profile_option
@eat_exceptions
@provide_api_client
def run_now_cli(api_client, job_id, jar_params, notebook_params, python_params,
python_named_params, spark_submit_params, idempotency_token, version):
"""
Runs a job with optional per-run parameters.
Parameter options are specified in json and the format is documented in
https://docs.databricks.com/api/latest/jobs.html#jobsrunnow.
"""
check_version(api_client, version)
jar_params_json = json_loads(jar_params) if jar_params else None
notebook_params_json = json_loads(
notebook_params) if notebook_params else None
python_params = json_loads(python_params) if python_params else None
python_named_params = json_loads(
python_named_params) if python_named_params else None
spark_submit_params = json_loads(
spark_submit_params) if spark_submit_params else None
res = JobsApi(api_client).run_now(
job_id, jar_params_json, notebook_params_json, python_params,
spark_submit_params, python_named_params, idempotency_token, version=version)
click.echo(pretty_format(res))
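# Added usage note (hedged): an illustrative run-now invocation reusing the
# sample values from the option help strings above; the job id is made up:
#   databricks jobs run-now --job-id 246 \
#       --notebook-params '{"name": "john doe", "age": 35}'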
@click.command(context_settings=CONTEXT_SETTINGS)
@api_version_option
@debug_option
@profile_option
def configure(version):
profile = get_profile_from_context()
config = ProfileConfigProvider(
profile).get_config() if profile else get_config()
new_config = config or DatabricksConfig.empty()
new_config.jobs_api_version = version
update_and_persist_config(profile, new_config)
@click.group(context_settings=CONTEXT_SETTINGS,
short_help='Utility to interact with jobs.')
@click.option('--version', '-v', is_flag=True, callback=print_version_callback,
expose_value=False, is_eager=True, help=cli_version)
@debug_option
@profile_option
@eat_exceptions
def jobs_group(): # pragma: no cover
"""
Utility to interact with jobs.
This is a wrapper around the jobs API (https://docs.databricks.com/api/latest/jobs.html).
Job runs are handled by ``databricks runs``.
"""
pass
jobs_group.add_command(create_cli, name='create')
jobs_group.add_command(list_cli, name='list')
jobs_group.add_command(delete_cli, name='delete')
jobs_group.add_command(get_cli, name='get')
jobs_group.add_command(reset_cli, name='reset')
jobs_group.add_command(run_now_cli, name='run-now')
jobs_group.add_command(configure, name='configure')
def check_version(api_client, version):
if version is not None:
# If the user explicitly passed --version=2.x for this invocation it means
# they really really want that version, let's not show any warnings
return
if api_client.jobs_api_version == '2.1':
# If the user is globally configured to use 2.1 we don't show the warning
return
click.echo(click.style('WARN', fg='yellow') + ': Your CLI is configured ' +
'to use Jobs API 2.0. In order to use the latest Jobs features ' +
'please upgrade to 2.1: \'databricks jobs configure --version=2.1\'. ' +
'Future versions of this CLI will default to the new Jobs API. ' +
'Learn more at https://docs.databricks.com/dev-tools/cli/jobs-cli.html',
err=True
)
| 40.561462 | 98 | 0.698255 |
9e3dfaa0335e9d020d274b0f48767cafc81821d3 | 186 | py | Python | netsim/cli/version.py | barajus/netsim-tools | b536cd1d9ec1f1dd6a8507aafadca6f9528d9d33 | ["MIT"] | null | null | null | netsim/cli/version.py | barajus/netsim-tools | b536cd1d9ec1f1dd6a8507aafadca6f9528d9d33 | ["MIT"] | null | null | null | netsim/cli/version.py | barajus/netsim-tools | b536cd1d9ec1f1dd6a8507aafadca6f9528d9d33 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
#
# Print netsim-tools version
#
import sys
import typing
import netsim
def run(args: typing.List[str]) -> None:
print("netsim-tools version %s" % netsim.__version__)
| 15.5 | 55 | 0.715054 |
1b7d038214949ccf867bd22bcb41e29804ba0599 | 262 | py | Python | venv/Lib/site-packages/pandas/_testing/compat.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | 28,899 | 2016-10-13T03:32:12.000Z | 2022-03-31T21:39:05.000Z | venv/Lib/site-packages/pandas/_testing/compat.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | 31,004 | 2016-10-12T23:22:27.000Z | 2022-03-31T23:17:38.000Z | venv/Lib/site-packages/pandas/_testing/compat.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | 15,149 | 2016-10-13T03:21:31.000Z | 2022-03-31T18:46:47.000Z |
"""
Helpers for sharing tests between DataFrame/Series
"""
from pandas import DataFrame
def get_dtype(obj):
if isinstance(obj, DataFrame):
# Note: we are assuming only one column
return obj.dtypes.iat[0]
else:
return obj.dtype
| 18.714286 | 50 | 0.667939 |
1b07361970a90c99eb7c2d44fb6eb95bac3a8ced | 29,145 | py | Python | src/genie/libs/parser/utils/common.py | deepB123/genieparser | dc7c62d24dd833000fe6575b3c026b830244b03a | ["Apache-2.0"] | null | null | null | src/genie/libs/parser/utils/common.py | deepB123/genieparser | dc7c62d24dd833000fe6575b3c026b830244b03a | ["Apache-2.0"] | 4 | 2021-03-24T04:25:38.000Z | 2021-03-28T04:31:21.000Z | src/genie/libs/parser/utils/common.py | deepB123/genieparser | dc7c62d24dd833000fe6575b3c026b830244b03a | ["Apache-2.0"] | 1 | 2021-04-05T22:05:15.000Z | 2021-04-05T22:05:15.000Z |
'''Common functions to be used in parsers'''
# python
import re
import os
import sys
import json
import math
import logging
import warnings
import importlib
from genie.libs import parser
from genie.abstract import Lookup
from genie.metaparser.util import merge_dict
from pyats import configuration as cfg
from .extension import ExtendParsers
PYATS_EXT_PARSER = 'pyats.libs.external.parser'
log = logging.getLogger(__name__)
def _load_parser_json():
'''get all parser data in json file'''
try:
mod = importlib.import_module('genie.libs.parser')
parsers = os.path.join(mod.__path__[0], 'parsers.json')
except Exception:
parsers = ''
if not os.path.isfile(parsers):
log.warning('parsers.json does not exist, make sure you '
'are running with latest version of '
'genie.libs.parsers')
parser_data = {}
else:
# Open all the parsers in json file
with open(parsers) as f:
parser_data = json.load(f)
# check if provided external parser packages
ext_parser_package = cfg.get(PYATS_EXT_PARSER, None) or \
os.environ.get(PYATS_EXT_PARSER.upper().replace('.', '_'))
if ext_parser_package:
ext = ExtendParsers(ext_parser_package)
ext.extend()
ext.output.pop('tokens', None)
summary = ext.output.pop('extend_info', None)
merge_dict(parser_data, ext.output, update=True)
log.warning("External parser counts: {}\nSummary:\n{}"
.format(len(summary), json.dumps(summary, indent=2)))
return parser_data
# Parser within Genie
parser_data = _load_parser_json()
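# Added note (hedged): parsers.json is keyed by show command; judging from how
# this module indexes it (values keyed by OS/abstraction token, leaf dicts read
# by _find_parser_cls below), a single entry is shaped roughly like:
#   parser_data['show version'] = {
#       'iosxe': {'package': 'genie.libs.parser',
#                 'module_name': 'show_platform',
#                 'class': 'ShowVersion'},
#   }
# The command, module and class names above are illustrative only.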
def get_parser_commands(device, data=parser_data):
    '''Remove all commands which contain { since those require
    extra kwargs that cannot be guessed dynamically.
    Also remove the ones that aren't related to this os.'''
commands = []
for command, values in data.items():
if '{' in command or command == 'tokens' or device.os not in values:
continue
commands.append(command)
return commands
def format_output(parser_data, tab=2):
    '''Format the parsed output in an aligned, indented structure'''
s = ['{\n']
if parser_data is None:
return parser_data
for k,v in sorted(parser_data.items()):
if isinstance(v, dict):
v = format_output(v, tab+2)
else:
v = repr(v)
s.append('%s%r: %s,\n' % (' '*tab, k, v))
s.append('%s}' % (' '*(tab-2)))
return ''.join(s)
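# Added example (follows directly from the logic above): nested dicts are
# rendered with two extra spaces per level, so format_output({'a': {'b': 1}})
# returns the string:
#   {
#     'a': {
#       'b': 1,
#     },
#   }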
def get_parser_exclude(command, device):
try:
return get_parser(command, device)[0].exclude
except AttributeError:
return []
def get_parser(command, device, fuzzy=False):
'''From a show command and device, return parser class and kwargs if any'''
try:
order_list = device.custom.get('abstraction').get('order', [])
except AttributeError:
order_list = None
lookup = Lookup.from_device(device, packages={'parser': parser})
results = _fuzzy_search_command(command, fuzzy, device.os, order_list)
valid_results = []
for result in results:
found_command, data, kwargs = result
if found_command == 'tokens':
continue
        # Check if all the tokens exist and take the farthest one
for token in lookup._tokens:
if token in data:
data = data[token]
try:
valid_results.append((found_command,
_find_parser_cls(device, data), kwargs))
except KeyError:
# Case when the show command is only found under one of
# the child level tokens
continue
if not valid_results:
raise Exception("Could not find parser for "
"'{c}' under {l}".format(c=command, l=lookup._tokens))
if not fuzzy:
return valid_results[0][1], valid_results[0][2]
return valid_results
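# Added usage sketch (hedged): given a pyATS/Genie device object `device` whose
# os is set (e.g. 'iosxe'), a typical call into this module's public API is:
#   parser_cls, kwargs = get_parser('show interfaces GigabitEthernet1', device)
#   # parser_cls(device=device).parse(**kwargs) would then run the parser
# The command and device values are illustrative only.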
def _fuzzy_search_command(search, fuzzy, os=None, order_list=None,
device=None):
""" Find commands that match the search criteria.
Args:
search (`str`): the search query
fuzzy (`bool`): whether or not fuzzy mode should be used
os (`str`): the device os that the search space is limited to
order_list (`list`): the device abstraction order list if any
device (`Device`): the device instance
Returns:
list: the result of the search
"""
# Perfect match should return
if search in parser_data:
return [(search, parser_data[search], {})]
# Preprocess if fuzzy
if fuzzy:
search = search.lstrip('^').rstrip('$').replace(r'\ ', ' ').replace(
r'\-', '-').replace('\\"', '"').replace('\\,', ',').replace(
'\\\'', '\'').replace('\\*', '*').replace('\\:', ':').replace(
'\\^', '^').replace('\\/', '/')
# Fix search to remove extra spaces
search = ' '.join(filter(None, search.split()))
tokens = search.split()
best_score = -math.inf
result = []
for command, source in parser_data.items():
# Tokens and kwargs parameter must be non reference
match_result = _matches_fuzzy(0, 0, tokens.copy(),
command, {}, fuzzy)
if match_result:
kwargs, score = match_result
if order_list and device and \
getattr(device, order_list[0]) not in source:
continue
if os and os not in source:
continue
entry = (command, source, kwargs)
if score > best_score:
# If we found a better match, discard everything and start new
result = [entry]
best_score = score
elif score == best_score:
result.append(entry)
# Return only one instance if fuzzy is not used
# Check if any ambiguous commands
if not fuzzy and len(result) > 1:
# If all results have the same argument positions but different names
# It should return the first result
# Check if the result regex match the search
for instance in result:
s = re.sub('{.*?}', '(.*)', instance[0])
            p = re.compile(s)
if p.match(search):
return [instance]
if len(set(re.sub('{.*?}', '---', instance[0])
for instance in result)) == 1:
return [result[0]]
else:
# Search is ambiguous
raise Exception("\nSearch for '" + search + "' is ambiguous. " +
"Please be more specific in your keywords.\n\n" +
"Results matched:\n" + '\n'.join(
'> ' + i[0] for i in result))
return result
def _is_regular_token(token):
""" Checks if a token is regular (does not contain regex symbols).
Args:
token (`str`): the token to be tested
Returns:
bool: whether or not the token is regular
"""
token_is_regular = True
if not token.isalnum():
# Remove escaped characters
candidate = token.replace('/', '')
candidate = candidate.replace('"', '')
candidate = candidate.replace(r'\^', '')
candidate = candidate.replace('\'', '')
candidate = candidate.replace('-', '')
candidate = candidate.replace('^', '')
candidate = candidate.replace('_', '')
candidate = candidate.replace(':', '')
candidate = candidate.replace(',', '')
candidate = candidate.replace(r'\.', '')
candidate = candidate.replace(r'\|', '')
token_is_regular = candidate.isalnum() or candidate == ''
return token_is_regular
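# Added examples (traced through the checks above):
#   _is_regular_token('interface')  -> True   (purely alphanumeric)
#   _is_regular_token('Gi1/0/1')    -> True   (only characters this helper strips)
#   _is_regular_token('.*')         -> False  (regex metacharacters remain)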
def _matches_fuzzy(i, j, tokens, command, kwargs, fuzzy,
required_arguments=None, score=0):
""" Compares between given tokens and command to see if they match.
Args:
i (`int`): current end of tokens
j (`int`): current index of command tokens
tokens (`list`): the search tokens
command (`str`): the command to be compared with
kwargs (`dict`): the collected arguments
fuzzy (`bool`): whether or not fuzzy should be used
required_arguments (`int`): number of arguments command has
score (`int`): the current similarity score between token and command
Returns:
bool: whether or not search matches the command
"""
command_tokens = command.split()
# Initialize by counting how many arguments this command needs
if required_arguments is None:
required_arguments = len(re.findall('{.*?}', command))
while i < len(tokens):
# If command token index is greater than its length, stop
if j >= len(command_tokens):
return None
token = tokens[i]
command_token = command_tokens[j]
token_is_regular = True
if fuzzy:
if token == '*':
# Special case for `show lldp entry *`
token_is_regular = True
else:
# Check if it is nonregex token
token_is_regular = _is_regular_token(token)
if token_is_regular:
# Special case for `:\|Swap:`
token = token.replace(r'\|', '|')
# Special case for command `vim-cmd vmsvc/snapshot.get {vmid}`
token = token.replace(r'\.', '.')
if token_is_regular:
# Current token might be command or argument
if '{' in command_token:
# Handle the edge case of argument not being a token
# When this is implemented there is only one case:
# /dna/intent/api/v1/interface/{interface}
if not command_token.startswith('{'):
# Find before and after string
groups = re.match('(.*){.*?}(.*)', command_token).groups()
is_found = False
if len(groups) == 2:
start, end = groups
# Need to have perfect match with token
if token.startswith(start) and token.endswith(end):
# Escape regex
start = re.escape(start)
end = re.escape(end)
# Find the argument using the escaped start and end
kwargs[
re.search('{(.*)}', command_token).groups()[0]
] = re.match('{}(.*){}'
.format(start, end), token).groups()[0]
is_found = True
score += 103
if not is_found:
return None
else:
argument_key = re.search('{(.*)}',
command_token).groups()[0]
i += 1
j += 1
                # Add 100 once here to favor a non-greedy argument fit
score += 100
# If argument is any of these, argument can only be 1 token
# Else argument can be up to 2 tokens
endpoint = i + 1 \
if argument_key == 'vrf' \
or argument_key == 'rd' \
or argument_key == 'instance' \
or argument_key == 'vrf_type' \
or argument_key == 'feature' \
or argument_key == 'fileA' \
or argument_key == 'fileB' \
else i + 2
# Try out ways we can assign search tokens into argument
for index in range(i, endpoint):
if index > len(tokens):
return None
# Make sure not to use regex expression as argument
if index > i:
if fuzzy and not _is_regular_token(tokens[
index - 1]):
return None
# Currently spanned argument
if 'match' in tokens or 'include' in tokens:
argument_value = ' '.join(tokens[i - 1:index]).replace('\\', '')
else:
argument_value = ' '.join(tokens[i - 1:index]).rstrip(
'"').replace('\\', '')
# argument_value = ' '.join(tokens[i - 1:index]).replace('\\', '')
# Delete the extra tokens if spanning more than one
tokens_copy = tokens[:i] + tokens[index:]
tokens_copy[i - 1] = command_token
kwargs_copy = kwargs.copy()
kwargs_copy.setdefault(argument_key, argument_value)
result = _matches_fuzzy(i, j, tokens_copy, command,
kwargs_copy, fuzzy, required_arguments, score)
if result:
result_kwargs, score = result
if len(result_kwargs) == required_arguments:
return result_kwargs, score
return None
elif token == command_token:
# Same token, assign higher score
score += 102
elif not token == command_token:
# Not matching, check if prefix
if not command_token.startswith(token):
return None
# The two tokens are similar to each other, replace
tokens[i] = command_token
score += 100
# Matches current, go to next token
i += 1
j += 1
else:
            # Count the number of regex tokens that were consumed
skipped = 1
# Not a token, should be a regex expression
# Keep eating if next token is also regex
while i + 1 < len(tokens) and not _is_regular_token(tokens[i + 1]):
i += 1
skipped += 1
# Match current span with command
test = re.match(' '.join(tokens[:i + 1]), command)
if test:
# Perform command token lookahead
_, end = test.span()
# Expression matches command to end
if i + 1 == len(tokens) and end == len(command):
# Return result if from start to end there are no arguments
if all(not '{' in ct for ct in command_tokens[j:]):
return kwargs, score
else:
# Else in range we have another unspecified argument
return None
if end == 0:
# If regex matched nothing, we stop because
# expression = "d? a b c" search in "a b c"
# expression = "a b d? c" search in "a b c"
return None
# Span single command token
if abs(
end - sum(len(ct) for ct in command_tokens[:j + 1]) - j
) <= 1:
if not '{' in command_token:
# Span single token if it is not argument
i += 1
j += 1
continue
else:
# Faulty match
return None
else:
# Span multiple command tokens
# Find which command token it spans up to
current_sum = 0
token_end = 0
while current_sum + len(command_tokens[token_end]) <= end:
current_sum += len(command_tokens[token_end])
if current_sum < end:
# Account for space
current_sum += 1
token_end += 1
else:
break
                # Increment token index
i += 1
# For matched range, perform submatches on next real token
for subindex in range(j + skipped, token_end + 1):
# Make sure items are passed by copies, not by reference
submatch_result = _matches_fuzzy(i, subindex,
tokens.copy(), command, kwargs.copy(),
fuzzy, required_arguments, score)
# If any match is found, return true
if submatch_result:
result_kwargs, score = submatch_result
# Result kwargs must match
# number of arguments this command requires
if required_arguments == len(result_kwargs):
return result_kwargs, score
# Fail to match
return None
else:
# Failed to match fuzzy
return None
# Reached end of tokens
if len(command_tokens) == j:
# If command pointer is at end then it matches
return kwargs, score
else:
# It doesn't match
return None
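# Added example (traced through the logic above): matching plain tokens against
# a command template collects the bracketed arguments, e.g.
#   _matches_fuzzy(0, 0, ['show', 'vrf', 'red'], 'show vrf {vrf}', {}, False)
# returns ({'vrf': 'red'}, <score>), where the score rewards exact token and
# argument matches.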
def _find_parser_cls(device, data):
lookup = Lookup.from_device(device, packages={'parser':importlib.import_module(data['package'])})
return getattr(getattr(lookup.parser, data['module_name']), data['class'])
class Common():
'''Common functions to be used in parsers.'''
@classmethod
def regexp(self, expression):
def match(value):
if re.match(expression, value):
return value
else:
                raise TypeError("Value '%s' doesn't match regex '%s'"
% (value, expression))
return match
@classmethod
def convert_intf_name(self, intf):
'''return the full interface name
Args:
intf (`str`): Short version of the interface name
Returns:
Full interface name fit the standard
Raises:
None
example:
>>> convert_intf_name(intf='Eth2/1')
'''
        # Please add more when facing other types of interfaces
convert = {'Eth': 'Ethernet',
'Lo': 'Loopback',
'lo': 'Loopback',
'Fa': 'FastEthernet',
'Fas': 'FastEthernet',
'Po': 'Port-channel',
'PO': 'Port-channel',
'Null': 'Null',
'Gi': 'GigabitEthernet',
'Gig': 'GigabitEthernet',
'GE': 'GigabitEthernet',
'Te': 'TenGigabitEthernet',
'Ten': 'TenGigabitEthernet',
'Tw': 'TwoGigabitEthernet',
'Two': 'TwoGigabitEthernet',
'Twe': 'TwentyFiveGigE',
'mgmt': 'mgmt',
'Vl': 'Vlan',
'Tu': 'Tunnel',
'Fe': '',
'Hs': 'HSSI',
'AT': 'ATM',
'Et': 'Ethernet',
'BD': 'BDI',
'Se': 'Serial',
'Fo': 'FortyGigabitEthernet',
'For': 'FortyGigabitEthernet',
'Hu': 'HundredGigE',
'Hun': 'HundredGigE',
'vl': 'vasileft',
'vr': 'vasiright',
'BE': 'Bundle-Ether',
'M-E': 'M-Ethernet', # comware
'BAGG' : 'Bridge-Aggregation' # comware
}
m = re.search(r'([a-zA-Z]+)', intf)
m1 = re.search(r'([\d\/\.]+)', intf)
m2 = re.search(r'(M-E)', intf)
if hasattr(m, 'group') and hasattr(m1, 'group'):
if hasattr(m2, 'group'):
int_type = m2.group(0)
else:
int_type = m.group(0)
int_port = m1.group(0)
if int_type in convert.keys():
return(convert[int_type] + int_port)
else:
# Unifying interface names
converted_intf = intf[0].capitalize()+intf[1:].replace(
' ','').replace('ethernet', 'Ethernet')
return(converted_intf)
else:
return(intf)
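        # Added examples (follow directly from the mapping above):
        #   Common.convert_intf_name('Eth2/1')   -> 'Ethernet2/1'
        #   Common.convert_intf_name('Gi1/0/1')  -> 'GigabitEthernet1/0/1'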
@classmethod
def retrieve_xml_child(self, root, key):
'''return the root which contains the key from xml
Args:
root (`obj`): ElementTree Object, point to top of the tree
            key (`str`): Expected tag name (without namespace)
Returns:
Element object of the given tag
Raises:
None
example:
>>> retrieve_xml_child(
root=<Element '{urn:ietf:params:xml:ns:netconf:base:1.0}rpc-reply' at 0xf760434c>,
key='TABLE_vrf')
'''
for item in root:
if key in item.tag:
return item
else:
root = item
return self.retrieve_xml_child(root, key)
@classmethod
def compose_compare_command(self, root, namespace, expect_command):
        '''compose command from the XML Element object from the root,
        then compare it with expect_command.
        Only works for Cisco standard output.
Args:
root (`obj`): ElementTree Object, point to top of the tree
            namespace (`str`): Namespace. Ex. {http://www.cisco.com/nxos:8.2.0.SK.1.:rip}
expect_command (`str`): expected command.
Returns:
None
Raises:
            AssertionError: CLI composed from XML tags does not match the expected command
Exception: No mandatory tag __readonly__ in output
example:
>>> compose_compare_command(
root=<Element '{urn:ietf:params:xml:ns:netconf:base:1.0}rpc-reply' at 0xf760434c>,
namespace='{http://www.cisco.com/nxos:8.2.0.SK.1.:rip}',
expect_command='show bgp all dampening flap-statistics')
'''
# get to data node
cmd_node = list(root)[0]
# compose command from element tree
# ex. <nf:data>
# <show>
# <bgp>
# <all>
# <dampening>
# <flap-statistics>
# <__readonly__>
cli = ''
while True:
# get next node
try:
cmd_node = list(cmd_node)
if len(cmd_node) == 1:
                    # when there is only one child
cmd_node = cmd_node[0]
# <__XML__PARAM__vrf-name>
# <__XML__value>VRF1</__XML__value>
# </__XML__PARAM__vrf-name>
if '__XML__value' in cmd_node.tag:
cli += ' ' + cmd_node.text
elif len(cmd_node) > 1:
# <__XML__PARAM__interface>
# <__XML__value>loopback100</__XML__value>
# <vrf>
for item in cmd_node:
if '__XML__value' in item.tag:
cli += ' ' + item.text
else:
cmd_node = item
break
else:
break
except Exception:
pass
# get tag name
tag = cmd_node.tag.replace(namespace, '')
# __readonly__ is the end of the command
if '__readonly__' not in tag:
if '__XML__PARAM__' not in tag and \
'__XML__value' not in tag and \
'TABLE' not in tag:
cli += ' ' + tag
else:
break
            # if there is no __readonly__ but the command has outputs,
            # issue a warning
if 'TABLE' in tag:
                warnings.warn('Tag "__readonly__" should exist in output when '
'there are actual values in output')
break
cli = cli.strip()
# compare the commands
assert cli == expect_command, \
'Cli created from XML tags does not match the actual cli:\n'\
'XML Tags cli: {c}\nCli command: {e}'.format(c=cli, e=expect_command)
@classmethod
def convert_xml_time(self, xml_time):
'''Convert xml time "PT1H4M41S" to normal time "01:04:41"
Args:
xml_time (`str`): XML time
Returns:
Standard time string
Raises:
None
example:
>>> convert_xml_time(xml_time='PT1H4M41S')
>>> "01:04:41"
'''
# P4DT12M38S
# PT1H4M41S
p = re.compile(r'^P((?P<day>\d+)D)?T((?P<hour>\d+)H)?((?P<minute>\d+)M)?((?P<second>\d+)S)?$')
m = p.match(xml_time)
if m:
day = m.groupdict()['day']
hour = m.groupdict()['hour']
hour = 0 if not hour else int(hour)
minute = m.groupdict()['minute']
minute = 0 if not minute else int(minute)
second = m.groupdict()['second']
second = 0 if not second else int(second)
if day:
standard_time = "{d}d{h}h".format(d=day, h="%02d"% (hour))
else:
standard_time = ''
standard_time += format("%02d"% (hour))
standard_time += ' ' + format("%02d"% (minute))
standard_time += ' ' + format("%02d"% (second))
standard_time = ':'.join(standard_time.strip().split())
else:
# P4M13DT21H21M19S
standard_time = xml_time
return standard_time
@classmethod
def find_keys(self, key, dictionary):
'''
        Find all values stored under `key` anywhere in a nested dictionary.
        Args:
            key: the key to look for
            dictionary: the (possibly nested) dictionary to search
        Yields:
            every value found under `key`
'''
for k, v in dictionary.items():
if k == key:
yield v
elif isinstance(v, dict):
for result in self.find_keys(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in self.find_keys(key, d):
yield result
@classmethod
def combine_units_of_time(self, hours=None, minutes=None, seconds=None):
        '''Combine separate units of time into 'normal time': HH:MM:SS
Args (All are optional. Nothing returns 00:00:00):
hours (`int`): number of hours
minutes (`int`): number of minutes
seconds (`int`): number of seconds
Returns:
Standard time string
Raises:
None
example:
            >>> combine_units_of_time(minutes=500)
>>> "08:20:00"
'''
total_combined_seconds = 0
if hours:
total_combined_seconds += hours * 60 * 60
if minutes:
total_combined_seconds += minutes * 60
if seconds:
total_combined_seconds += seconds
final_seconds = total_combined_seconds % 60
if final_seconds <= 9:
final_seconds = "0{}".format(final_seconds)
final_minutes = (total_combined_seconds // 60) % 60
if final_minutes <= 9:
final_minutes = "0{}".format(final_minutes)
final_hours = (total_combined_seconds // 60) // 60
if final_hours <= 9:
final_hours = "0{}".format(final_hours)
normal_time = "{}:{}:{}".format(final_hours, final_minutes,
final_seconds)
return normal_time
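        # Added examples (consistent with the arithmetic above):
        #   Common.combine_units_of_time(minutes=500)                    -> '08:20:00'
        #   Common.combine_units_of_time(hours=1, minutes=4, seconds=41) -> '01:04:41'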
| 35.456204 | 106 | 0.479876 |
c592a233e38de8e447c2107360fe3ac2ca9e232e | 188 | py | Python | uwe_app.py | ContinuumBridge/uwe_app | e1364be4075b6e6ca6162f447ddf8942411e0906 | ["MIT"] | null | null | null | uwe_app.py | ContinuumBridge/uwe_app | e1364be4075b6e6ca6162f447ddf8942411e0906 | ["MIT"] | null | null | null | uwe_app.py | ContinuumBridge/uwe_app | e1364be4075b6e6ca6162f447ddf8942411e0906 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# uwe_app.py
# Copyright (C) ContinuumBridge Limited, 2014-2015 - All Rights Reserved
# Written by Peter Claydon
#
import sys
from uwe_app_a import App
App(sys.argv)
| 20.888889 | 72 | 0.760638 |
390789cf7d2bf1b50044dfbb853418e88d6ad844 | 6,979 | py | Python | canvasapi/submission.py | damianfs/canvasapi | 10ef96d268a0535c888d8fdd8169da31d9a66e3f | ["MIT"] | null | null | null | canvasapi/submission.py | damianfs/canvasapi | 10ef96d268a0535c888d8fdd8169da31d9a66e3f | ["MIT"] | null | null | null | canvasapi/submission.py | damianfs/canvasapi | 10ef96d268a0535c888d8fdd8169da31d9a66e3f | ["MIT"] | null | null | null |
from canvasapi.canvas_object import CanvasObject
from canvasapi.paginated_list import PaginatedList
from canvasapi.peer_review import PeerReview
from canvasapi.upload import Uploader
from canvasapi.util import combine_kwargs, obj_or_id
class Submission(CanvasObject):
def __str__(self):
return "{}-{}".format(self.assignment_id, self.user_id)
def create_submission_peer_review(self, user, **kwargs):
"""
Create a peer review for this submission.
:calls: `POST /api/v1/courses/:course_id/assignments/:assignment_id/ \
submissions/:submission_id/peer_reviews \
<https://canvas.instructure.com/doc/api/peer_reviews.html#method.peer_reviews_api.index>`_
        :param user: The user object or ID of the user to assign as the peer reviewer.
:type user: :class:`canvasapi.user.User` or int
:rtype: :class:`canvasapi.peer_review.PeerReview`
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
kwargs["user_id"] = user_id
response = self._requester.request(
"POST",
"courses/{}/assignments/{}/submissions/{}/peer_reviews".format(
self.course_id, self.assignment_id, self.id
),
_kwargs=combine_kwargs(**kwargs),
)
return PeerReview(self._requester, response.json())
def delete_submission_peer_review(self, user, **kwargs):
"""
Delete a peer review for this submission.
:calls: `DELETE /api/v1/courses/:course_id/assignments/:assignment_id/ \
submissions/:submission_id/peer_reviews \
<https://canvas.instructure.com/doc/api/peer_reviews.html#method.peer_reviews_api.index>`_
        :param user: The user object or ID of the peer reviewer to delete.
:type user: :class:`canvasapi.user.User` or int
:rtype: :class:`canvasapi.peer_review.PeerReview`
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
kwargs["user_id"] = user_id
response = self._requester.request(
"DELETE",
"courses/{}/assignments/{}/submissions/{}/peer_reviews".format(
self.course_id, self.assignment_id, self.id
),
_kwargs=combine_kwargs(**kwargs),
)
return PeerReview(self._requester, response.json())
def edit(self, **kwargs):
"""
Comment on and/or update the grading for a student's assignment submission.
:calls: `PUT /api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id \
<https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.update>`_
:rtype: :class:`canvasapi.submission.Submission`
"""
response = self._requester.request(
"PUT",
"courses/{}/assignments/{}/submissions/{}".format(
self.course_id, self.assignment_id, self.user_id
),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update(course_id=self.course_id)
super(Submission, self).set_attributes(response_json)
return self
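    # Added usage sketch (hedged): the field names below follow the Canvas
    # submissions update API linked in the docstring above and are assumptions
    # here, not taken from this file:
    #   submission.edit(
    #       submission={'posted_grade': 90},
    #       comment={'text_comment': 'Nice work'},
    #   )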
def get_submission_peer_reviews(self, **kwargs):
"""
        Get a list of all peer reviews for this submission.
:calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/ \
submissions/:submission_id/peer_reviews \
<https://canvas.instructure.com/doc/api/peer_reviews.html#method.peer_reviews_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.peer_review.PeerReview`
"""
return PaginatedList(
PeerReview,
self._requester,
"GET",
"courses/{}/assignments/{}/submissions/{}/peer_reviews".format(
self.course_id, self.assignment_id, self.id
),
_kwargs=combine_kwargs(**kwargs),
)
def mark_read(self, **kwargs):
"""
Mark submission as read. No request fields are necessary.
:calls: `PUT
/api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id/read \
<https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.mark_submission_read>`_
:returns: True if successfully marked as read.
:rtype: bool
"""
response = self._requester.request(
"PUT",
"courses/{}/assignments/{}/submissions/{}/read".format(
self.course_id, self.assignment_id, self.user_id
),
)
return response.status_code == 204
def mark_unread(self, **kwargs):
"""
Mark submission as unread. No request fields are necessary.
:calls: `DELETE
/api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id/read \
<https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.mark_submission_unread>`_
:returns: True if successfully marked as unread.
:rtype: bool
"""
response = self._requester.request(
"DELETE",
"courses/{}/assignments/{}/submissions/{}/read".format(
self.course_id, self.assignment_id, self.user_id
),
)
return response.status_code == 204
def upload_comment(self, file, **kwargs):
"""
Upload a file to attach to this submission as a comment.
:calls: `POST \
/api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id/comments/files \
<https://canvas.instructure.com/doc/api/submission_comments.html#method.submission_comments_api.create_file>`_
:param file: The file or path of the file to upload.
:type file: file or str
:returns: True if the file uploaded successfully, False otherwise, \
and the JSON response from the API.
:rtype: tuple
"""
response = Uploader(
self._requester,
"courses/{}/assignments/{}/submissions/{}/comments/files".format(
self.course_id, self.assignment_id, self.user_id
),
file,
**kwargs
).start()
if response[0]:
self.edit(comment={"file_ids": [response[1]["id"]]})
return response
class GroupedSubmission(CanvasObject):
def __init__(self, requester, attributes):
super(GroupedSubmission, self).__init__(requester, attributes)
try:
self.submissions = [
Submission(requester, submission)
for submission in attributes["submissions"]
]
except KeyError:
self.submissions = list()
def __str__(self):
return "{} submission(s) for User #{}".format(
len(self.submissions), self.user_id
)
| 36.731579 | 118 | 0.618427 |
da35ef8eb42528a47ef14d43e77d214a74bcfc99 | 5,019 | py | Python | networking_mlnx/eswitchd/resource_mngr.py | stackhpc/networking-mlnx | 6a297fd040ff09e26e477b90f2fb229dc6a691b2 | ["Apache-2.0"] | null | null | null | networking_mlnx/eswitchd/resource_mngr.py | stackhpc/networking-mlnx | 6a297fd040ff09e26e477b90f2fb229dc6a691b2 | ["Apache-2.0"] | null | null | null | networking_mlnx/eswitchd/resource_mngr.py | stackhpc/networking-mlnx | 6a297fd040ff09e26e477b90f2fb229dc6a691b2 | ["Apache-2.0"] | null | null | null |
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lxml import etree
import libvirt
from networking_mlnx._i18n import _LE, _LI, _LW
from oslo_log import log as logging
from networking_mlnx.eswitchd.common import constants
from networking_mlnx.eswitchd.db import device_db
from networking_mlnx.eswitchd.utils import pci_utils
LOG = logging.getLogger(__name__)
class ResourceManager(object):
def __init__(self):
self.pci_utils = pci_utils.pciUtils()
self.device_db = device_db.DeviceDB()
def add_fabric(self, fabric, pf):
hca_port, pf_mlx_dev = self._get_pf_details(pf)
self.device_db.add_fabric(fabric, pf, hca_port, pf_mlx_dev)
vfs = self.discover_devices(pf)
LOG.info(_LI("PF %(pf)s, vfs = %(vf)s"), {'pf': pf, 'vf': vfs})
self.device_db.set_fabric_devices(fabric, pf, vfs)
def scan_attached_devices(self):
devices = []
vm_ids = {}
conn = libvirt.openReadOnly('qemu:///system')
domains = []
self.macs_map = self._get_vfs_macs()
domains_names = conn.listDefinedDomains()
defined_domains = map(conn.lookupByName, domains_names)
domains_ids = conn.listDomainsID()
running_domains = map(conn.lookupByID, domains_ids)
for domain in defined_domains:
[state, maxmem, mem, ncpu, cputime] = domain.info()
if state in (libvirt.VIR_DOMAIN_PAUSED,
libvirt.VIR_DOMAIN_SHUTDOWN,
libvirt.VIR_DOMAIN_SHUTOFF):
domains.append(domain)
domains += running_domains
for domain in domains:
raw_xml = domain.XMLDesc(0)
tree = etree.XML(raw_xml)
hostdevs = tree.xpath("devices/hostdev/source/address")
vm_id = tree.find('uuid').text
for dev in self._get_attached_hostdevs(hostdevs):
devices.append(dev)
vm_ids[dev[0]] = vm_id
return devices, vm_ids
def get_fabric_details(self, fabric, pf=None):
return self.device_db.get_fabric_details(fabric, pf)
def discover_devices(self, pf):
return self.pci_utils.get_vfs_info(pf)
def get_fabric_for_dev(self, dev):
return self.device_db.get_dev_fabric(dev)
def _get_vfs_macs(self):
macs_map = {}
fabrics = self.device_db.device_db.keys()
for fabric in fabrics:
fabric_details = self.device_db.get_fabric_details(fabric)
try:
macs_map[fabric] = \
self.pci_utils.get_vfs_macs_ib(fabric_details)
except Exception:
LOG.exception(_LE("Failed to get vfs macs for fabric %s "),
fabric)
continue
return macs_map
def _get_attached_hostdevs(self, hostdevs):
devs = []
for hostdev in hostdevs:
dev = self.pci_utils.get_device_address(hostdev)
fabric = self.get_fabric_for_dev(dev)
if fabric:
fabric_details = self.get_fabric_details(fabric)
for pf_fabric_details in fabric_details.values():
if (pf_fabric_details['pf_device_type'] ==
constants.MLNX4_VF_DEVICE_TYPE):
hca_port = pf_fabric_details['hca_port']
pf_mlx_dev = pf_fabric_details['pf_mlx_dev']
vf_index = self.pci_utils.get_guid_index(
pf_mlx_dev, dev, hca_port)
elif (pf_fabric_details['pf_device_type'] ==
constants.MLNX5_VF_DEVICE_TYPE):
if dev in pf_fabric_details['vfs']:
vf_index = pf_fabric_details['vfs'][dev]['vf_num']
else:
continue
try:
mac = self.macs_map[fabric][str(vf_index)]
devs.append((dev, mac, fabric))
except KeyError:
LOG.warning(_LW("Failed to retrieve Hostdev MAC"
"for dev %s"), dev)
else:
LOG.info(_LI("No Fabric defined for device %s"), hostdev)
return devs
def _get_pf_details(self, pf):
hca_port = self.pci_utils.get_eth_port(pf)
pf_mlx_dev = self.pci_utils.get_pf_mlx_dev(pf)
return (hca_port, pf_mlx_dev)
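# Editor's note: a hedged driver sketch, not part of the original eswitchd module.
# It assumes a host with libvirt available and a Mellanox PF whose netdev name is
# 'ens1f0'; both the fabric label and the PF name are placeholders.
if __name__ == "__main__":
    manager = ResourceManager()
    manager.add_fabric('default', 'ens1f0')
    attached, vm_ids = manager.scan_attached_devices()
    for dev, mac, fabric in attached:
        LOG.info(_LI("device %(dev)s (mac %(mac)s) on fabric %(fabric)s, vm %(vm)s"),
                 {'dev': dev, 'mac': mac, 'fabric': fabric, 'vm': vm_ids.get(dev)})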
| 39.519685
| 78
| 0.60251
|
cd463859954226870ed89e52fffc81c31c6bd1e5
| 645
|
py
|
Python
|
FirstStepsInPython/Fundamentals/Exercice/Regular Expressions/More Exercises/01. Race.py
|
Pittor052/SoftUni-Studies
|
1ee6341082f6ccfa45b3e82824c37722bcf2fb31
|
[
"MIT"
] | null | null | null |
FirstStepsInPython/Fundamentals/Exercice/Regular Expressions/More Exercises/01. Race.py
|
Pittor052/SoftUni-Studies
|
1ee6341082f6ccfa45b3e82824c37722bcf2fb31
|
[
"MIT"
] | null | null | null |
FirstStepsInPython/Fundamentals/Exercice/Regular Expressions/More Exercises/01. Race.py
|
Pittor052/SoftUni-Studies
|
1ee6341082f6ccfa45b3e82824c37722bcf2fb31
|
[
"MIT"
] | 1
|
2021-10-07T18:30:42.000Z
|
2021-10-07T18:30:42.000Z
|
# # INCOMPLETE !!!
import re
racers = input().split(', ')
line = input()
races = []
regex = r'(?P<name>([A-Za-z])+)'
regex2 = r'(?P<length>.*)'
compare_race = {}
while not line == 'end of race':
races.append(line)
line = input()
for race in races:
distance = len(race)
result = re.finditer(regex, race)
test = ''
for name in result:
if name.group('name') not in compare_race:
compare_race[name.group('name')] = distance
else:
if distance > compare_race[name.group('name')]:
compare_race[name.group()] += distance
print(racers)
print(compare_race)
print(races)
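# Editor's note: a hedged sketch of one way to finish the computation above, on the
# assumption that the intended distance is the sum of the digits hidden in each
# line rather than the line's raw length.
def race_distance(line):
    # Collect every digit character in the line and add the digits up.
    return sum(int(digit) for digit in re.findall(r'\d', line))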
| 21.5
| 59
| 0.586047
|
bacd13bbd6f10b748de2def5cca3ef08764f51f4
| 15,425
|
py
|
Python
|
programs/buck_repo.py
|
richieyan/buck
|
b6173586f67433f7c168309ea4498b371337f34b
|
[
"Apache-2.0"
] | null | null | null |
programs/buck_repo.py
|
richieyan/buck
|
b6173586f67433f7c168309ea4498b371337f34b
|
[
"Apache-2.0"
] | null | null | null |
programs/buck_repo.py
|
richieyan/buck
|
b6173586f67433f7c168309ea4498b371337f34b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import glob
import os
import platform
import subprocess
import sys
import tempfile
import textwrap
from timing import monotonic_time_nanos
from tracing import Tracing
from buck_tool import BuckTool, which, check_output, JAVA_MAX_HEAP_SIZE_MB
from buck_tool import BuckToolException, RestartBuck
import buck_version
JAVA_CLASSPATHS = [
"build/abi_processor/classes",
"build/classes",
"build/src-gen/classes",
"build/aosp/classes",
"build/dx_classes",
"src",
"src-gen",
"third-party/java/android/sdklib.jar",
"third-party/java/android/sdk-common-24.2.3.jar",
"third-party/java/android/common-24.2.3.jar",
"third-party/java/android/layoutlib-api-24.2.3.jar",
"third-party/java/aopalliance/aopalliance.jar",
"third-party/java/args4j/args4j-2.0.30.jar",
"third-party/java/asm/asm-debug-all-5.0.3.jar",
"third-party/java/closure-templates/soy-excluding-deps.jar",
"third-party/java/commons-compress/commons-compress-1.8.1.jar",
"third-party/java/concurrent-locks/concurrent-locks-1.0.0.jar",
"third-party/java/dd-plist/dd-plist.jar",
"third-party/java/ddmlib/ddmlib-22.5.3.jar",
"third-party/java/eclipse/org.eclipse.core.contenttype_3.4.200.v20140207-1251.jar",
"third-party/java/eclipse/org.eclipse.core.jobs_3.6.1.v20141014-1248.jar",
"third-party/java/eclipse/org.eclipse.core.resources_3.9.1.v20140825-1431.jar",
"third-party/java/eclipse/org.eclipse.core.runtime_3.10.0.v20140318-2214.jar",
"third-party/java/eclipse/org.eclipse.equinox.common_3.6.200.v20130402-1505.jar",
"third-party/java/eclipse/org.eclipse.equinox.preferences_3.5.200.v20140224-1527.jar",
"third-party/java/eclipse/org.eclipse.jdt.core.prefs",
"third-party/java/eclipse/org.eclipse.jdt.core_3.10.2.v20150120-1634.jar",
"third-party/java/eclipse/org.eclipse.osgi_3.10.2.v20150203-1939.jar",
"third-party/java/gson/gson-2.2.4.jar",
"third-party/java/guava/guava-19.0.jar",
"third-party/java/guice/guice-3.0.jar",
"third-party/java/guice/guice-assistedinject-3.0.jar",
"third-party/java/guice/guice-multibindings-3.0.jar",
"third-party/java/httpcomponents/httpclient-4.2.6.jar",
"third-party/java/httpcomponents/httpcore-4.2.5.jar",
"third-party/java/icu4j/icu4j-54.1.1.jar",
"third-party/java/infer-annotations/infer-annotations-1.5.jar",
"third-party/java/ini4j/ini4j-0.5.2.jar",
"third-party/java/jackson/jackson-annotations-2.5.5.jar",
"third-party/java/jackson/jackson-core-2.5.5.jar",
"third-party/java/jackson/jackson-databind-2.5.5.jar",
"third-party/java/jackson/jackson-datatype-jdk7-2.5.0.jar",
"third-party/java/jackson-datatype-guava/jackson-datatype-guava-2.5.5.jar",
"third-party/java/jetty/jetty-all-9.2.10.v20150310.jar",
"third-party/java/jna/jna-4.2.0.jar",
"third-party/java/jsr/javax.inject-1.jar",
"third-party/java/jsr/jsr305.jar",
"third-party/java/kxml2/kxml2-2.3.0.jar",
"third-party/java/nailgun/nailgun-server-0.9.2-SNAPSHOT.jar",
"third-party/java/nuprocess/nuprocess-1.0.5-SNAPSHOT.jar",
"third-party/java/ObjCBridge/ObjCBridge.jar",
"third-party/java/okhttp/okhttp-2.7.4.jar",
"third-party/java/okio/okio-1.6.0.jar",
"third-party/java/servlet-api/javax.servlet-api-3.1.0.jar",
"third-party/java/slf4j/slf4j-jdk14-1.7.5.jar",
"third-party/java/stringtemplate/ST-4.0.8.jar",
"third-party/java/thrift/libthrift-0.9.3.jar",
"third-party/java/xz-java-1.3/xz-1.3.jar",
# maven/aether libs
"third-party/java/aether/aether-api-1.0.2.v20150114.jar",
"third-party/java/aether/aether-connector-basic-1.0.2.v20150114.jar",
"third-party/java/aether/aether-impl-1.0.0.v20140518.jar",
"third-party/java/aether/aether-spi-1.0.2.v20150114.jar",
"third-party/java/aether/aether-transport-http-1.0.2.v20150114.jar",
"third-party/java/aether/aether-util-1.0.2.v20150114.jar",
"third-party/java/maven/maven-aether-provider-3.2.5.jar",
"third-party/java/maven/maven-model-3.2.5.jar",
"third-party/java/maven/maven-model-builder-3.2.5.jar",
"third-party/java/slf4j/slf4j-api-1.7.5.jar",
"third-party/java/plexus/plexus-utils-3.0.20.jar",
"third-party/java/plexus/plexus-interpolation-1.21.jar",
]
RESOURCES = {
"abi_processor_classes": "build/abi_processor/classes",
"android_agent_path": "assets/android/agent.apk",
"buck_server": "bin/buck",
"dx": "third-party/java/dx/etc/dx",
"jacoco_agent_jar": "third-party/java/jacoco/jacocoagent.jar",
"libjcocoa.dylib": "third-party/java/ObjCBridge/libjcocoa.dylib",
"logging_config_file": "config/logging.properties",
"native_exopackage_fake_path": "assets/android/native-exopackage-fakes.apk",
"path_to_asm_jar": "third-party/java/asm/asm-debug-all-5.0.3.jar",
"path_to_buck_py": "src/com/facebook/buck/parser/buck.py",
"path_to_intellij_py": "src/com/facebook/buck/command/intellij.py",
"path_to_pathlib_py": "third-party/py/pathlib/pathlib.py",
"path_to_pex": "src/com/facebook/buck/python/make_pex.py",
"path_to_pywatchman": "third-party/py/pywatchman",
"path_to_sh_binary_template": "src/com/facebook/buck/shell/sh_binary_template",
"path_to_static_content": "webserver/static",
"report_generator_jar": "build/report-generator.jar",
"testrunner_classes": "build/testrunner/classes",
}
def get_ant_env(max_heap_size_mb):
ant_env = os.environ.copy()
ant_opts = ant_env.get('ANT_OPTS', '')
if ant_opts.find('-Xmx') == -1:
# Adjust the max heap size if it's not already specified.
ant_max_heap_arg = '-Xmx{0}m'.format(max_heap_size_mb)
if ant_opts:
ant_opts += ' '
ant_opts += ant_max_heap_arg
ant_env['ANT_OPTS'] = ant_opts
return ant_env
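# Editor's note: a hedged illustration added by the editor, not part of the original
# file. It demonstrates the precedence rule above: a caller-supplied -Xmx survives,
# otherwise the requested heap size is appended.
def _demo_get_ant_env():
    os.environ['ANT_OPTS'] = '-Xmx512m'
    assert '-Xmx512m' in get_ant_env(1024)['ANT_OPTS']
    del os.environ['ANT_OPTS']
    assert '-Xmx1024m' in get_ant_env(1024)['ANT_OPTS']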
class BuckRepo(BuckTool):
def __init__(self, buck_bin_dir, buck_project):
super(BuckRepo, self).__init__(buck_project)
self._buck_dir = self._platform_path(os.path.dirname(buck_bin_dir))
self._build_success_file = os.path.join(
self._buck_dir, "build", "successful-build")
dot_git = os.path.join(self._buck_dir, '.git')
self._is_git = os.path.exists(dot_git) and os.path.isdir(dot_git) and which('git') and \
sys.platform != 'cygwin'
self._is_buck_repo_dirty_override = os.environ.get('BUCK_REPOSITORY_DIRTY')
buck_version = buck_project.buck_version
if self._is_git and not buck_project.has_no_buck_check and buck_version:
revision = buck_version[0]
branch = buck_version[1] if len(buck_version) > 1 else None
self._checkout_and_clean(revision, branch)
self._build()
def _checkout_and_clean(self, revision, branch):
with Tracing('BuckRepo._checkout_and_clean'):
if not self._revision_exists(revision):
print(textwrap.dedent("""\
Required revision {0} is not
available in the local repository.
Buck is fetching updates from git. You can disable this by creating
a '.nobuckcheck' file in your repository, but this might lead to
strange bugs or build failures.""".format(revision)),
file=sys.stderr)
git_command = ['git', 'fetch']
git_command.extend(['--all'] if not branch else ['origin', branch])
try:
subprocess.check_call(
git_command,
stdout=sys.stderr,
cwd=self._buck_dir)
except subprocess.CalledProcessError:
raise BuckToolException(textwrap.dedent("""\
Failed to fetch Buck updates from git."""))
current_revision = self._get_git_revision()
if current_revision != revision:
print(textwrap.dedent("""\
Buck is at {0}, but should be {1}.
Buck is updating itself. To disable this, add a '.nobuckcheck'
file to your project root. In general, you should only disable
this if you are developing Buck.""".format(
current_revision, revision)),
file=sys.stderr)
try:
subprocess.check_call(
['git', 'checkout', '--quiet', revision],
cwd=self._buck_dir)
except subprocess.CalledProcessError:
raise BuckToolException(textwrap.dedent("""\
Failed to update Buck to revision {0}.""".format(revision)))
if os.path.exists(self._build_success_file):
os.remove(self._build_success_file)
ant = self._check_for_ant()
self._run_ant_clean(ant)
raise RestartBuck()
def _join_buck_dir(self, relative_path):
return os.path.join(self._buck_dir, *(relative_path.split('/')))
def _has_local_changes(self):
if not self._is_git:
return False
output = check_output(
['git', 'ls-files', '-m'],
cwd=self._buck_dir)
return bool(output.strip())
def _get_git_revision(self):
if not self._is_git:
return 'N/A'
return buck_version.get_git_revision(self._buck_dir)
def _get_git_commit_timestamp(self):
if self._is_buck_repo_dirty_override or not self._is_git:
return -1
return buck_version.get_git_revision_timestamp(self._buck_dir)
def _revision_exists(self, revision):
returncode = subprocess.call(
['git', 'cat-file', '-e', revision],
cwd=self._buck_dir)
return returncode == 0
def _check_for_ant(self):
ant = which('ant')
if not ant:
message = "You do not have ant on your $PATH. Cannot build Buck."
if sys.platform == "darwin":
message += "\nTry running 'brew install ant'."
raise BuckToolException(message)
return ant
def _print_ant_failure_and_exit(self, ant_log_path):
print(textwrap.dedent("""\
::: 'ant' failed in the buck repo at '{0}',
::: and 'buck' is not properly built. It will be unusable
::: until the error is corrected. You can check the logs
::: at {1} to figure out what broke.""".format(
self._buck_dir, ant_log_path)), file=sys.stderr)
if self._is_git:
raise BuckToolException(textwrap.dedent("""\
::: It is possible that running this command will fix it:
::: git -C "{0}" clean -xfd""".format(self._buck_dir)))
else:
raise BuckToolException(textwrap.dedent("""\
::: It is possible that running this command will fix it:
::: rm -rf "{0}"/build""".format(self._buck_dir)))
def _run_ant_clean(self, ant):
clean_log_path = os.path.join(self._buck_project.get_buck_out_log_dir(), 'ant-clean.log')
with open(clean_log_path, 'w') as clean_log:
exitcode = subprocess.call([ant, 'clean'], stdout=clean_log,
cwd=self._buck_dir, env=get_ant_env(JAVA_MAX_HEAP_SIZE_MB))
            if exitcode != 0:
self._print_ant_failure_and_exit(clean_log_path)
def _run_ant(self, ant):
ant_log_path = os.path.join(self._buck_project.get_buck_out_log_dir(), 'ant.log')
with open(ant_log_path, 'w') as ant_log:
exitcode = subprocess.call([ant], stdout=ant_log,
cwd=self._buck_dir, env=get_ant_env(JAVA_MAX_HEAP_SIZE_MB))
            if exitcode != 0:
self._print_ant_failure_and_exit(ant_log_path)
def _build(self):
with Tracing('BuckRepo._build'):
if not os.path.exists(self._build_success_file):
print(
"Buck does not appear to have been built -- building Buck!",
file=sys.stderr)
ant = self._check_for_ant()
self._run_ant_clean(ant)
self._run_ant(ant)
open(self._build_success_file, 'w').close()
print("All done, continuing with build.", file=sys.stderr)
def _has_resource(self, resource):
return True
def _get_resource(self, resource, exe=False):
return self._join_buck_dir(RESOURCES[resource.name])
def _get_buck_version_uid(self):
with Tracing('BuckRepo._get_buck_version_uid'):
# First try to get the "clean" buck version. If it succeeds,
# return it.
clean_version = buck_version.get_clean_buck_version(
self._buck_dir,
allow_dirty=self._is_buck_repo_dirty_override == "1")
if clean_version is not None:
return clean_version
# Otherwise, if there is a .nobuckcheck file, or if there isn't
# a .buckversion file, fall back to a "dirty" version.
if (self._buck_project.has_no_buck_check or
not self._buck_project.buck_version):
return buck_version.get_dirty_buck_version(self._buck_dir)
if self._has_local_changes():
print(textwrap.dedent("""\
::: Your buck directory has local modifications, and therefore
::: builds will not be able to use a distributed cache.
::: The following files must be either reverted or committed:"""),
file=sys.stderr)
subprocess.call(
['git', 'ls-files', '-m'],
stdout=sys.stderr,
cwd=self._buck_dir)
elif os.environ.get('BUCK_CLEAN_REPO_IF_DIRTY') != 'NO':
print(textwrap.dedent("""\
::: Your local buck directory is dirty, and therefore builds will
::: not be able to use a distributed cache."""), file=sys.stderr)
if sys.stdout.isatty():
print(
"::: Do you want to clean your buck directory? [y/N]",
file=sys.stderr)
choice = raw_input().lower()
if choice == "y":
subprocess.call(
['git', 'clean', '-fd'],
stdout=sys.stderr,
cwd=self._buck_dir)
raise RestartBuck()
return buck_version.get_dirty_buck_version(self._buck_dir)
def _get_extra_java_args(self):
return [
"-Dbuck.git_commit={0}".format(self._get_git_revision()),
"-Dbuck.git_commit_timestamp={0}".format(
self._get_git_commit_timestamp()),
"-Dbuck.git_dirty={0}".format(
int(self._is_buck_repo_dirty_override == "1" or
buck_version.is_dirty(self._buck_dir))),
]
def _get_bootstrap_classpath(self):
return self._join_buck_dir("build/bootstrapper/bootstrapper.jar")
def _get_java_classpath(self):
return self._pathsep.join([self._join_buck_dir(p) for p in JAVA_CLASSPATHS])
| 45.367647
| 98
| 0.620292
|
4001b64ca365acebc40d3e97a63f15db3c50e35c
| 5,259
|
py
|
Python
|
conlang/cache.py
|
zyxw59/soundchanger
|
3aef4e36e68eb1e6af4db89b8cd636918542cb7b
|
[
"MIT"
] | 2
|
2016-04-10T09:11:32.000Z
|
2019-09-01T08:31:18.000Z
|
conlang/cache.py
|
zyxw59/soundchanger
|
3aef4e36e68eb1e6af4db89b8cd636918542cb7b
|
[
"MIT"
] | null | null | null |
conlang/cache.py
|
zyxw59/soundchanger
|
3aef4e36e68eb1e6af4db89b8cd636918542cb7b
|
[
"MIT"
] | null | null | null |
import time
class Cache(object):
"""A cache of computed values.
Attributes:
cache: The cache as a dict, whose keys are the arguments to the
function the cache computes, and whose values are tuples of the
            last modified time, and the result of the function.
funct: The function whose results the cache stores.
max_size: An int. If the cache has more than max_size entries, the
oldest entries are purged. If set to -1, the cache has unlimited
size.
mod_times: A list of keys, in order of last modification time, oldest
first.
"""
def __init__(self, funct, max_size=-1):
"""Initializes a cache.
Args:
funct: The function whose results the cache stores.
max_size: (Optional) The maximum number of entries in the cache. If
set to -1 (default), the cache has no limit.
"""
super().__init__()
self.cache = {}
self.funct = funct
self.max_size = max_size
self.mod_times = []
def __call__(self, *args):
"""Calls the function or returns a cached value.
If *args exists as a key of self.cache, the cached value is returned.
Otherwise, self.funct is called, and the result is cached.
Args:
*args: The arguments to be passed to self.funct or to be looked up
in self.cache.
Returns:
The result of the function or a cached value.
"""
if args not in self.cache:
# it's not cached, so cache it
self.update(*args)
# retrieve cached value. if it wasn't already cached, it is now
return self.cache[args][1]
def purge(self, num=-1):
"""Purges the cache.
Args:
num: (Optional) The number of entries to purge. If set to -1
(default), all entries are purged. Otherwise, num entries are
purged, starting with the oldest.
"""
if num == -1:
num = len(self.cache)
for k in self.mod_times[:num]:
del self.cache[k]
        self.update_mod_times()
def update(self, *args):
"""Updates a value in the cache.
If the value is not in the cache already, and adding it causes the
        cache to exceed max_size, the oldest entry is purged from the cache.
Args:
*args: The arguments to self.funct.
"""
self.cache[args] = time.time(), self.funct(*args)
if self.max_size != -1 and len(self.cache) > self.max_size:
self.purge(len(self.cache) - self.max_size)
else:
# self.update_mod_times() is called by self.purge(), so we only
# need to call it if self.purge() isn't called.
self.update_mod_times()
def update_mod_times(self):
"""Updates self.mod_times."""
d = self.cache
self.mod_times = sorted(d, key=lambda k: d[k][0])
class ModifiedCache(Cache):
"""A cache that can check if values need to be updated.
Attributes:
cache: The cache as a dict, whose keys are the arguments to the
function the cache computes, and whose values are tuples of the
            last modified time, and the result of the function.
funct: The function whose results the cache stores.
max_size: An int. If the cache has more than max_size entries, the
oldest entries are purged. If set to -1, the cache has unlimited
size.
mod_times: A list of keys, in order of last modification time, oldest
first.
modified: A function that checks whether a cached value needs to be
updated, by returning a timestamp to compare with the last modified
time of the cached value. Should take the same arguments as funct.
"""
def __init__(self, funct, modified, max_size=-1):
"""Initializes a cache.
Args:
funct: The function whose results the cache stores.
modified: The function to check whether a cached value needs to be
updated. Should take the same arguments as funct, and return
something that can be compared to a timestamp returned by
time.time().
max_size: (Optional) The maximum number of entries in the cache. If
set to -1 (default), the cache has no limit.
"""
super().__init__(funct, max_size)
self.modified = modified
def __call__(self, *args):
"""Calls the function or returns a cached value.
If *args exists as a key of self.cache, and has not been modified, the
cached value is returned. Otherwise, self.funct is called, and the
result is cached.
Args:
*args: The arguments to be passed to self.funct or to be looked up
in self.cache.
Returns:
The result of the function or a cached value.
"""
if (args not in self.cache or
self.modified(*args) > self.cache[args][0]):
# it needs to be updated
self.update(*args)
# retrieve cached value
return self.cache[args][1]
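# Editor's note: a hedged usage sketch appended by the editor, not part of the
# original module. It memoizes an "expensive" function with a bounded Cache, and
# pairs ModifiedCache with an always-newer modification probe so entries are
# treated as stale and recomputed.
if __name__ == "__main__":
    calls = []

    def slow_square(x):
        calls.append(x)          # record real invocations
        return x * x

    cached_square = Cache(slow_square, max_size=2)
    print(cached_square(3), cached_square(3))   # second call served from cache
    print(len(calls))                           # -> 1

    always_stale = ModifiedCache(slow_square, lambda x: time.time() + 1)
    print(always_stale(4), always_stale(4))     # recomputed both times
    print(len(calls))                           # -> 3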
| 37.834532
| 79
| 0.59536
|
e4c9437feb187e9dd52564b676cb26f054ba4729
| 732
|
py
|
Python
|
ch7/unittest/test_delete_unittest_fix.py
|
Jamrozinski/PythonTestingWithPytest
|
0dceb58f0b17fefa776748c93f5df062395d00be
|
[
"MIT"
] | 11
|
2021-05-06T12:39:39.000Z
|
2022-03-14T11:58:44.000Z
|
ch7/unittest/test_delete_unittest_fix.py
|
Jamrozinski/PythonTestingWithPytest
|
0dceb58f0b17fefa776748c93f5df062395d00be
|
[
"MIT"
] | 34
|
2019-12-16T16:53:24.000Z
|
2022-01-13T02:29:30.000Z
|
ch7/unittest/test_delete_unittest_fix.py
|
Jamrozinski/PythonTestingWithPytest
|
0dceb58f0b17fefa776748c93f5df062395d00be
|
[
"MIT"
] | 11
|
2021-06-10T21:19:42.000Z
|
2022-02-21T04:03:06.000Z
|
import pytest
import unittest
import tasks
from tasks import Task
@pytest.mark.usefixtures('tasks_db_session')
class TestNonEmpty(unittest.TestCase):
def setUp(self):
tasks.delete_all() # start empty
# add a few items, saving ids
self.ids = []
self.ids.append(tasks.add(Task('One', 'Brian', True)))
self.ids.append(tasks.add(Task('Two', 'Still Brian', False)))
self.ids.append(tasks.add(Task('Three', 'Not Brian', False)))
def test_delete_decreases_count(self):
# GIVEN 3 items
self.assertEqual(tasks.count(), 3)
# WHEN we delete one
tasks.delete(self.ids[0])
# THEN count decreases by 1
self.assertEqual(tasks.count(), 2)
| 29.28
| 69
| 0.636612
|
bd3fc879d09d5b8aa9ee2f32c31be28f17b2eda6
| 16,243
|
py
|
Python
|
google/cloud/asset/v1p2beta1/asset-v1p2beta1-py/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc_asyncio.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/asset/v1p2beta1/asset-v1p2beta1-py/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc_asyncio.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/asset/v1p2beta1/asset-v1p2beta1-py/google/cloud/asset_v1p2beta1/services/asset_service/transports/grpc_asyncio.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.asset_v1p2beta1.types import asset_service
from google.protobuf import empty_pb2 # type: ignore
from .base import AssetServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import AssetServiceGrpcTransport
class AssetServiceGrpcAsyncIOTransport(AssetServiceTransport):
"""gRPC AsyncIO backend transport for AssetService.
Asset service definition.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'cloudasset.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'cloudasset.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def create_feed(self) -> Callable[
[asset_service.CreateFeedRequest],
Awaitable[asset_service.Feed]]:
r"""Return a callable for the create feed method over gRPC.
Creates a feed in a parent
project/folder/organization to listen to its asset
updates.
Returns:
Callable[[~.CreateFeedRequest],
Awaitable[~.Feed]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_feed' not in self._stubs:
self._stubs['create_feed'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1p2beta1.AssetService/CreateFeed',
request_serializer=asset_service.CreateFeedRequest.serialize,
response_deserializer=asset_service.Feed.deserialize,
)
return self._stubs['create_feed']
@property
def get_feed(self) -> Callable[
[asset_service.GetFeedRequest],
Awaitable[asset_service.Feed]]:
r"""Return a callable for the get feed method over gRPC.
Gets details about an asset feed.
Returns:
Callable[[~.GetFeedRequest],
Awaitable[~.Feed]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_feed' not in self._stubs:
self._stubs['get_feed'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1p2beta1.AssetService/GetFeed',
request_serializer=asset_service.GetFeedRequest.serialize,
response_deserializer=asset_service.Feed.deserialize,
)
return self._stubs['get_feed']
@property
def list_feeds(self) -> Callable[
[asset_service.ListFeedsRequest],
Awaitable[asset_service.ListFeedsResponse]]:
r"""Return a callable for the list feeds method over gRPC.
Lists all asset feeds in a parent
project/folder/organization.
Returns:
Callable[[~.ListFeedsRequest],
Awaitable[~.ListFeedsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_feeds' not in self._stubs:
self._stubs['list_feeds'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1p2beta1.AssetService/ListFeeds',
request_serializer=asset_service.ListFeedsRequest.serialize,
response_deserializer=asset_service.ListFeedsResponse.deserialize,
)
return self._stubs['list_feeds']
@property
def update_feed(self) -> Callable[
[asset_service.UpdateFeedRequest],
Awaitable[asset_service.Feed]]:
r"""Return a callable for the update feed method over gRPC.
Updates an asset feed configuration.
Returns:
Callable[[~.UpdateFeedRequest],
Awaitable[~.Feed]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_feed' not in self._stubs:
self._stubs['update_feed'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1p2beta1.AssetService/UpdateFeed',
request_serializer=asset_service.UpdateFeedRequest.serialize,
response_deserializer=asset_service.Feed.deserialize,
)
return self._stubs['update_feed']
@property
def delete_feed(self) -> Callable[
[asset_service.DeleteFeedRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete feed method over gRPC.
Deletes an asset feed.
Returns:
Callable[[~.DeleteFeedRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_feed' not in self._stubs:
self._stubs['delete_feed'] = self.grpc_channel.unary_unary(
'/google.cloud.asset.v1p2beta1.AssetService/DeleteFeed',
request_serializer=asset_service.DeleteFeedRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_feed']
def close(self):
return self.grpc_channel.close()
__all__ = (
'AssetServiceGrpcAsyncIOTransport',
)
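# Editor's note: a hedged usage sketch appended by the editor, not part of the
# generated file. It assumes Application Default Credentials are available and uses
# a placeholder project; production code would normally go through the async client
# rather than calling the raw stub as done here.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        transport = AssetServiceGrpcAsyncIOTransport()
        try:
            request = asset_service.ListFeedsRequest(parent="projects/my-hypothetical-project")
            response = await transport.list_feeds(request)
            print(response)
        finally:
            await transport.close()

    asyncio.run(_demo())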
| 44.138587
| 87
| 0.623653
|
4bc2d8e0e0e3571025029d4c02800cd89d796548
| 877
|
py
|
Python
|
web/core/views.py
|
zero-or-one/kisaweb
|
d88eca63b50fd2593a7d1aa23916a80437e84925
|
[
"MIT"
] | 1
|
2021-03-21T18:11:18.000Z
|
2021-03-21T18:11:18.000Z
|
web/core/views.py
|
zero-or-one/kisaweb
|
d88eca63b50fd2593a7d1aa23916a80437e84925
|
[
"MIT"
] | null | null | null |
web/core/views.py
|
zero-or-one/kisaweb
|
d88eca63b50fd2593a7d1aa23916a80437e84925
|
[
"MIT"
] | 1
|
2021-05-28T17:17:25.000Z
|
2021-05-28T17:17:25.000Z
|
from django.shortcuts import render, HttpResponse, redirect
from events.models import Event
from django.conf import settings
from .models import CourseResources
# Create your views here.
def homepage(request):
    if settings.MAINTENANCE_MODE:
return redirect('important_links')
else:
num_shown_events = 6
events = Event.objects.all().order_by("-id")
latest_events = events[: min(num_shown_events, len(events))]
context = {
"event_list": latest_events
}
return render(request, 'core/homepage.html', context)
def important_links(request):
return render(request, 'core/important_links.html')
def course_resources(request):
resources = CourseResources.objects.order_by('class_id')
return render(request, 'core/course_resources.html', context={
'resources': resources,
})
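# Editor's note: a hedged sketch, not part of the original views module. The
# redirect('important_links') above presupposes a named URL pattern; something like
# the following, normally kept in core/urls.py, would satisfy it (the paths chosen
# here are assumptions).
from django.urls import path

urlpatterns = [
    path('', homepage, name='homepage'),
    path('links/', important_links, name='important_links'),
    path('courses/', course_resources, name='course_resources'),
]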
| 30.241379
| 68
| 0.697834
|
0c8d276a7d91342c3731252205eb3208607903a3
| 3,506
|
py
|
Python
|
fips-files/verbs/nebula.py
|
Nechrito/nebula
|
6c7ef27ab1374d3f751d866500729524f72a0c87
|
[
"BSD-2-Clause"
] | null | null | null |
fips-files/verbs/nebula.py
|
Nechrito/nebula
|
6c7ef27ab1374d3f751d866500729524f72a0c87
|
[
"BSD-2-Clause"
] | null | null | null |
fips-files/verbs/nebula.py
|
Nechrito/nebula
|
6c7ef27ab1374d3f751d866500729524f72a0c87
|
[
"BSD-2-Clause"
] | null | null | null |
"""control nebula toolkit settings
nebula work [working directory]
nebula toolkit [toolkit directory]
physx [win-vs15,winvs16]
cleannidl
"""
from mod import log, util, settings
import os
import sys
import shutil
import subprocess
if sys.platform == "win32" :
if sys.version_info.major > 2:
import winreg as _winreg
else:
import _winreg
base_reg = r"SOFTWARE\gscept\ToolkitShared"
def argToKey(key) :
"""translate argument to registry key"""
keys = {
"work" : "workdir",
"toolkit" : "path"
}
return keys.get(key,"")
def setKey(key, value) :
"""set nebula key"""
try :
if key == "work" or key == "toolkit" :
path = os.path.abspath(value)
_winreg.CreateKey(_winreg.HKEY_CURRENT_USER,base_reg)
reg_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, base_reg, 0, _winreg.KEY_WRITE)
_winreg.SetValueEx(reg_key, argToKey(key), 0, _winreg.REG_SZ, path)
_winreg.CloseKey(reg_key)
except WindowsError:
log.error("error setting registry key")
def run(fips_dir, proj_dir, args) :
"""run the 'nebula' verb"""
if len(args) > 0 :
noun = args[0]
if noun == 'set' :
if len(args) > 2 :
setKey(args[1], args[2])
else :
log.error("expected setting and value")
elif noun == 'get' :
if len(args) > 1 :
key = argToKey(args[1])
if key != "" :
reg_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, base_reg, 0, _winreg.KEY_READ)
keyval, regtype = _winreg.QueryValueEx(reg_key,key)
_winreg.CloseKey(reg_key)
log.info(keyval)
else :
log.error("invalid setting")
else :
log.error("expected setting name")
elif noun == 'cleannidl' :
proj = util.get_project_name_from_dir(proj_dir)
cfg = settings.get(proj_dir, 'config')
path = util.get_build_dir(fips_dir,proj,cfg)+"/nidl"
shutil.rmtree(path,True)
else :
try:
reg_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, base_reg, 0, _winreg.KEY_READ)
workval, regtype = _winreg.QueryValueEx(reg_key,"workdir")
rootval, regtype = _winreg.QueryValueEx(reg_key,"path")
_winreg.CloseKey(reg_key)
log.info(log.YELLOW +
"Current settings:\n"
"Project directory: " + workval + "\n"
"Nebula root directory: " + rootval + "\n")
except WindowsError:
log.info(log.YELLOW + "No Nebula settings in registry\n")
else:
    def run(fips_dir, proj_dir, args):
log.error("Not supported")
def help():
"""print 'nebula' help"""
log.info(log.YELLOW +
"fips nebula [set|get]\n"
" work [working directory]\n"
" toolkit [nebula root/toolkit directory]\n"
"fips nebula\n"
" prints current configuration\n"
"fips cleannidl\n"
" cleans all nidl files which forces a regeneration upon compilation\n")
| 36.520833
| 107
| 0.521107
|
da9f23df0d39e7f6fc56effb47cae74c278a20aa
| 10,812
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200501/get_network_virtual_appliance.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200501/get_network_virtual_appliance.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200501/get_network_virtual_appliance.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkVirtualApplianceResult',
'AwaitableGetNetworkVirtualApplianceResult',
'get_network_virtual_appliance',
]
@pulumi.output_type
class GetNetworkVirtualApplianceResult:
"""
NetworkVirtualAppliance Resource.
"""
def __init__(__self__, boot_strap_configuration_blobs=None, cloud_init_configuration=None, cloud_init_configuration_blobs=None, etag=None, id=None, identity=None, location=None, name=None, nva_sku=None, provisioning_state=None, tags=None, type=None, virtual_appliance_asn=None, virtual_appliance_nics=None, virtual_appliance_sites=None, virtual_hub=None):
if boot_strap_configuration_blobs and not isinstance(boot_strap_configuration_blobs, list):
raise TypeError("Expected argument 'boot_strap_configuration_blobs' to be a list")
pulumi.set(__self__, "boot_strap_configuration_blobs", boot_strap_configuration_blobs)
if cloud_init_configuration and not isinstance(cloud_init_configuration, str):
raise TypeError("Expected argument 'cloud_init_configuration' to be a str")
pulumi.set(__self__, "cloud_init_configuration", cloud_init_configuration)
if cloud_init_configuration_blobs and not isinstance(cloud_init_configuration_blobs, list):
raise TypeError("Expected argument 'cloud_init_configuration_blobs' to be a list")
pulumi.set(__self__, "cloud_init_configuration_blobs", cloud_init_configuration_blobs)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if nva_sku and not isinstance(nva_sku, dict):
raise TypeError("Expected argument 'nva_sku' to be a dict")
pulumi.set(__self__, "nva_sku", nva_sku)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_appliance_asn and not isinstance(virtual_appliance_asn, float):
raise TypeError("Expected argument 'virtual_appliance_asn' to be a float")
pulumi.set(__self__, "virtual_appliance_asn", virtual_appliance_asn)
if virtual_appliance_nics and not isinstance(virtual_appliance_nics, list):
raise TypeError("Expected argument 'virtual_appliance_nics' to be a list")
pulumi.set(__self__, "virtual_appliance_nics", virtual_appliance_nics)
if virtual_appliance_sites and not isinstance(virtual_appliance_sites, list):
raise TypeError("Expected argument 'virtual_appliance_sites' to be a list")
pulumi.set(__self__, "virtual_appliance_sites", virtual_appliance_sites)
if virtual_hub and not isinstance(virtual_hub, dict):
raise TypeError("Expected argument 'virtual_hub' to be a dict")
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="bootStrapConfigurationBlobs")
def boot_strap_configuration_blobs(self) -> Optional[Sequence[str]]:
"""
BootStrapConfigurationBlobs storage URLs.
"""
return pulumi.get(self, "boot_strap_configuration_blobs")
@property
@pulumi.getter(name="cloudInitConfiguration")
def cloud_init_configuration(self) -> Optional[str]:
"""
CloudInitConfiguration string in plain text.
"""
return pulumi.get(self, "cloud_init_configuration")
@property
@pulumi.getter(name="cloudInitConfigurationBlobs")
def cloud_init_configuration_blobs(self) -> Optional[Sequence[str]]:
"""
CloudInitConfigurationBlob storage URLs.
"""
return pulumi.get(self, "cloud_init_configuration_blobs")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
The service principal that has read access to cloud-init and config blob.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nvaSku")
def nva_sku(self) -> Optional['outputs.VirtualApplianceSkuPropertiesResponse']:
"""
Network Virtual Appliance SKU.
"""
return pulumi.get(self, "nva_sku")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualApplianceAsn")
def virtual_appliance_asn(self) -> Optional[float]:
"""
VirtualAppliance ASN.
"""
return pulumi.get(self, "virtual_appliance_asn")
@property
@pulumi.getter(name="virtualApplianceNics")
def virtual_appliance_nics(self) -> Sequence['outputs.VirtualApplianceNicPropertiesResponse']:
"""
List of Virtual Appliance Network Interfaces.
"""
return pulumi.get(self, "virtual_appliance_nics")
@property
@pulumi.getter(name="virtualApplianceSites")
def virtual_appliance_sites(self) -> Sequence['outputs.SubResourceResponse']:
"""
List of references to VirtualApplianceSite.
"""
return pulumi.get(self, "virtual_appliance_sites")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional['outputs.SubResourceResponse']:
"""
The Virtual Hub where Network Virtual Appliance is being deployed.
"""
return pulumi.get(self, "virtual_hub")
class AwaitableGetNetworkVirtualApplianceResult(GetNetworkVirtualApplianceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkVirtualApplianceResult(
boot_strap_configuration_blobs=self.boot_strap_configuration_blobs,
cloud_init_configuration=self.cloud_init_configuration,
cloud_init_configuration_blobs=self.cloud_init_configuration_blobs,
etag=self.etag,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
nva_sku=self.nva_sku,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
virtual_appliance_asn=self.virtual_appliance_asn,
virtual_appliance_nics=self.virtual_appliance_nics,
virtual_appliance_sites=self.virtual_appliance_sites,
virtual_hub=self.virtual_hub)
def get_network_virtual_appliance(expand: Optional[str] = None,
network_virtual_appliance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkVirtualApplianceResult:
"""
NetworkVirtualAppliance Resource.
:param str expand: Expands referenced resources.
:param str network_virtual_appliance_name: The name of Network Virtual Appliance.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['networkVirtualApplianceName'] = network_virtual_appliance_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200501:getNetworkVirtualAppliance', __args__, opts=opts, typ=GetNetworkVirtualApplianceResult).value
return AwaitableGetNetworkVirtualApplianceResult(
boot_strap_configuration_blobs=__ret__.boot_strap_configuration_blobs,
cloud_init_configuration=__ret__.cloud_init_configuration,
cloud_init_configuration_blobs=__ret__.cloud_init_configuration_blobs,
etag=__ret__.etag,
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
nva_sku=__ret__.nva_sku,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
virtual_appliance_asn=__ret__.virtual_appliance_asn,
virtual_appliance_nics=__ret__.virtual_appliance_nics,
virtual_appliance_sites=__ret__.virtual_appliance_sites,
virtual_hub=__ret__.virtual_hub)
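# Editor's note: a hedged usage sketch, not part of the generated SDK module. The
# appliance and resource-group names are placeholders; in a real Pulumi program this
# call would live in the project's __main__.py.
if __name__ == "__main__":
    result = get_network_virtual_appliance(
        network_virtual_appliance_name="my-hypothetical-nva",
        resource_group_name="my-hypothetical-rg")
    pulumi.export("nvaProvisioningState", result.provisioning_state)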
| 40.8
| 359
| 0.6798
|
a6c6b1218776dcb39dfa3fe8553ca4cfedfb0ea3
| 4,552
|
py
|
Python
|
newsrc/common/fetcher.py
|
CAIDA/chocolatine
|
72bbaa96066ccc3af4ac0505f27bab32aed6afb9
|
[
"BSD-2-Clause"
] | null | null | null |
newsrc/common/fetcher.py
|
CAIDA/chocolatine
|
72bbaa96066ccc3af4ac0505f27bab32aed6afb9
|
[
"BSD-2-Clause"
] | null | null | null |
newsrc/common/fetcher.py
|
CAIDA/chocolatine
|
72bbaa96066ccc3af4ac0505f27bab32aed6afb9
|
[
"BSD-2-Clause"
] | null | null | null |
# This file is part of chocolatine
#
# Copyright (C) 2021 The Regents of the University of California
# All Rights Reserved
#
# Permission to copy, modify, and distribute this software and its
# documentation for academic research and education purposes, without fee, and
# without a written agreement is hereby granted, provided that
# the above copyright notice, this paragraph and the following paragraphs
# appear in all copies.
#
# Permission to make use of this software for other than academic research and
# education purposes may be obtained by contacting:
#
# Office of Innovation and Commercialization
# 9500 Gilman Drive, Mail Code 0910
# University of California
# La Jolla, CA 92093-0910
# (858) 534-5815
# invent@ucsd.edu
#
# This software program and documentation are copyrighted by The Regents of the
# University of California. The software program and documentation are supplied
# "as is", without any accompanying services from The Regents. The Regents does
# not warrant that the operation of the program will be uninterrupted or
# error-free. The end-user understands that the program was developed for
# research purposes and is advised not to rely exclusively on the program for
# any reason.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
# LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
# EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED
# HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO
# OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
from enum import Enum
import pandas as pd
import requests
from requests.exceptions import HTTPError
# 12 weeks -- 10 weeks to train, 1 week to calibrate, 1 week to test
DEFAULT_DURATION=(12 * 7) * 24 * 60 * 60
DEFAULT_API_URL="https://api.ioda.caida.org/dev/signals/raw"
class ChocFetcherJobType(Enum):
CHOC_FETCH_TELESCOPE_DATA = 1
CHOC_FETCH_BGP_DATA = 2
CHOC_FETCH_TRINOCULAR_DATA = 3
class ChocFetcher(object):
def __init__(self, apiurl = DEFAULT_API_URL):
self.iodaapiurl = apiurl
def _formTelescopeQuery(self, serieskey, endtime, duration):
keysplit = serieskey.split('.')
if len(keysplit) < 4:
return None
dataSource = keysplit[1]
if keysplit[3] == "geo":
if len(keysplit) == 6:
entityType = "continent"
elif len(keysplit) == 7:
entityType = "country"
elif len(keysplit) == 8:
entityType = "region"
elif len(keysplit) == 7:
entityType = "country"
entityCode = keysplit[-1]
elif keysplit[3] == "routing":
entityType = "asn"
entityCode = keysplit[-1]
else:
return None
queryArgs = "/%s/%s?from=%u&until=%u&datasource=%s&maxPoints=%u" % ( \
entityType, entityCode, endtime - duration, endtime,
dataSource, (duration / 60) + 1)
return queryArgs
def _fetchIodaData(self, serieskey, queryArgs):
try:
resp = requests.get(self.iodaapiurl + queryArgs)
resp.raise_for_status()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
return None
except Exception as err:
print(f'Non-HTTP error occurred {err}')
return None
jsonresult = resp.json()['data'][0][0]
t = jsonresult['from']
step = jsonresult['step']
native = jsonresult['nativeStep']
if step != native:
print(f'Step value ({step}) for series {serieskey} does not match nativeStep ({native})')
return None
res = []
for v in jsonresult['values']:
res.append({"timestamp": pd.Timestamp(t, unit='s'),
"signalValue": v})
t += step
return res
def fetchTelescopeData(self, serieskey, endtime, duration):
print(serieskey)
queryArgs = self._formTelescopeQuery(serieskey, endtime, duration)
if queryArgs is None:
return None
return self._fetchIodaData(serieskey, queryArgs)
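# Editor's note: a hedged usage sketch, not part of the original module. The series
# key below is a hypothetical country-level key shaped to match _formTelescopeQuery
# (seven dot-separated fields ending in the entity code).
if __name__ == "__main__":
    import time
    fetcher = ChocFetcher()
    rows = fetcher.fetchTelescopeData(
        "darknet.ucsd-nt.non-erratic.geo.netacuity.NA.US",
        int(time.time()), DEFAULT_DURATION)
    if rows:
        frame = pd.DataFrame(rows)  # columns: timestamp, signalValue
        print(frame.tail())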
| 36.416
| 101
| 0.66696
|
b90a7ad64dfea7894bea798269a5e04915667cd1
| 2,227
|
py
|
Python
|
backend/api/models/model_class_possibilities/Model_id_recursion.py
|
hack4impact-uiuc/mentee
|
c56945db8051e798c7bf6703577a0e50a54b0d67
|
[
"MIT"
] | 7
|
2020-10-03T22:45:38.000Z
|
2021-10-02T09:54:40.000Z
|
backend/api/models/model_class_possibilities/Model_id_recursion.py
|
hack4impact-uiuc/mentee
|
c56945db8051e798c7bf6703577a0e50a54b0d67
|
[
"MIT"
] | 265
|
2020-10-01T20:06:27.000Z
|
2022-02-27T12:18:55.000Z
|
backend/api/models/model_class_possibilities/Model_id_recursion.py
|
Leundai/twanalyze
|
bfccd9de43fae78e1cbdc0a6695f1cf827a3282b
|
[
"Apache-2.0"
] | 1
|
2020-10-06T19:57:37.000Z
|
2020-10-06T19:57:37.000Z
|
from typing import Any, List, Type
class ModelSQL(object):
query_class = None # flask_alchemy attribute
query = None # flask_alchemy attribute
DONOTSEND_MODEL = {"_sa_instance_state"}
DONOTSEND = []
def __repr__(self) -> str:
return "<{}>".format(self.__class__.__name__)
def to_dict_recursive(self) -> dict:
return self._to_dict_recursive(obj_ids_crossed=[id(self)])
def _to_dict_recursive(self, obj_ids_crossed: List[int]) -> dict:
"""iterate through objects to create a dict
Keywords arguments :
            obj_ids_crossed -- List of objects' id already passed through, guards against circular recursion
Inside functions :
check_crossed_obj -- Check if object has already been passed through
type_shunt_recursive -- Select actions for each type of attr
"""
        def check_crossed_obj(obj: Type[ModelSQL]) -> Any:
if id(obj) in obj_ids_crossed:
return str(obj)
# others possibilities
# return str(obj).join(' ').join(str(obj.id))
# return obj.id
else:
obj_ids_crossed.append(id(obj))
return obj._to_dict_recursive(obj_ids_crossed)
def type_shunt_recursive(attribute: Any) -> Any:
# model
if issubclass(type(attribute), ModelSQL):
return check_crossed_obj(attribute)
# recursive iteration of the list in case of the list is a relationship
elif isinstance(attribute, list):
values = []
for item in attribute:
values.append(type_shunt_recursive(item))
return values
# attribute is not an instance of relationship (int, str..)
else:
return attribute
result = {}
# __mapper__ is equivalent to db.inspect(self)
# but db (database) is not created yet cause we send this model to the constructor
for key in self.__mapper__.attrs.keys():
if key not in self.DONOTSEND:
attr = getattr(self, key)
result[key] = type_shunt_recursive(attr)
return result
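# Illustrative, self-contained sketch (not from the original repo): the same
# id-based cycle guard used by _to_dict_recursive above, demonstrated on plain
# Python objects instead of SQLAlchemy models.
class _Node:
    def __init__(self, name):
        self.name = name
        self.peer = None  # may point back at another _Node, creating a cycle

    def to_dict_recursive(self, obj_ids_crossed=None):
        if obj_ids_crossed is None:
            obj_ids_crossed = [id(self)]
        if self.peer is None:
            peer_value = None
        elif id(self.peer) in obj_ids_crossed:
            peer_value = str(self.peer)  # cycle detected: fall back to the repr
        else:
            obj_ids_crossed.append(id(self.peer))
            peer_value = self.peer.to_dict_recursive(obj_ids_crossed)
        return {"name": self.name, "peer": peer_value}

# Usage: two nodes pointing at each other form a cycle; the guard stops the
# recursion at the second visit instead of recursing forever.
#   a, b = _Node("a"), _Node("b"); a.peer, b.peer = b, a
#   a.to_dict_recursive()  # {'name': 'a', 'peer': {'name': 'b', 'peer': '<..._Node object...>'}}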
avg_line_length: 35.919355 | max_line_length: 106 | alphanum_fraction: 0.603952

hexsha: a6f89e610603820933615f543c93773fa23ef49d | size: 8,165 | ext: py | lang: Python
max_stars_repo_path: tests/gis_tests/gis_migrations/test_operations.py
max_stars_repo_name: ioinfinity/django | max_stars_repo_head_hexsha: b6a0ab523751c13ae3eaec102de70f58f73a0d94
max_stars_repo_licenses: ["PSF-2.0", "BSD-3-Clause"] | max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2017-02-11T07:07:16.000Z | max_stars_repo_stars_event_max_datetime: 2017-02-11T07:07:16.000Z
max_issues_repo_path: tests/gis_tests/gis_migrations/test_operations.py
max_issues_repo_name: ioinfinity/django | max_issues_repo_head_hexsha: b6a0ab523751c13ae3eaec102de70f58f73a0d94
max_issues_repo_licenses: ["PSF-2.0", "BSD-3-Clause"] | max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2020-07-02T21:10:44.000Z | max_issues_repo_issues_event_max_datetime: 2020-07-02T21:11:21.000Z
max_forks_repo_path: tests/gis_tests/gis_migrations/test_operations.py
max_forks_repo_name: ioinfinity/django | max_forks_repo_head_hexsha: b6a0ab523751c13ae3eaec102de70f58f73a0d94
max_forks_repo_licenses: ["PSF-2.0", "BSD-3-Clause"] | max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2018-03-30T04:24:48.000Z | max_forks_repo_forks_event_max_datetime: 2021-05-09T12:39:09.000Z
content:
from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.db.models import fields
from django.contrib.gis.gdal import HAS_GDAL
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, migrations, models
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from ..utils import mysql
if connection.features.gis_enabled:
try:
GeometryColumns = connection.ops.geometry_columns()
HAS_GEOMETRY_COLUMNS = True
except NotImplementedError:
HAS_GEOMETRY_COLUMNS = False
@skipUnlessDBFeature('gis_enabled')
class OperationTests(TransactionTestCase):
available_apps = ['gis_tests.gis_migrations']
def tearDown(self):
# Delete table after testing
if hasattr(self, 'current_state'):
self.apply_operations('gis', self.current_state, [migrations.DeleteModel('Neighborhood')])
super(OperationTests, self).tearDown()
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertColumnExists(self, table, column):
self.assertIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNotExists(self, table, column):
self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
def apply_operations(self, app_label, project_state, operations):
migration = Migration('name', app_label)
migration.operations = operations
with connection.schema_editor() as editor:
return migration.apply(project_state, editor)
def set_up_test_model(self, force_raster_creation=False):
test_fields = [
('id', models.AutoField(primary_key=True)),
('name', models.CharField(max_length=100, unique=True)),
('geom', fields.MultiPolygonField(srid=4326))
]
if connection.features.supports_raster or force_raster_creation:
test_fields += [('rast', fields.RasterField(srid=4326))]
operations = [migrations.CreateModel('Neighborhood', test_fields)]
return self.apply_operations('gis', ProjectState(), operations)
def assertGeometryColumnsCount(self, expected_count):
table_name = 'gis_neighborhood'
if connection.features.uppercases_column_names:
table_name = table_name.upper()
self.assertEqual(
GeometryColumns.objects.filter(**{
GeometryColumns.table_name_col(): table_name,
}).count(),
expected_count
)
def assertSpatialIndexExists(self, table, column):
with connection.cursor() as cursor:
indexes = connection.introspection.get_indexes(cursor, table)
self.assertIn(column, indexes)
def alter_gis_model(self, migration_class, model_name, field_name,
blank=False, field_class=None):
project_state = self.set_up_test_model()
self.current_state = project_state
args = [model_name, field_name]
if field_class:
args.append(field_class(srid=4326, blank=blank))
operation = migration_class(*args)
new_state = project_state.clone()
operation.state_forwards('gis', new_state)
self.current_state = new_state
with connection.schema_editor() as editor:
operation.database_forwards('gis', editor, project_state, new_state)
def test_add_geom_field(self):
"""
Test the AddField operation with a geometry-enabled column.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'path', False, fields.LineStringField)
self.assertColumnExists('gis_neighborhood', 'path')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'path')
@skipUnlessDBFeature('supports_raster')
def test_add_raster_field(self):
"""
Test the AddField operation with a raster-enabled column.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'heatmap', False, fields.RasterField)
self.assertColumnExists('gis_neighborhood', 'heatmap')
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'heatmap')
@skipIfDBFeature('supports_raster')
@skipUnless(HAS_GDAL, 'A different error is raised if GDAL is not installed.')
def test_create_raster_model_on_db_without_raster_support(self):
"""
Test creating a model with a raster field on a db without raster support.
"""
msg = 'Raster fields require backends with raster support.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.set_up_test_model(True)
@skipIfDBFeature('supports_raster')
@skipUnless(HAS_GDAL, 'A different error is raised if GDAL is not installed.')
def test_add_raster_field_on_db_without_raster_support(self):
"""
Test adding a raster field on a db without raster support.
"""
msg = 'Raster fields require backends with raster support.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.alter_gis_model(
migrations.AddField, 'Neighborhood', 'heatmap',
False, fields.RasterField
)
def test_add_blank_geom_field(self):
"""
Should be able to add a GeometryField with blank=True.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'path', True, fields.LineStringField)
self.assertColumnExists('gis_neighborhood', 'path')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'path')
@skipUnlessDBFeature('supports_raster')
def test_add_blank_raster_field(self):
"""
Should be able to add a RasterField with blank=True.
"""
self.alter_gis_model(migrations.AddField, 'Neighborhood', 'heatmap', True, fields.RasterField)
self.assertColumnExists('gis_neighborhood', 'heatmap')
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists('gis_neighborhood', 'heatmap')
def test_remove_geom_field(self):
"""
Test the RemoveField operation with a geometry-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'geom')
self.assertColumnNotExists('gis_neighborhood', 'geom')
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(0)
@skipUnlessDBFeature('supports_raster')
def test_remove_raster_field(self):
"""
Test the RemoveField operation with a raster-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'rast')
self.assertColumnNotExists('gis_neighborhood', 'rast')
def test_create_model_spatial_index(self):
self.current_state = self.set_up_test_model()
if not self.has_spatial_indexes:
self.skipTest('No support for Spatial indexes')
self.assertSpatialIndexExists('gis_neighborhood', 'geom')
if connection.features.supports_raster:
self.assertSpatialIndexExists('gis_neighborhood', 'rast')
@property
def has_spatial_indexes(self):
if mysql:
with connection.cursor() as cursor:
return connection.introspection.supports_spatial_index(cursor, 'gis_neighborhood')
return True
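# Illustrative sketch (not part of Django's test suite): the same "build a
# Migration, apply it with a schema editor" pattern used by apply_operations
# and alter_gis_model above, written out for a single AddField of a geometry
# column. It assumes Django settings are configured and that project_state
# already contains the 'Neighborhood' model (for example, the state returned
# by set_up_test_model). The field options are illustrative only.
def _apply_add_path_field(project_state, app_label='gis'):
    migration = Migration('add_path', app_label)
    migration.operations = [
        migrations.AddField('Neighborhood', 'path',
                            fields.LineStringField(srid=4326, blank=True, null=True)),
    ]
    with connection.schema_editor() as editor:
        return migration.apply(project_state, editor)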
avg_line_length: 39.829268 | max_line_length: 104 | alphanum_fraction: 0.686589

hexsha: 2e9ef589d88d6a0214b32a513c19229e202b68a9 | size: 8,518 | ext: py | lang: Python
max_stars_repo_path: alg/causal_multitask_gaussian_processes_ite/cmgp/cmgp.py
max_stars_repo_name: DaraOrange/mlforhealthlabpub | max_stars_repo_head_hexsha: 9db861c850c94c6cf1f8bf75ed2ad8dcbd648aa3
max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 171
max_stars_repo_stars_event_min_datetime: 2021-02-12T10:23:19.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-29T01:58:52.000Z
max_issues_repo_path: alg/causal_multitask_gaussian_processes_ite/cmgp/cmgp.py
max_issues_repo_name: DaraOrange/mlforhealthlabpub | max_issues_repo_head_hexsha: 9db861c850c94c6cf1f8bf75ed2ad8dcbd648aa3
max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 4
max_issues_repo_issues_event_min_datetime: 2021-06-01T08:18:33.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-20T13:37:30.000Z
max_forks_repo_path: alg/causal_multitask_gaussian_processes_ite/cmgp/cmgp.py
max_forks_repo_name: DaraOrange/mlforhealthlabpub | max_forks_repo_head_hexsha: 9db861c850c94c6cf1f8bf75ed2ad8dcbd648aa3
max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 93
max_forks_repo_forks_event_min_datetime: 2021-02-10T03:21:59.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-30T19:10:37.000Z
content:
# Copyright (c) 2019, Ahmed M. Alaa
# Licensed under the BSD 3-clause license (see LICENSE.txt)
# third party
import GPy
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsRegressor
class CMGP:
"""
An implementation of various Gaussian models for Causal inference building on GPy.
"""
def __init__(
self,
X: np.ndarray,
Treatments: np.ndarray,
Y: np.ndarray,
mode: str = "CMGP",
max_gp_iterations: int = 1000,
) -> None:
"""
Class constructor.
Initialize a GP object for causal inference.
        :X: the input covariates, an N x dim array
        :Treatments: the binary treatment assignments for the N samples
        :Y: the observed (factual) outcomes
        :mode: 'CMGP' (LCM kernel) or 'NSGP' (ICM kernel), default is 'CMGP'
        :max_gp_iterations: maximum number of optimizer iterations, default is 1000
"""
X = np.asarray(X)
Y = np.asarray(Y)
Treatments = np.asarray(Treatments)
# Setup
dim = len(X[0])
dim_outcome = len(np.unique(Y))
self.dim = dim
self.dim_outcome = dim_outcome
self.mode = mode
self.max_gp_iterations = max_gp_iterations
if (self.dim < 1) or (type(self.dim) != int):
raise ValueError(
"Invalid value for the input dimension! Input dimension has to be a positive integer."
)
self._fit(X, Treatments, Y)
def _fit(
self,
Train_X: np.ndarray,
Train_T: np.ndarray,
Train_Y: np.ndarray,
) -> "CMGP":
"""
Optimizes the model hyperparameters using the factual samples for the treated and control arms.
Train_X has to be an N x dim matrix.
:Train_X: The input covariates
:Train_T: The treatment assignments
:Train_Y: The corresponding outcomes
"""
# Inputs: Train_X (the features), Train_T (treatment assignments), Train_Y (outcomes)
# Train_X has to be an N x dim matrix.
Dataset = pd.DataFrame(Train_X)
Dataset["Y"] = Train_Y
Dataset["T"] = Train_T
if self.dim > 1:
Feature_names = list(range(self.dim))
else:
Feature_names = [0]
Dataset0 = Dataset[Dataset["T"] == 0].copy()
Dataset1 = Dataset[Dataset["T"] == 1].copy()
# Extract data for the first learning task (control population)
X0 = np.reshape(Dataset0[Feature_names].copy(), (len(Dataset0), self.dim))
y0 = np.reshape(np.array(Dataset0["Y"].copy()), (len(Dataset0), 1))
# Extract data for the second learning task (treated population)
X1 = np.reshape(Dataset1[Feature_names].copy(), (len(Dataset1), self.dim))
y1 = np.reshape(np.array(Dataset1["Y"].copy()), (len(Dataset1), 1))
# Create an instance of a GPy Coregionalization model
K0 = GPy.kern.RBF(self.dim, ARD=True)
K1 = GPy.kern.RBF(self.dim, ARD=True)
kernel_dict = {
"CMGP": GPy.util.multioutput.LCM(
input_dim=self.dim, num_outputs=self.dim_outcome, kernels_list=[K0, K1]
),
"NSGP": GPy.util.multioutput.ICM(
input_dim=self.dim, num_outputs=self.dim_outcome, kernel=K0
),
}
self.model = GPy.models.GPCoregionalizedRegression(
X_list=[X0, X1], Y_list=[y0, y1], kernel=kernel_dict[self.mode]
)
self._initialize_hyperparameters(Train_X, Train_T, Train_Y)
try:
self.model.optimize("bfgs", max_iters=self.max_gp_iterations)
except np.linalg.LinAlgError as err:
print("Covariance matrix not invertible. ", err)
raise err
return self
def predict(self, X: np.ndarray) -> np.ndarray:
"""
Infers the treatment effect for a certain set of input covariates.
Returns the predicted ITE and posterior variance.
:X: The input covariates at which the outcomes need to be predicted
"""
if self.dim == 1:
X_ = X[:, None]
X_0 = np.hstack([X_, np.reshape(np.array([0] * len(X)), (len(X), 1))])
X_1 = np.hstack([X_, np.reshape(np.array([1] * len(X)), (len(X), 1))])
noise_dict_0 = {"output_index": X_0[:, 1:].astype(int)}
noise_dict_1 = {"output_index": X_1[:, 1:].astype(int)}
Y_est_0 = self.model.predict(X_0, Y_metadata=noise_dict_0)[0]
Y_est_1 = self.model.predict(X_1, Y_metadata=noise_dict_1)[0]
else:
X_0 = np.array(
np.hstack([X, np.zeros_like(X[:, 1].reshape((len(X[:, 1]), 1)))])
)
X_1 = np.array(
np.hstack([X, np.ones_like(X[:, 1].reshape((len(X[:, 1]), 1)))])
)
X_0_shape = X_0.shape
X_1_shape = X_1.shape
noise_dict_0 = {
"output_index": X_0[:, X_0_shape[1] - 1]
.reshape((X_0_shape[0], 1))
.astype(int)
}
noise_dict_1 = {
"output_index": X_1[:, X_1_shape[1] - 1]
.reshape((X_1_shape[0], 1))
.astype(int)
}
Y_est_0 = np.array(
list(self.model.predict(X_0, Y_metadata=noise_dict_0)[0])
)
Y_est_1 = np.array(
list(self.model.predict(X_1, Y_metadata=noise_dict_1)[0])
)
return np.asarray(Y_est_1 - Y_est_0)
def _initialize_hyperparameters(
self, X: np.ndarray, T: np.ndarray, Y: np.ndarray
) -> None:
"""
Initializes the multi-tasking model's hyper-parameters before passing to the optimizer
:X: The input covariates
:T: The treatment assignments
:Y: The corresponding outcomes
"""
# -----------------------------------------------------------------------------------
# Output Parameters:
# -----------------
# :Ls0, Ls1: length scale vectors for treated and control, dimensions match self.dim
# :s0, s1: noise variances for the two kernels
# :a0, a1: diagonal elements of correlation matrix 0
# :b0, b1: off-diagonal elements of correlation matrix 1
# -----------------------------------------------------------------------------------
Dataset = pd.DataFrame(X)
Dataset["Y"] = Y
Dataset["T"] = T
if self.dim > 1:
Feature_names = list(range(self.dim))
else:
Feature_names = [0]
Dataset0 = Dataset[Dataset["T"] == 0].copy()
Dataset1 = Dataset[Dataset["T"] == 1].copy()
neigh0 = KNeighborsRegressor(n_neighbors=10)
neigh1 = KNeighborsRegressor(n_neighbors=10)
neigh0.fit(Dataset0[Feature_names], Dataset0["Y"])
neigh1.fit(Dataset1[Feature_names], Dataset1["Y"])
Dataset["Yk0"] = neigh0.predict(Dataset[Feature_names])
Dataset["Yk1"] = neigh1.predict(Dataset[Feature_names])
Dataset0["Yk0"] = Dataset.loc[Dataset["T"] == 0, "Yk0"]
Dataset0["Yk1"] = Dataset.loc[Dataset["T"] == 0, "Yk1"]
Dataset1["Yk0"] = Dataset.loc[Dataset["T"] == 1, "Yk0"]
Dataset1["Yk1"] = Dataset.loc[Dataset["T"] == 1, "Yk1"]
a0 = np.sqrt(np.mean((Dataset0["Y"] - np.mean(Dataset0["Y"])) ** 2))
a1 = np.sqrt(np.mean((Dataset1["Y"] - np.mean(Dataset1["Y"])) ** 2))
b0 = np.mean(
(Dataset["Yk0"] - np.mean(Dataset["Yk0"]))
* (Dataset["Yk1"] - np.mean(Dataset["Yk1"]))
) / (a0 * a1)
b1 = b0
s0 = np.sqrt(np.mean((Dataset0["Y"] - Dataset0["Yk0"]) ** 2)) / a0
s1 = np.sqrt(np.mean((Dataset1["Y"] - Dataset1["Yk1"]) ** 2)) / a1
# `````````````````````````````````````````````````````
self.model.sum.ICM0.rbf.lengthscale = 10 * np.ones(self.dim)
self.model.sum.ICM1.rbf.lengthscale = 10 * np.ones(self.dim)
self.model.sum.ICM0.rbf.variance = 1
self.model.sum.ICM1.rbf.variance = 1
self.model.sum.ICM0.B.W[0] = b0
self.model.sum.ICM0.B.W[1] = b0
self.model.sum.ICM1.B.W[0] = b1
self.model.sum.ICM1.B.W[1] = b1
self.model.sum.ICM0.B.kappa[0] = a0 ** 2
self.model.sum.ICM0.B.kappa[1] = 1e-4
self.model.sum.ICM1.B.kappa[0] = 1e-4
self.model.sum.ICM1.B.kappa[1] = a1 ** 2
self.model.mixed_noise.Gaussian_noise_0.variance = s0 ** 2
self.model.mixed_noise.Gaussian_noise_1.variance = s1 ** 2
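# Illustrative usage sketch (not part of the original file): a smoke test of
# the CMGP class above on made-up data with 200 samples, 5 covariates, a
# binary treatment and binary factual outcomes (so that len(np.unique(Y)) == 2
# matches the two GP tasks). Assumes GPy and scikit-learn are installed; all
# numbers below are illustrative only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    n, d = 200, 5
    X = rng.normal(size=(n, d))
    T = rng.randint(0, 2, size=n)  # binary treatment assignment
    Y = ((X[:, 0] + T + 0.1 * rng.normal(size=n)) > 0).astype(int)  # factual outcomes
    model = CMGP(X, T, Y, mode="CMGP", max_gp_iterations=100)
    ite_hat = model.predict(X)  # estimated individual treatment effects
    print("mean estimated ITE:", float(np.mean(ite_hat)))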
avg_line_length: 37.034783 | max_line_length: 103 | alphanum_fraction: 0.545316

hexsha: fe3d39c5552eb9e3f68c2e63de5a6354a74726eb | size: 19,690 | ext: py | lang: Python
max_stars_repo_path: finitetransform/mojette.py
max_stars_repo_name: shakes76/ChaoS | max_stars_repo_head_hexsha: f8cec4cf0fa8c00ee5624a444c3f51d452b76485
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 11
max_stars_repo_stars_event_min_datetime: 2018-09-12T12:04:45.000Z | max_stars_repo_stars_event_max_datetime: 2021-05-09T10:57:17.000Z
max_issues_repo_path: finitetransform/mojette.py
max_issues_repo_name: shakes76/ChaoS | max_issues_repo_head_hexsha: f8cec4cf0fa8c00ee5624a444c3f51d452b76485
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null
max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: finitetransform/mojette.py
max_forks_repo_name: shakes76/ChaoS | max_forks_repo_head_hexsha: f8cec4cf0fa8c00ee5624a444c3f51d452b76485
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 6
max_forks_repo_forks_event_min_datetime: 2018-08-30T01:07:29.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-08T11:17:35.000Z
content:
# -*- coding: utf-8 -*-
"""
Python module for computing methods related to the Mojette transform.
The transform (resulting in projections) is computed via the 'transform' member.
Assumes coordinate system with rows as x-axis and cols as y-axis. Thus angles are taken as complex(q,p) with q in the column direction.
Use the farey module to generate the angle sets.
Copyright 2018 Shekhar S. Chandra
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import finitetransform.farey as farey #local module
import finitetransform.radon as radon
import numpy as np
import scipy.fftpack as fftpack
import pyfftw
# Monkey patch in fftn and ifftn from pyfftw.interfaces.scipy_fftpack
fftpack.fft2 = pyfftw.interfaces.scipy_fftpack.fft2
fftpack.ifft2 = pyfftw.interfaces.scipy_fftpack.ifft2
fftpack.fft = pyfftw.interfaces.scipy_fftpack.fft
fftpack.ifft = pyfftw.interfaces.scipy_fftpack.ifft
# Turn on the cache for optimum performance
pyfftw.interfaces.cache.enable()
def projectionLength(angle, P, Q):
'''
Return the number of bins for projection at angle of a PxQ image.
Wraps function from Farey module
'''
return farey.projectionLength(angle, P, Q) #no. of bins
def toFinite(fareyVector, N):
'''
Return the finite vector corresponding to the Farey vector provided for a given modulus/length N
and the multiplicative inverse of the relevant Farey angle
Wraps function from Farey module
'''
return farey.toFinite(fareyVector, N)
def finiteTranslateOffset(fareyVector, N, P, Q):
'''
Translate offset required when mapping Farey vectors to finite angles
Returns translate offset and perp Boolean flag pair
Wraps function from Farey module
'''
return farey.finiteTranslateOffset(fareyVector, N, P, Q)
def isKatzCriterion(P, Q, angles, K = 1):
'''
Return true if angle set meets Katz criterion for exact reconstruction of
discrete arrays
'''
sumOfP = 0
sumOfQ = 0
n = len(angles)
for j in range(0, n):
p, q = farey.get_pq(angles[j])
sumOfP += abs(p)
sumOfQ += abs(q)
# if max(sumOfP, sumOfQ) > max(rows, cols):
if sumOfP > K*P or sumOfQ > K*Q:
return True
else:
return False
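# Illustrative worked example (not part of the original module): the check
# above passes once sum(|p|) or sum(|q|) strictly exceeds K times the image
# side. For a 4x4 image the set {(q,p)} = {(1,0), (0,1), (1,1), (1,-1)} has
# sum|p| = sum|q| = 3, so it is not yet sufficient; adding (q,p) = (2,1)
# raises sum|q| to 5 > 4 and the criterion holds. Angles use this module's
# complex(q, p) convention.
def _example_katz_criterion():
    base = [complex(1, 0), complex(0, 1), complex(1, 1), complex(1, -1)]
    assert not isKatzCriterion(4, 4, base)
    assert isKatzCriterion(4, 4, base + [complex(2, 1)])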
def project(image, q, p, dtype=np.int32):
'''
Projects an array at rational angle given by p, q.
Returns a list of bins that make up the resulting projection
'''
offsetMojette = 0
rows, cols = image.shape
totalBins = abs(q)*(rows-1) + abs(p)*(cols-1) + 1
# print "Projection (%d, %d) has %d bins" % (q, p, totalBins)
if q*p >= 0: #If positive slope
offsetMojette = p*(rows-1)
projection = np.zeros(totalBins, dtype)
for x in range(0, rows):
for y in range(0, cols):
if q*p >= 0:
translateMojette = q*y - p*x + offsetMojette #GetY = q, GetX = p
else:
translateMojette = p*x - q*y; #GetY = q, GetX = p
# print "t:", translateMojette, "x:", x, "y:", y, "p:", p, "q:", q
projection[translateMojette] += image[x,y]
return projection
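# Illustrative example (not part of the original module): project a 3x3 array
# along (q, p) = (1, 1). The projection has |q|*(rows-1) + |p|*(cols-1) + 1 = 5
# bins and, like every Mojette projection, its bins sum to the sum of the image.
def _example_project():
    img = np.arange(9, dtype=np.int32).reshape(3, 3)
    proj = project(img, 1, 1)
    assert proj.shape == (5,)
    assert proj.sum() == img.sum()
    return proj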
def transform(image, angles, dtype=np.int32, prevProjections = []):
'''
Compute the Mojette transform for a given angle set and return list of projections.
The angle set is assumed to be a list of 2-tuples as (q, p). Returns a list of projections.
Previous projections can be provided (for use in iterative reconstruction methods), but must be of correct size.
'''
mu = len(angles)
#Compute Mojette
projections = []
for n in range(0, mu):
p = int(angles[n].imag)
q = int(angles[n].real)
projection = project(image, q, p, dtype)
if not prevProjections:
projections.append(projection)
else:
projections.append(projection+prevProjections[n])
return projections
def backproject(projections, angles, P, Q, norm = True, dtype=np.int32, prevImage = np.array([])):
'''
Directly backprojects (smears) a set of projections at rational angles given by angles in image space (PxQ).
Returns an image of size PxQ that makes up the reconstruction
'''
image = np.zeros((P,Q),dtype)
normValue = 1.0
if norm:
normValue = float(len(angles))
for projection, angle in zip(projections, angles):
p = int(angle.imag)
q = int(angle.real)
offsetMojette = 0
if q*p >= 0: #If positive slope
offsetMojette = p*(Q-1)
for x in range(0, P):
for y in range(0, Q):
if q*p >= 0:
translateMojette = q*y - p*x + offsetMojette #GetY = q, GetX = p
else:
translateMojette = p*x - q*y #GetY = q, GetX = p
# print "t:", translateMojette, "x:", x, "y:", y, "p:", p, "q:", q
prevValue = 0
if prevImage.size > 0:
prevValue = prevImage[x,y]
try:
image[x,y] += projection[translateMojette]/normValue + prevValue
except IndexError:
image[x,y] += 0 + prevValue
return image
def finiteProjection(projection, angle, P, Q, N, center=False):
'''
Convert a Mojette projection taken at angle into a finite (FRT) projection.
'''
dyadic = True
if N % 2 == 1: # if odd, assume prime
dyadic = False
shiftQ = int(N/2.0+0.5)-int(Q/2.0+0.5)
shiftP = int(N/2.0+0.5)-int(P/2.0+0.5)
finiteProj = np.zeros(N)
p, q = farey.get_pq(angle)
m, inv = farey.toFinite(angle, N)
# print "p:", p, "q:", q, "m:", m, "inv:", inv
translateOffset, perp = farey.finiteTranslateOffset(angle, N, P, Q)
angleSign = p*q
if dyadic:
for translate, bin in enumerate(projection):
if angleSign >= 0 and perp: #Reverse for perp
translateMojette = translateOffset - translate
else:
translateMojette = translate - translateOffset
translateFinite = (inv*translateMojette)%N
if center:
translateFinite = (translateFinite + shiftQ + m*(N-shiftP))%N
finiteProj[translateFinite] += bin
else:
for translate, bin in enumerate(projection):
if angleSign >= 0 and perp: #Reverse for perp
translateMojette = int(translateOffset) - int(translate)
else:
translateMojette = int(translate) - int(translateOffset)
if translateMojette < 0:
translateFinite = ( N - ( inv*abs(translateMojette) )%N )%N
else:
translateFinite = (inv*translateMojette)%N #has issues in C, may need checking
if center:
translateFinite = (translateFinite + shiftQ + m*(N-shiftP))%N
finiteProj[translateFinite] += bin
return finiteProj
#inversion methods
def toDRT(projections, angles, N, P, Q, center=False):
'''
    Convert the Mojette (asymmetric) projection data to DRT (symmetric) projections.
Use the iFRT to reconstruct the image. Requires N+1 or N+N/2 projections if N is prime or dyadic respectively.
Returns the resulting DRT space as a 2D array
'''
size = int(N + N/2)
dyadic = True
if N % 2 == 1: # if odd, assume prime
size = int(N+1)
dyadic = False
m = 0
frtSpace = np.zeros( (size,N) )
if dyadic:
print("Dyadic size not tested yet.")
        #for each projection
'''for index, proj in enumerate(projections):
p, q = farey.get_pq(angles[index])
m, inv = farey.toFinite(angles[index], N)
frtSpace[m][:] = finiteProjection(proj, angles[index], P, Q, N, center)'''
else: #prime size
for index, proj in enumerate(projections):
p, q = farey.get_pq(angles[index])
m, inv = farey.toFinite(angles[index], N)
frtSpace[m][:] = finiteProjection(proj, angles[index], P, Q, N, center)
return frtSpace
#helper functions
def discreteSliceSamples(angle, b, fftShape):
'''
Generate the b points along slice at angle of DFT space with shape.
'''
r, s = fftShape
q = farey.getX(angle)
p = farey.getY(angle)
u = []
v = []
u.append(0 + r/2)
v.append(0 + s/2)
    for m in range(1, b//4):
u.append(p*m + r/2)
v.append(q*m + s/2)
    for m in range(-b//4, 1):
u.append(p*m + r/2)
v.append(q*m + s/2)
# print "u:",u
# print "v:",v
return u, v
def sliceSamples(angle, b, fftShape, center=False):
'''
Generate the b points along slice at angle of DFT space with shape.
'''
r, s = fftShape
p, q = farey.get_pq(angle)
u = []
v = []
offsetU = 0
offsetV = 0
if center:
offsetU = r/2
offsetV = s/2
# increment = 1.0/math.sqrt(p**2+q**2)
u.append(0 + offsetU)
v.append(0 + offsetV)
    for m in range(1, (b-1)//2):
u.append((1.0/p)*m + offsetU)
v.append(-(1.0/q)*m + offsetV)
    for m in range(-(b-1)//2, 1):
# print "m:", m, "delP:", -(1.0/p)*m + offsetU, "delQ:", (1.0/q)*m + offsetV
u.append((1.0/p)*m + offsetU)
v.append(-(1.0/q)*m + offsetV)
# print "u:",u
# print "v:",v
return u, v
#angle sets
def angleSet_ProjectionLengths(angles, P, Q):
'''
Returns a matching list of projection lengths for each angle in set
'''
binLengthList = []
for angle in angles:
binLengthList.append(projectionLength(angle,P,Q))
return binLengthList
def angleSet_Finite(p, quadrants=1, finiteList=False):
'''
Generate the minimal L1 angle set for the MT that has finite coverage.
If quadrants is more than 1, two quadrants will be used.
'''
fareyVectors = farey.Farey()
octants = 2
if quadrants > 1:
octants = 4
if quadrants > 2:
octants = 8
fareyVectors.compactOn()
    fareyVectors.generate(p//2, octants)
vectors = fareyVectors.vectors
sortedVectors = sorted(vectors, key=lambda x: x.real**2+x.imag**2) #sort by L2 magnitude
#compute corresponding m values
finiteAngles = []
for vector in sortedVectors:
if vector.real == 0:
m = 0
elif vector.imag == 0:
m = p
else:
m, inv = toFinite(vector, p)
finiteAngles.append(m)
# print("m:", m, "vector:", vector)
# print("sortedVectors:", sortedVectors)
#print(finiteAngles)
#ensure coverage
count = 0
filled = [0]*(p+1) #list of zeros
finalVectors = []
finalFiniteAngles = []
for vector, m in zip(sortedVectors, finiteAngles):
if filled[m] == 0:
count += 1
filled[m] = 1
finalVectors.append(vector)
finalFiniteAngles.append(m)
if count == p+1:
break
if finiteList:
return finalVectors, finalFiniteAngles
return finalVectors
def angleSet_Symmetric(P, Q, octant=0, binLengths=False, K = 1):
'''
Generate the minimal L1 angle set for the MT.
Parameter K controls the redundancy, K = 1 is minimal.
If octant is non-zero, full quadrant will be used. Octant schemes are as follows:
If octant = -1, the opposing octant is also used.
If octant = 0,1 (default), only use one octant.
If octant = 2, octant will be mirrored from diagonal to form a quadrant.
If octant = 4, 2 quadrants.
If octant = 8, all quadrants.
Function can also return bin lengths for each bin.
'''
angles = []
fareyVectors = farey.Farey()
maxPQ = max(P,Q)
fareyVectors.compactOff()
fareyVectors.generate(maxPQ-1, 1)
vectors = fareyVectors.vectors
sortedVectors = sorted(vectors, key=lambda x: x.real**2+x.imag**2) #sort by L2 magnitude
index = 0
binLengthList = []
angles.append(sortedVectors[index])
binLengthList.append(projectionLength(sortedVectors[index],P,Q))
while not isKatzCriterion(P, Q, angles, K) and index < len(sortedVectors): # check Katz
index += 1
angles.append(sortedVectors[index])
p, q = farey.get_pq(sortedVectors[index]) # p = imag, q = real
binLengthList.append(projectionLength(sortedVectors[index],P,Q))
# if isKatzCriterion(P, Q, angles):
# break
if octant == 0:
continue
#add octants
if octant == -1:
nextOctantAngle = farey.farey(p, -q) #mirror from axis
angles.append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if octant > 0 and p != q:
nextOctantAngle = farey.farey(q, p) #swap to mirror from diagonal
angles.append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if octant > 1:
nextOctantAngle = farey.farey(p, -q) #mirror from axis
angles.append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if p != q: #dont replicate
nextOctantAngle = farey.farey(q, -p) #mirror from axis and swap to mirror from diagonal
angles.append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if octant > 1: #add the diagonal and column projections when symmetric (all quadrant are wanted)
nextOctantAngle = farey.farey(1, 0) #mirror from axis
angles.append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if binLengths:
return angles, binLengthList
return angles
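# Illustrative usage sketch (not part of the original module): build the
# minimal symmetric angle set for a 32x32 image and report how many
# projections the Katz criterion required, together with the longest
# projection. Relies on the local farey module like everything else in this
# file; the image size is illustrative only.
def _example_angle_set():
    angles, bin_lengths = angleSet_Symmetric(32, 32, octant=1, binLengths=True)
    print(len(angles), "projections; longest projection has", max(bin_lengths), "bins")
    return angles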
def angleSubSets_Symmetric(s, mode, P, Q, octant=0, binLengths=False, K = 1):
'''
Generate the minimal L1 angle set for the MT for s subsets.
Parameter K controls the redundancy, K = 1 is minimal.
If octant is non-zero, full quadrant will be used. Octant schemes are as follows:
If octant = -1, the opposing octant is also used.
If octant = 0,1 (default), only use one octant.
If octant = 2, octant will be mirrored from diagonal to form a quadrant.
If octant = 4, 2 quadrants.
If octant = 8, all quadrants.
Function can also return bin lengths for each bin.
'''
angles = []
subsetAngles = []
for i in range(s):
subsetAngles.append([])
fareyVectors = farey.Farey()
maxPQ = max(P,Q)
fareyVectors.compactOff()
fareyVectors.generate(maxPQ-1, 1)
vectors = fareyVectors.vectors
sortedVectors = sorted(vectors, key=lambda x: x.real**2+x.imag**2) #sort by L2 magnitude
index = 0
subsetIndex = 0
binLengthList = []
angles.append(sortedVectors[index])
subsetAngles[subsetIndex].append(sortedVectors[index])
binLengthList.append(projectionLength(sortedVectors[index],P,Q))
while not isKatzCriterion(P, Q, angles, K) and index < len(sortedVectors): # check Katz
index += 1
angles.append(sortedVectors[index])
subsetAngles[subsetIndex].append(sortedVectors[index])
p, q = farey.get_pq(sortedVectors[index]) # p = imag, q = real
binLengthList.append(projectionLength(sortedVectors[index],P,Q))
# if isKatzCriterion(P, Q, angles):
# break
if octant == 0:
continue
#add octants
if octant == -1:
nextOctantAngle = farey.farey(p, -q) #mirror from axis
angles.append(nextOctantAngle)
subsetAngles[subsetIndex].append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if mode == 1:
subsetIndex += 1
subsetIndex %= s
if octant > 0 and p != q:
nextOctantAngle = farey.farey(q, p) #swap to mirror from diagonal
angles.append(nextOctantAngle)
subsetAngles[subsetIndex].append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if mode == 1:
subsetIndex += 1
subsetIndex %= s
if octant > 1:
nextOctantAngle = farey.farey(p, -q) #mirror from axis
angles.append(nextOctantAngle)
subsetAngles[subsetIndex].append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if mode == 1:
subsetIndex += 1
subsetIndex %= s
if p != q: #dont replicate
nextOctantAngle = farey.farey(q, -p) #mirror from axis and swap to mirror from diagonal
angles.append(nextOctantAngle)
subsetAngles[subsetIndex].append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if mode == 1:
subsetIndex += 1
subsetIndex %= s
if mode == 0:
subsetIndex += 1
subsetIndex %= s
if octant > 1: #add the diagonal and column projections when symmetric (all quadrant are wanted)
nextOctantAngle = farey.farey(1, 0) #mirror from axis
angles.append(nextOctantAngle)
subsetAngles[0].append(nextOctantAngle)
binLengthList.append(projectionLength(nextOctantAngle,P,Q))
if binLengths:
return angles, subsetAngles, binLengthList
return angles, subsetAngles
def angleSetSliceCoordinates(angles, P, Q, N, center=False):
'''
Compute the 2D coordinates of each translate (in NxN DFT space) of every projection having angle in angles.
Returns a list of u, v coordinate arrays [[u_0[...],v_0[...]], [u_1[...],v_1[...]], ...] per angle
'''
coords = []
translateOffset = 0
translateMojette = 0
translateFinite = 0
m = 0
offset = 0.0
if center:
offset = N/2.0
for index, angle in enumerate(angles):
u = []
v = []
coordinateList = []
p = int(angle.imag)
q = int(angle.real)
angleSign = p*q
m, inv = farey.toFinite(angle, N)
translateOffset, perp = farey.finiteTranslateOffset(angle, N)
B = projectionLength(angle, P, Q)
for translate in range(0, B):
if angleSign >= 0 and perp: #Reverse for perp
translateMojette = translateOffset - translate
else:
translateMojette = translate - translateOffset
translateFinite = (inv*translateMojette)%N #has issues in C, may need checking
# frtSpace[m][translateFinite] += bin
u.append( (translateFinite+offset)%N )
v.append( (m*translateFinite+offset)%N )
coordinateList.append(u)
coordinateList.append(v)
coords.append(coordinateList)
return coords
avg_line_length: 35.160714 | max_line_length: 135 | alphanum_fraction: 0.599644