Dataset schema (column | dtype | observed range):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40-40 |
| size | int64 | 3-1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3-972 |
| max_stars_repo_name | string | length 6-130 |
| max_stars_repo_head_hexsha | string | length 40-78 |
| max_stars_repo_licenses | list | length 1-10 |
| max_stars_count | int64, nullable | 1-191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24-24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24-24 |
| max_issues_repo_path | string | length 3-972 |
| max_issues_repo_name | string | length 6-130 |
| max_issues_repo_head_hexsha | string | length 40-78 |
| max_issues_repo_licenses | list | length 1-10 |
| max_issues_count | int64, nullable | 1-116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24-24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24-24 |
| max_forks_repo_path | string | length 3-972 |
| max_forks_repo_name | string | length 6-130 |
| max_forks_repo_head_hexsha | string | length 40-78 |
| max_forks_repo_licenses | list | length 1-10 |
| max_forks_count | int64, nullable | 1-105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24-24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24-24 |
| content | string | length 3-1.03M |
| avg_line_length | float64 | 1.13-941k |
| max_line_length | int64 | 2-941k |
| alphanum_fraction | float64 | 0-1 |
---
hexsha: dc89f26a2363a34c9a404fbca34d5e521a9e60dc | size: 436 | ext: py | lang: Python
max_stars: example_WriteBinary.py @ teristam/openephys-fileIO (8089e7c4aff829c13a79656b8812a3d3e68eb1eb) | licenses: ["MIT"] | count: 1 | events: 2020-08-16T21:52:10.000Z to 2020-08-16T21:52:10.000Z
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

#%%
# Write data to the new binary format
import sys
sys.path.append("..") # Adds higher directory to python modules path.
import numpy as np
from pathlib import Path
from openephys_fileIO import fileIO
#%% Convert continuous data to flat binary
outFolder = 'E:\\open-ephys-testdata\\M2_D23-binary'
input_folder = 'E:\\open-ephys-testdata\\M2_D23_2019-04-03_13-34-00'
fileIO.convertContinuous2Binary(input_folder,outFolder)
# %%
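
For orientation, a minimal sketch of reading the converted flat binary back with NumPy. The output file name (`continuous.dat`) and the channel count are assumptions for illustration; the layout (interleaved int16 samples) follows the Open Ephys binary-format convention and is not shown in this file:

```python
import numpy as np

n_channels = 16  # hypothetical; depends on the recording
# Memory-map the flat binary file rather than loading it all into RAM.
data = np.memmap('E:\\open-ephys-testdata\\M2_D23-binary\\continuous.dat',
                 dtype=np.int16, mode='r')
samples = data.reshape(-1, n_channels)  # one row per time point, one column per channel
```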
avg_line_length: 22.947368 | max_line_length: 69 | alphanum_fraction: 0.761468

---
hexsha: 16ee636f8a5cca184b3bcd872a8023e088899d07 | size: 393 | ext: py | lang: Python
max_stars: fizzsite/fizzsite/wsgi.py @ hurhurhurt/Fizzle (53d780d501966f4a4010b0e395bd9f87bf67a489) | licenses: ["MIT"] | count: null | events: null to null
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

"""
WSGI config for fizzsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fizzsite.settings')
application = get_wsgi_application()
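
To smoke-test the `application` callable exposed above, it can be served with the standard library's `wsgiref` module; this is a local sketch only (assuming the project is on PYTHONPATH and Django is installed), not how Django is deployed in production:

```python
from wsgiref.simple_server import make_server

from fizzsite.wsgi import application  # importing this sets DJANGO_SETTINGS_MODULE

# Serve the WSGI callable locally on port 8000 for a quick smoke test.
with make_server('', 8000, application) as httpd:
    httpd.serve_forever()
```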
avg_line_length: 23.117647 | max_line_length: 78 | alphanum_fraction: 0.78626

---
hexsha: c35e9d912be8ef95861cef217c6bb80df953d4e1 | size: 1,674 | ext: py | lang: Python
max_stars: jsonlog_cli/record.py @ borntyping/legere (df26b01bb922b88c6674299ffe5ce62df226bddd) | licenses: ["MIT"] | count: 1 | events: 2019-09-25T07:59:39.000Z to 2019-09-25T07:59:39.000Z
max_issues: jsonlog_cli/record.py @ borntyping/legere (df26b01bb922b88c6674299ffe5ce62df226bddd) | licenses: ["MIT"] | count: 3 | events: 2020-03-24T17:28:20.000Z to 2020-10-01T09:40:26.000Z
max_forks: jsonlog_cli/record.py @ borntyping/jsonlog-cli (df26b01bb922b88c6674299ffe5ce62df226bddd) | licenses: ["MIT"] | count: null | events: null to null

from __future__ import annotations
import dataclasses
import json
import textwrap
import typing
import jsonlog
log = jsonlog.getLogger(__name__)
RecordKey = str
RecordValue = typing.Union[None, str, int, float, bool, typing.Sequence, typing.Mapping]
class RecordDict(dict, typing.Mapping[str, RecordValue]):
"""A mapping that allows access to values as if they were attributes."""
def __getattr__(self, item) -> typing.Any:
return self[item]
@dataclasses.dataclass()
class Record:
message: str
json: RecordDict
def __post_init__(self) -> None:
self.json["__json__"] = dict(self.json)
self.json["__message__"] = self.message
@classmethod
def from_string(cls, message: str):
message = message.strip()
try:
data = json.loads(message, object_hook=RecordDict)
except json.JSONDecodeError as error:
excerpt = textwrap.shorten(message, 100)
log.exception(f"Could not parse JSON from line {excerpt!r}")
raise error
return cls(message=message, json=data)
def keys(self) -> typing.Iterable[str]:
return [k for k in self.json.keys() if k not in {"__json__", "__message__"}]
def extract(self, key: typing.Optional[str]) -> RecordValue:
if key is None:
return None
if key in self.json:
return self.json[key]
return self._extract(key)
def _extract(self, key: str) -> RecordValue:
result = self.json
for k in key.split("."):
try:
result = result[k]
except KeyError:
return None
return result
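
A short usage sketch of the `Record` API above; the input log line is invented for illustration:

```python
record = Record.from_string('{"level": "info", "http": {"status": 200}}')

record.extract("http.status")   # 200: dotted keys walk nested mappings via _extract
record.extract("missing.key")   # None: the KeyError is absorbed rather than raised
list(record.keys())             # ['level', 'http']: __json__/__message__ are filtered out
```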
avg_line_length: 26.15625 | max_line_length: 88 | alphanum_fraction: 0.626643

---
hexsha: 66ff28995b6260e3daf2b26579420ae803ba2d49 | size: 6,148 | ext: py | lang: Python
max_stars: jetavator_mssql/services/MSSQLService.py @ jetavator/jetavator_mssql (e90dfc4ab429c4d3339e7e33b0b2a9974d27689a) | licenses: ["Apache-2.0"] | count: null | events: null to null
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

from typing import Iterable, Set
import pandas
import sqlalchemy
from sqlalchemy.exc import ProgrammingError, DBAPIError
from lazy_property import LazyProperty
from jetavator.services import StorageService
class MSSQLService(StorageService, register_as='mssql'):
index_option_kwargs: Set[str] = {"mssql_clustered"}
@LazyProperty
def sqlalchemy_connection(self):
if self.config.trusted_connection:
return sqlalchemy.create_engine(
"mssql+pyodbc://{server}:1433/{database}"
"?driver=ODBC+Driver+17+for+SQL+Server".format(
server=self.config.server,
database=self.config.database
),
connect_args={'autocommit': True},
deprecate_large_types=True
)
else:
return sqlalchemy.create_engine(
"mssql+pyodbc://{username}:{password}@{server}:1433/{database}"
"?driver=ODBC+Driver+17+for+SQL+Server".format(
username=self.config.username,
password=self.config.password,
server=self.config.server,
database=self.config.database
),
connect_args={'autocommit': True},
deprecate_large_types=True
)
def execute(self, sql):
sql_statement = sql.encode("ascii", "ignore").decode("ascii")
try:
result_proxy = self.sqlalchemy_connection.execute(
sql_statement
)
except (ProgrammingError, DBAPIError) as e:
raise Exception(
f"""
Config dump:
{self.config}
                Error while trying to run script:
{sql_statement}
""" + str(e)
)
if result_proxy.returns_rows:
df = pandas.DataFrame(result_proxy.fetchall())
if df.shape != (0, 0):
df.columns = result_proxy.keys()
return df
else:
return pandas.DataFrame()
def drop_schema(self):
self.sqlalchemy_connection.execute(
f"""
DECLARE @drop_statements AS CURSOR
DECLARE @statement AS VARCHAR(max)
SET @drop_statements = CURSOR FOR
SELECT 'DROP VIEW [{self.config.schema}].[' + TABLE_NAME + ']'
FROM INFORMATION_SCHEMA.VIEWS
WHERE TABLE_SCHEMA = '{self.config.schema}'
UNION ALL
SELECT 'DROP TABLE [{self.config.schema}].[' + TABLE_NAME + ']'
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = '{self.config.schema}'
AND TABLE_TYPE = 'BASE TABLE'
OPEN @drop_statements
FETCH NEXT FROM @drop_statements INTO @statement
WHILE @@FETCH_STATUS = 0
BEGIN
EXECUTE (@statement)
FETCH NEXT FROM @drop_statements INTO @statement
END
CLOSE @drop_statements
DEALLOCATE @drop_statements
"""
)
self.sqlalchemy_connection.execute(
f"DROP SCHEMA [{self.config.schema}]"
)
def create_schema(self):
self.sqlalchemy_connection.execute(
"CREATE SCHEMA [" + self.config.schema + "]"
)
@property
def schema_empty(self):
return (
len(
self.sqlalchemy_connection.execute(
f"""
SELECT TOP 1
TABLE_NAME
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_CATALOG = '{self.config.database}'
AND TABLE_SCHEMA = '{self.config.schema}'
"""
).fetchall()
) == 0
)
@property
def schema_exists(self):
return self._sql_exists(
f"""
SELECT SCHEMA_NAME
FROM INFORMATION_SCHEMA.SCHEMATA
WHERE CATALOG_NAME = '{self.config.database}'
AND SCHEMA_NAME = '{self.config.schema}'
"""
)
def _sql_exists(self, sql):
result_proxy = self.sqlalchemy_connection.execute(sql)
return bool(result_proxy.first())
def table_exists(self, table_name):
return self._sql_exists(
f"""
SELECT TABLE_NAME
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_CATALOG = '{self.config.database}'
AND TABLE_SCHEMA = '{self.config.schema}'
AND TABLE_NAME = '{table_name}'
"""
)
def column_exists(self, table_name, column_name):
return self._sql_exists(
f"""
SELECT COLUMN_NAME
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_CATALOG = '{self.config.database}'
AND TABLE_SCHEMA = '{self.config.schema}'
AND TABLE_NAME = '{table_name}'
AND COLUMN_NAME = '{column_name}'
"""
)
def sql_query_single_value(self, sql):
try:
return self.sqlalchemy_connection.execute(
sql
).first()[0]
except TypeError:
return None
# def execute_sql_element(
# self,
# sqlalchemy_element: sqlalchemy.sql.expression.Executable,
# async_cursor: bool = False
# ) -> pandas.DataFrame:
# return self.sqlalchemy_connection.execute(sqlalchemy_element).fetchall()
def test(self) -> None:
self.execute("SELECT 1")
def load_dataframe(self, dataframe: pandas.DataFrame, source_name: str, source_column_names: Iterable[str]) -> None:
# TODO: Implement MSSQLService.load_dataframe
raise NotImplementedError()
# def compile_sqlalchemy(
# self,
# sqlalchemy_element: sqlalchemy.sql.expression.ClauseElement
# ) -> str:
# return super().compile_sqlalchemy(sqlalchemy_element).replace("DATETIME", "DATETIME2")
avg_line_length: 32.877005 | max_line_length: 120 | alphanum_fraction: 0.543754

---
hexsha: 9d80ee21c70288f7372247aaf49091144a800949 | size: 3,209 | ext: py | lang: Python
max_stars: huobi/utils/input_checker.py @ codemonkey89/huobi_Python (92b96679f6e239c785df7c4354a0a94deda2768f) | licenses: ["Apache-2.0"] | count: null | events: null to null
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

import re
import time
from huobi.exception.huobi_api_exception import HuobiApiException
reg_ex = "[ _`~!@#$%^&*()+=|{}':;',\\[\\].<>/?~!@#¥%……&*()——+|{}【】‘;:”“’。,、?]|\n|\t"
def check_symbol(symbol):
if not isinstance(symbol, str):
raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] symbol must be string")
if re.match(reg_ex, symbol):
raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + symbol + " is invalid symbol")
def check_symbol_list(symbols):
if not isinstance(symbols, list):
raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] symbols in subscription is not a list")
for symbol in symbols:
check_symbol(symbol)
def check_currency(currency):
if not isinstance(currency, str):
raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] currency must be string")
if re.match(reg_ex, currency) is not None:
raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + currency + " is invalid currency")
def check_range(value, min_value, max_value, name):
if value is None:
return
if min_value > value or value > max_value:
raise HuobiApiException(HuobiApiException.INPUT_ERROR,
"[Input] " + name + " is out of bound. " + str(value) + " is not in [" + str(
min_value) + "," + str(max_value) + "]")
def check_should_not_none(value, name):
if value is None:
raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + name + " should not be null")
def check_should_none(value, name):
if value is not None:
raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + name + " should be null")
def check_in_list(value, list_configed, name):
if (value is not None) and (value not in list_configed):
raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + name + " should be one in " + (",".join(list_configed)))
def check_list(list_value, min_value, max_value, name):
if list_value is None:
return
if len(list_value) > max_value:
raise HuobiApiException(HuobiApiException.INPUT_ERROR,
"[Input] " + name + " is out of bound, the max size is " + str(max_value))
if len(list_value) < min_value:
raise HuobiApiException(HuobiApiException.INPUT_ERROR,
"[Input] " + name + " should contain " + str(min_value) + " item(s) at least")
def greater_or_equal(value, base, name):
if value is not None and value < base:
raise HuobiApiException(HuobiApiException.INPUT_ERROR,
"[Input] " + name + " should be greater than " + base)
def format_date(value, name):
if value is None:
return None
if not isinstance(value, str):
raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + name + " must be string")
try:
new_time = time.strptime(value, "%Y-%m-%d")
return time.strftime("%Y-%m-%d", new_time)
    except ValueError:
        raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + name + " is not a valid date format")
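
A few illustrative calls against the validators above. Note that `re.match` anchors at the start of the string, so only a leading special character trips `check_symbol`:

```python
check_symbol("btcusdt")             # passes silently
check_symbol(" btcusdt")            # raises HuobiApiException: leading space matches reg_ex
check_range(11, 1, 10, "limit")     # raises HuobiApiException: 11 is not in [1, 10]
format_date("2020-01-31", "start")  # returns "2020-01-31"
```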
avg_line_length: 40.620253 | max_line_length: 132 | alphanum_fraction: 0.641633

---
hexsha: ca116b5c9addb449e8ba5b3ad7e489858946b1af | size: 11,370 | ext: py | lang: Python
max_stars: tests/test_connection.py @ PerchLive/pyopenvidu (fa8ac55b446c94a026dbd73d67cc090810a8c3f1) | licenses: ["MIT"] | count: null | events: null to null
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

#!/usr/bin/env python3
"""Tests for OpenViduConnection object"""
import pytest
from pyopenvidu import OpenVidu, OpenViduSessionDoesNotExistsError, OpenViduConnectionDoesNotExistsError
from urllib.parse import urljoin
from datetime import datetime
URL_BASE = 'http://test.openvidu.io:4443/'
SESSIONS = {"numberOfElements": 2, "content": [
{"sessionId": "TestSession", "createdAt": 1538482606338, "mediaMode": "ROUTED", "recordingMode": "MANUAL",
"defaultOutputMode": "COMPOSED", "defaultRecordingLayout": "BEST_FIT", "customSessionId": "TestSession",
"connections": {"numberOfElements": 3, "content": [
{"connectionId": "vhdxz7abbfirh2lh", "createdAt": 1538482606412, "location": "",
"platform": "Chrome 69.0.3497.100 on Linux 64-bit",
"token": "wss://localhost:4443?sessionId=TestSession&token=2ezkertrimk6nttk&role=PUBLISHER&turnUsername=H0EQLL&turnCredential=kjh48u",
"role": "PUBLISHER", "serverData": "", "clientData": "TestClient1", "publishers": [
{"createdAt": 1538482606976, "streamId": "vhdxz7abbfirh2lh_CAMERA_CLVAU",
"mediaOptions": {"hasAudio": True, "audioActive": True, "hasVideo": True, "videoActive": True,
"typeOfVideo": "CAMERA", "frameRate": 30,
"videoDimensions": "{\"width\":640,\"height\":480}", "filter": {}}}],
"subscribers": []},
{"connectionId": "maxawd3ysuj1rxvq", "createdAt": 1538482607659, "location": "",
"platform": "Chrome 69.0.3497.100 on Linux 64-bit",
"token": "wss://localhost:4443?sessionId=TestSession&token=ovj1b4ysuqmcirti&role=PUBLISHER&turnUsername=INOAHN&turnCredential=oujrqd",
"role": "PUBLISHER", "serverData": "", "clientData": "TestClient2", "publishers": [],
"subscribers": [
{"createdAt": 1538482607799, "streamId": "vhdxz7abbfirh2lh_CAMERA_CLVAU"
}
]},
{"connectionId": "maxawc4zsuj1rxva", "createdAt": 1538482607659, "location": "",
"platform": "Chrome 69.0.3497.100 on Linux 64-bit",
"token": "wss://localhost:4443?sessionId=TestSession&token=ovj1b4ysuqmcirti&role=PUBLISHER&turnUsername=INOAHN&turnCredential=oujrqd",
"role": "PUBLISHER", "publishers": [],
"subscribers": [
{"createdAt": 1538482607799, "streamId": "vhdxz7abbfirh2lh_CAMERA_CLVAU"
}
]},
]}, "recording": False},
{"sessionId": "TestSession2", "createdAt": 1538482606338, "mediaMode": "ROUTED", "recordingMode": "MANUAL",
"defaultOutputMode": "COMPOSED", "defaultRecordingLayout": "BEST_FIT", "customSessionId": "TestSession",
"connections": {"numberOfElements": 3, "content": [
{"connectionId": "vhdxz7abbfirh2lh", "createdAt": 1538482606412, "location": "",
"platform": "Chrome 69.0.3497.100 on Linux 64-bit",
"token": "wss://localhost:4443?sessionId=TestSession&token=2ezkertrimk6nttk&role=PUBLISHER&turnUsername=H0EQLL&turnCredential=kjh48u",
"role": "PUBLISHER", "serverData": "", "clientData": "TestClient1", "publishers": [
{"createdAt": 1538482606976, "streamId": "vhdxz7abbfirh2lh_CAMERA_CLVAU",
"mediaOptions": {"hasAudio": True, "audioActive": True, "hasVideo": True, "videoActive": True,
"typeOfVideo": "CAMERA", "frameRate": 30,
"videoDimensions": "{\"width\":640,\"height\":480}", "filter": {}}}],
"subscribers": []}, {"connectionId": "maxawd3ysuj1rxvq", "createdAt": 1538482607659, "location": "",
"platform": "Chrome 69.0.3497.100 on Linux 64-bit",
"token": "wss://localhost:4443?sessionId=TestSession&token=ovj1b4ysuqmcirti&role=PUBLISHER&turnUsername=INOAHN&turnCredential=oujrqd",
"role": "PUBLISHER", "serverData": "", "clientData": "TestClient2", "publishers": [],
"subscribers": [
{"createdAt": 1538482607799, "streamId": "vhdxz7abbfirh2lh_CAMERA_CLVAU"}]},
{"connectionId": "ipc_IPCAM_rtsp_A8MJ_91_191_213_49_554_live_mpeg4_sdp", "createdAt": 1582121476379,
"location": "unknown", "platform": "IPCAM", "role": "PUBLISHER", "serverData": "MY_IP_CAMERA", "publishers": [
{"createdAt": 1582121476439,
"streamId": "str_IPC_XC1W_ipc_IPCAM_rtsp_A8MJ_91_191_213_49_554_live_mpeg4_sdp",
"rtspUri": "rtsp://91.191.213.49:554/live_mpeg4.sdp",
"mediaOptions": {"hasAudio": True, "audioActive": True, "hasVideo": True, "videoActive": True,
"typeOfVideo": "IPCAM", "frameRate": None, "videoDimensions": None, "filter": {},
"adaptativeBitrate": True, "onlyPlayWithSubscribers": True}}], "subscribers": []}
]},
"recording": False}
]}
SECRET = 'MY_SECRET'
@pytest.fixture
def openvidu_instance(requests_mock):
requests_mock.get(urljoin(URL_BASE, 'api/sessions'), json=SESSIONS)
requests_mock.get(urljoin(URL_BASE, 'api/sessions/TestSession'), json=SESSIONS['content'][0])
requests_mock.get(urljoin(URL_BASE, 'api/sessions/TestSession2'), json=SESSIONS['content'][1])
yield OpenVidu(URL_BASE, SECRET)
@pytest.fixture
def session_instance(openvidu_instance):
yield openvidu_instance.get_session('TestSession')
@pytest.fixture
def connection_instance(session_instance):
yield session_instance.get_connection('vhdxz7abbfirh2lh')
#
# Disconnection
#
def test_disconnection(connection_instance, requests_mock):
a = requests_mock.delete(urljoin(URL_BASE, 'api/sessions/TestSession/connection/vhdxz7abbfirh2lh'), json={},
status_code=204)
connection_instance.force_disconnect()
assert a.called
def test_disconnection_failed_no_connection(connection_instance, requests_mock):
requests_mock.delete(urljoin(URL_BASE, 'api/sessions/TestSession/connection/vhdxz7abbfirh2lh'), json={},
status_code=404)
with pytest.raises(OpenViduConnectionDoesNotExistsError):
connection_instance.force_disconnect()
def test_disconnection_failed_no_session(connection_instance, requests_mock):
requests_mock.delete(urljoin(URL_BASE, 'api/sessions/TestSession/connection/vhdxz7abbfirh2lh'), json={},
status_code=400)
with pytest.raises(OpenViduSessionDoesNotExistsError):
connection_instance.force_disconnect()
#
# Signals
#
def test_signal(connection_instance, requests_mock):
a = requests_mock.post(urljoin(URL_BASE, 'api/signal'), status_code=200)
connection_instance.signal('MY_TYPE', "Hello world!")
assert a.last_request.json() == {
"session": SESSIONS['content'][0]['sessionId'],
"type": "MY_TYPE",
"data": "Hello world!",
"to": [SESSIONS['content'][0]['connections']['content'][0]['connectionId']]
}
def test_signal_value_error(connection_instance, requests_mock):
a = requests_mock.post(urljoin(URL_BASE, 'api/signal'), status_code=400)
with pytest.raises(ValueError):
connection_instance.signal('MY_TYPE', "Hello world!")
def test_signal_no_session(connection_instance, requests_mock):
a = requests_mock.post(urljoin(URL_BASE, 'api/signal'), status_code=404)
with pytest.raises(OpenViduSessionDoesNotExistsError):
connection_instance.signal('MY_TYPE', "Hello world!")
assert a.called
def test_signal_no_connection(connection_instance, requests_mock):
a = requests_mock.post(urljoin(URL_BASE, 'api/signal'), status_code=406)
with pytest.raises(OpenViduConnectionDoesNotExistsError):
connection_instance.signal('MY_TYPE', "Hello world!")
#
# Unpublish
#
def test_force_unpublish_all(connection_instance, requests_mock):
a = requests_mock.delete(urljoin(URL_BASE, 'api/sessions/TestSession/stream/vhdxz7abbfirh2lh_CAMERA_CLVAU'),
json={},
status_code=204)
connection_instance.force_unpublish_all_streams()
assert a.called
#
# Properties
#
def test_properties(connection_instance):
assert connection_instance.session_id == SESSIONS['content'][0]['sessionId']
assert connection_instance.id == SESSIONS['content'][0]['connections']['content'][0]['connectionId']
assert connection_instance.created_at == datetime.utcfromtimestamp(
SESSIONS['content'][0]['connections']['content'][0]['createdAt'] / 1000.0
)
assert connection_instance.token == SESSIONS['content'][0]['connections']['content'][0]['token']
assert connection_instance.client_data == SESSIONS['content'][0]['connections']['content'][0]['clientData']
assert connection_instance.server_data == SESSIONS['content'][0]['connections']['content'][0]['serverData']
assert connection_instance.platform == SESSIONS['content'][0]['connections']['content'][0]['platform']
assert connection_instance.role == SESSIONS['content'][0]['connections']['content'][0]['role']
assert len(connection_instance.publishers) == len(SESSIONS['content'][0]['connections']['content'][0]['publishers'])
def test_properties_none_fields(session_instance):
connection_instance = session_instance.get_connection('maxawc4zsuj1rxva')
assert connection_instance.session_id == SESSIONS['content'][0]['sessionId']
assert connection_instance.id == SESSIONS['content'][0]['connections']['content'][2]['connectionId']
assert connection_instance.created_at == datetime.utcfromtimestamp(
SESSIONS['content'][0]['connections']['content'][2]['createdAt'] / 1000.0
)
assert connection_instance.token == SESSIONS['content'][0]['connections']['content'][2]['token']
assert connection_instance.client_data is None
assert connection_instance.server_data is None
assert connection_instance.platform == SESSIONS['content'][0]['connections']['content'][2]['platform']
assert connection_instance.role == SESSIONS['content'][0]['connections']['content'][2]['role']
assert len(connection_instance.publishers) == len(SESSIONS['content'][0]['connections']['content'][2]['publishers'])
def test_properties_ipcam_fields(openvidu_instance):
session_instance = openvidu_instance.get_session('TestSession2')
connection_instance = session_instance.get_connection('ipc_IPCAM_rtsp_A8MJ_91_191_213_49_554_live_mpeg4_sdp')
assert connection_instance.session_id == SESSIONS['content'][1]['sessionId']
assert connection_instance.id == SESSIONS['content'][1]['connections']['content'][2]['connectionId']
assert connection_instance.created_at == datetime.utcfromtimestamp(
SESSIONS['content'][1]['connections']['content'][2]['createdAt'] / 1000.0
)
assert connection_instance.token is None
assert connection_instance.client_data is None
assert connection_instance.server_data == SESSIONS['content'][1]['connections']['content'][2]['serverData']
assert connection_instance.platform == SESSIONS['content'][1]['connections']['content'][2]['platform']
assert connection_instance.role == SESSIONS['content'][1]['connections']['content'][2]['role']
assert len(connection_instance.publishers) == len(SESSIONS['content'][1]['connections']['content'][2]['publishers'])
avg_line_length: 48.798283 | max_line_length: 165 | alphanum_fraction: 0.678012

---
hexsha: e6880440b9f406a266e4838060fd528172d43f14 | size: 567 | ext: py | lang: Python
max_stars: examples/cookbook-plain/cookbook/recipes/models.py @ CarlosMart626/graphene-django-authorization (8f6b30417868b447d68fe5bd1425385b285a604b) | licenses: ["MIT"] | count: 2 | events: 2017-11-17T18:32:49.000Z to 2018-03-07T06:56:16.000Z
max_issues: (same path/repo/head/licenses as max_stars) | count: 2 | events: 2018-02-26T00:42:25.000Z to 2018-02-26T00:42:41.000Z
max_forks: (same path/repo/head/licenses as max_stars) | count: 2 | events: 2018-02-26T00:34:50.000Z to 2021-08-06T08:27:32.000Z

from django.db import models
from cookbook.ingredients.models import Ingredient
class Recipe(models.Model):
title = models.CharField(max_length=100)
instructions = models.TextField()
class RecipeIngredient(models.Model):
recipe = models.ForeignKey(Recipe, related_name='amounts')
ingredient = models.ForeignKey(Ingredient, related_name='used_by')
amount = models.FloatField()
unit = models.CharField(max_length=20, choices=(
('unit', 'Units'),
('kg', 'Kilograms'),
('l', 'Litres'),
('st', 'Shots'),
))
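
A minimal ORM sketch for the models above, run inside a configured Django context and assuming an `Ingredient` row already exists (the `Ingredient` fields live in `cookbook.ingredients` and are not shown here):

```python
recipe = Recipe.objects.create(title="Pancakes", instructions="Mix and fry.")
flour = Ingredient.objects.first()  # hypothetical existing ingredient

RecipeIngredient.objects.create(recipe=recipe, ingredient=flour, amount=0.5, unit='kg')

recipe.amounts.all()  # reverse lookup via related_name='amounts'
flour.used_by.all()   # reverse lookup via related_name='used_by'
```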
avg_line_length: 27 | max_line_length: 70 | alphanum_fraction: 0.66843

---
hexsha: 922b0de15742c37e04bc9ca6f6c626c4962b2472 | size: 266 | ext: py | lang: Python
max_stars: rapidsms/contrib/httptester/urls.py @ glosoftgroup/rsms (b1f9c9a471ed9595e02e2529fcc58bf23cc2ab34) | licenses: ["BSD-3-Clause"] | count: null | events: null to null
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^$", views.generate_identity, name='httptester-index'),
url(r"^(?P<identity>\d+)/$", views.message_tester, name='httptester')
]
avg_line_length: 20.461538 | max_line_length: 73 | alphanum_fraction: 0.665414

---
hexsha: d4dfd681ea310ff58a648ef14770c0833d429067 | size: 2,279 | ext: py | lang: Python
max_stars: logging/cloud-client/export_test.py @ alexhaines123/googlecloudsqlexamples (06d9254ec77955c02f18cd79a57cdfbd64dbf8ea) | licenses: ["Apache-2.0"] | count: 2 | events: 2017-09-23T04:23:46.000Z to 2021-06-11T01:23:06.000Z
max_issues: logging/cloud-client/export_test.py @ ryanmats/python-docs-samples (183a6186cd059c7ba24ef324614bc5fee08bff08) | licenses: ["Apache-2.0"] | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_issues) | count: null | events: null to null

# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
from gcloud import logging
from gcp.testing import eventually_consistent
import pytest
import export
TEST_SINK_NAME_TMPL = 'example_sink_{}'
TEST_SINK_FILTER = 'severity>=CRITICAL'
def _random_id():
return ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(6))
@pytest.yield_fixture
def example_sink(cloud_config):
client = logging.Client()
sink = client.sink(
TEST_SINK_NAME_TMPL.format(_random_id()),
TEST_SINK_FILTER,
'storage.googleapis.com/{bucket}'.format(
bucket=cloud_config.storage_bucket))
sink.create()
yield sink
    try:
        sink.delete()
    except Exception:
        pass
def test_list(example_sink, capsys):
@eventually_consistent.call
def _():
export.list_sinks()
out, _ = capsys.readouterr()
assert example_sink.name in out
def test_create(cloud_config, capsys):
sink_name = TEST_SINK_NAME_TMPL.format(_random_id())
try:
export.create_sink(
sink_name,
cloud_config.storage_bucket,
TEST_SINK_FILTER)
# Clean-up the temporary sink.
finally:
        try:
            logging.Client().sink(sink_name).delete()
        except Exception:
            pass
out, _ = capsys.readouterr()
assert sink_name in out
def test_update(example_sink, capsys):
updated_filter = 'severity>=INFO'
export.update_sink(example_sink.name, updated_filter)
example_sink.reload()
assert example_sink.filter_ == updated_filter
def test_delete(example_sink, capsys):
export.delete_sink(example_sink.name)
assert not example_sink.exists()
avg_line_length: 24.771739 | max_line_length: 74 | alphanum_fraction: 0.69548

---
hexsha: d52ce70db92a97578813a13fc77e13b059add127 | size: 12,735 | ext: py | lang: Python
max_stars: saharaclient/osc/v1/jobs.py @ mail2nsrajesh/python-saharaclient (48e68c1c3a9e6737e24be2da88de8999a4609a51) | licenses: ["Apache-2.0"] | count: null | events: null to null
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils as osc_utils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from saharaclient.osc.v1 import utils
JOB_FIELDS = ['id', 'job_template_id', 'cluster_id', 'input_id', 'output_id',
'start_time', 'end_time', 'status', 'is_public', 'is_protected',
'engine_job_id']
JOB_STATUS_CHOICES = ['done-with-error', 'failed', 'killed', 'pending',
'running', 'succeeded', 'to-be-killed']
def _format_job_output(data):
data['status'] = data['info']['status']
del data['info']
data['job_template_id'] = data.pop('job_id')
class ExecuteJob(command.ShowOne):
"""Executes job"""
log = logging.getLogger(__name__ + ".ExecuteJob")
def get_parser(self, prog_name):
parser = super(ExecuteJob, self).get_parser(prog_name)
parser.add_argument(
'--job-template',
metavar="<job-template>",
help="Name or ID of the job template "
"[REQUIRED if JSON is not provided]",
)
parser.add_argument(
'--cluster',
metavar="<cluster>",
help="Name or ID of the cluster "
"[REQUIRED if JSON is not provided]",
)
parser.add_argument(
'--input',
metavar="<input>",
help="Name or ID of the input data source",
)
parser.add_argument(
'--output',
metavar="<output>",
help="Name or ID of the output data source",
)
parser.add_argument(
'--params',
metavar="<name:value>",
nargs='+',
help="Parameters to add to the job"
)
parser.add_argument(
'--args',
metavar="<argument>",
nargs='+',
help="Arguments to add to the job"
)
parser.add_argument(
'--public',
action='store_true',
default=False,
help='Make the job public',
)
parser.add_argument(
'--protected',
action='store_true',
default=False,
help='Make the job protected',
)
configs = parser.add_mutually_exclusive_group()
configs.add_argument(
'--config-json',
metavar='<filename>',
help='JSON representation of the job configs'
)
configs.add_argument(
'--configs',
metavar="<name:value>",
nargs='+',
help="Configs to add to the job"
)
parser.add_argument(
'--interface',
metavar='<filename>',
help='JSON representation of the interface'
)
parser.add_argument(
'--json',
metavar='<filename>',
help='JSON representation of the job. Other arguments will not be '
'taken into account if this one is provided'
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
if parsed_args.json:
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = jsonutils.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
if 'job_configs' in template:
template['configs'] = template.pop('job_configs')
data = client.job_executions.create(**template).to_dict()
else:
if not parsed_args.cluster or not parsed_args.job_template:
raise exceptions.CommandError(
'At least --cluster, --job-template, arguments should be '
'specified or json template should be provided with '
'--json argument')
job_configs = {}
            if parsed_args.interface:
                blob = osc_utils.read_blob_file_contents(parsed_args.interface)
                try:
                    parsed_args.interface = jsonutils.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'interface from file %s: %s' % (parsed_args.interface, e))
            if parsed_args.config_json:
                blob = osc_utils.read_blob_file_contents(parsed_args.config_json)
                try:
                    job_configs['configs'] = jsonutils.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'configs from file %s: %s' % (parsed_args.config_json, e))
elif parsed_args.configs:
job_configs['configs'] = dict(
map(lambda x: x.split(':', 1), parsed_args.configs))
if parsed_args.args:
job_configs['args'] = parsed_args.args
if parsed_args.params:
job_configs['params'] = dict(
map(lambda x: x.split(':', 1), parsed_args.params))
jt_id = utils.get_resource_id(
client.jobs, parsed_args.job_template)
cluster_id = utils.get_resource_id(
client.clusters, parsed_args.cluster)
if parsed_args.input not in [None, "", "None"]:
input_id = utils.get_resource_id(
client.data_sources, parsed_args.input)
else:
input_id = None
if parsed_args.output not in [None, "", "None"]:
output_id = utils.get_resource_id(
client.data_sources, parsed_args.output)
else:
output_id = None
data = client.job_executions.create(
job_id=jt_id, cluster_id=cluster_id, input_id=input_id,
output_id=output_id, interface=parsed_args.interface,
configs=job_configs, is_public=parsed_args.public,
is_protected=parsed_args.protected).to_dict()
sys.stdout.write(
'Job "{job}" has been started successfully.\n'.format(
job=data['id']))
_format_job_output(data)
data = utils.prepare_data(data, JOB_FIELDS)
return self.dict2columns(data)
class ListJobs(command.Lister):
"""Lists jobs"""
log = logging.getLogger(__name__ + ".ListJobs")
def get_parser(self, prog_name):
parser = super(ListJobs, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help='List additional fields in output',
)
parser.add_argument(
'--status',
metavar="<status>",
choices=JOB_STATUS_CHOICES,
help="List jobs with specific status"
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
data = client.job_executions.list()
for job in data:
job.status = job.info['status']
if parsed_args.status:
data = [job for job in data
if job.info['status'] == parsed_args.status.replace(
'-', '').upper()]
if parsed_args.long:
columns = ('id', 'cluster id', 'job id', 'status', 'start time',
'end time')
column_headers = utils.prepare_column_headers(columns)
else:
columns = ('id', 'cluster id', 'job id', 'status')
column_headers = utils.prepare_column_headers(columns)
return (
column_headers,
(osc_utils.get_item_properties(
s,
columns
) for s in data)
)
class ShowJob(command.ShowOne):
"""Display job details"""
log = logging.getLogger(__name__ + ".ShowJob")
def get_parser(self, prog_name):
parser = super(ShowJob, self).get_parser(prog_name)
parser.add_argument(
"job",
metavar="<job>",
help="ID of the job to display",
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
data = client.job_executions.get(parsed_args.job).to_dict()
_format_job_output(data)
data = utils.prepare_data(data, JOB_FIELDS)
return self.dict2columns(data)
class DeleteJob(command.Command):
"""Deletes job"""
log = logging.getLogger(__name__ + ".DeleteJob")
def get_parser(self, prog_name):
parser = super(DeleteJob, self).get_parser(prog_name)
parser.add_argument(
"job",
metavar="<job>",
nargs="+",
help="ID(s) of the job(s) to delete",
)
parser.add_argument(
'--wait',
action='store_true',
default=False,
help='Wait for the job(s) delete to complete',
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
for job_id in parsed_args.job:
client.job_executions.delete(job_id)
sys.stdout.write(
'Job "{job}" deletion has been started.\n'.format(job=job_id))
if parsed_args.wait:
for job_id in parsed_args.job:
if not utils.wait_for_delete(client.job_executions, job_id):
self.log.error(
'Error occurred during job deleting: %s' %
job_id)
else:
sys.stdout.write(
'Job "{job}" has been removed successfully.\n'.format(
job=job_id))
class UpdateJob(command.ShowOne):
"""Updates job"""
log = logging.getLogger(__name__ + ".UpdateJob")
def get_parser(self, prog_name):
parser = super(UpdateJob, self).get_parser(prog_name)
parser.add_argument(
'job',
metavar="<job>",
help="ID of the job to update",
)
public = parser.add_mutually_exclusive_group()
public.add_argument(
'--public',
action='store_true',
help='Make the job public (Visible from other tenants)',
dest='is_public'
)
public.add_argument(
'--private',
action='store_false',
help='Make the job private (Visible only from this tenant)',
dest='is_public'
)
protected = parser.add_mutually_exclusive_group()
protected.add_argument(
'--protected',
action='store_true',
help='Make the job protected',
dest='is_protected'
)
protected.add_argument(
'--unprotected',
action='store_false',
help='Make the job unprotected',
dest='is_protected'
)
parser.set_defaults(is_public=None, is_protected=None)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
update_dict = utils.create_dict_from_kwargs(
is_public=parsed_args.is_public,
is_protected=parsed_args.is_protected)
data = client.job_executions.update(
parsed_args.job, **update_dict).job_execution
_format_job_output(data)
data = utils.prepare_data(data, JOB_FIELDS)
return self.dict2columns(data)
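
For clarity on the `--params`/`--configs` handling above: each `name:value` pair is split on the first colon only, so values may themselves contain colons. A standalone sketch with illustrative values:

```python
params = ["mapper.count:4", "jdbc.url:jdbc:mysql://host/db"]
parsed = dict(map(lambda x: x.split(':', 1), params))
# {'mapper.count': '4', 'jdbc.url': 'jdbc:mysql://host/db'}
```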
avg_line_length: 33.077922 | max_line_length: 79 | alphanum_fraction: 0.558382

---
hexsha: 353cd7e6cb26047e19d335d5cd12a27b970847aa | size: 412 | ext: py | lang: Python
max_stars: tools/getDensity.py @ DaylightingSociety/SocMap (c8e9f40efdcee2c765cd02b6398d948fecf6bd83) | licenses: ["BSD-3-Clause"] | count: 16 | events: 2018-06-25T04:02:50.000Z to 2022-01-25T11:06:50.000Z
max_issues: (same path/repo/head/licenses as max_stars) | count: 26 | events: 2018-02-07T04:15:44.000Z to 2020-10-08T15:39:28.000Z
max_forks: (same path/repo/head/licenses as max_stars) | count: 4 | events: 2018-02-13T18:38:21.000Z to 2019-02-22T19:39:56.000Z

#!/usr/bin/env python3
import sys, os
import igraph as ig
"""
This tools returns a density measurement for each map provided as an argument
and prints the densities in order
"""
if __name__ == "__main__":
if( len(sys.argv) < 2 ):
print("USAGE: %s <map.gml> [map2.gml...]" % sys.argv[0])
sys.exit(1)
for fname in sys.argv[1:]:
net = ig.Graph.Read_GML(fname)
print("%s,%f" % (fname,net.density()))
avg_line_length: 21.684211 | max_line_length: 78 | alphanum_fraction: 0.65534

---
hexsha: 1464e4dffdf9883c6e522677c124d041b0d7d669 | size: 6,180 | ext: py | lang: Python
max_stars: setup.py @ snsnlou/mars (6b8eec162eccc8bb980a98ca2cf1e6a4b866d302) | licenses: ["Apache-2.0"] | count: 1 | events: 2021-11-30T12:07:21.000Z to 2021-11-30T12:07:21.000Z
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import re
import sys
from setuptools import setup, find_packages, Extension
from distutils.sysconfig import get_config_var
from distutils.version import LooseVersion
import numpy as np
from Cython.Build import cythonize
try:
import distutils.ccompiler
if sys.platform != 'win32':
from numpy.distutils.ccompiler import CCompiler_compile
distutils.ccompiler.CCompiler.compile = CCompiler_compile
except ImportError:
pass
# From https://github.com/pandas-dev/pandas/pull/24274:
# For mac, ensure extensions are built for macos 10.9 when compiling on a
# 10.9 system or above, overriding distuitls behaviour which is to target
# the version that python was built for. This may be overridden by setting
# MACOSX_DEPLOYMENT_TARGET before calling setup.py
if sys.platform == 'darwin':
if 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
current_system = LooseVersion(platform.mac_ver()[0])
python_target = LooseVersion(
get_config_var('MACOSX_DEPLOYMENT_TARGET'))
if python_target < '10.9' and current_system >= '10.9':
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
repo_root = os.path.dirname(os.path.abspath(__file__))
def execfile(fname, globs, locs=None):
locs = locs or globs
exec(compile(open(fname).read(), fname, "exec"), globs, locs)
version_file_path = os.path.join(repo_root, 'mars', '_version.py')
version_ns = {'__file__': version_file_path}
execfile(version_file_path, version_ns)
version = version_ns['__version__']
# check version vs tag
if os.environ.get('GIT_TAG') and re.search(r'v\d', os.environ['GIT_TAG']) \
and os.environ['GIT_TAG'] != 'v' + version:
raise ValueError('Tag %r does not match source version %r'
% (os.environ['GIT_TAG'], version))
requirements = []
with open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:
requirements.extend(f.read().splitlines())
extra_requirements = []
with open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:
extra_requirements.extend(f.read().splitlines())
dev_requirements = []
with open(os.path.join(repo_root, 'requirements-dev.txt'), 'r') as f:
dev_requirements.extend(f.read().splitlines())
vineyard_requirements = []
with open(os.path.join(repo_root, 'requirements-vineyard.txt'), 'r') as f:
vineyard_requirements.extend(f.read().splitlines())
long_description = None
if os.path.exists(os.path.join(repo_root, 'README.rst')):
with open(os.path.join(repo_root, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
if os.path.exists(os.path.join(repo_root, '.git')):
git_info = version_ns['get_git_info']()
if git_info:
with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:
git_file.write(' '.join(git_info))
cythonize_kw = dict(language_level=sys.version_info[0])
cy_extension_kw = dict()
if os.environ.get('CYTHON_TRACE'):
cy_extension_kw['define_macros'] = [('CYTHON_TRACE_NOGIL', '1'), ('CYTHON_TRACE', '1')]
cythonize_kw['compiler_directives'] = {'linetrace': True}
if 'MSC' in sys.version:
extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]
cy_extension_kw['extra_compile_args'] = extra_compile_args
else:
extra_compile_args = ['-O3']
cy_extension_kw['extra_compile_args'] = extra_compile_args
def _discover_pyx():
exts = dict()
for root, _, files in os.walk(os.path.join(repo_root, 'mars')):
for fn in files:
if not fn.endswith('.pyx'):
continue
full_fn = os.path.relpath(os.path.join(root, fn), repo_root)
mod_name = full_fn.replace('.pyx', '').replace(os.path.sep, '.')
exts[mod_name] = Extension(mod_name, [full_fn], **cy_extension_kw)
return exts
cy_extension_kw['include_dirs'] = [np.get_include()]
extensions_dict = _discover_pyx()
cy_extensions = list(extensions_dict.values())
extensions = cythonize(cy_extensions, **cythonize_kw) + \
[Extension('mars.lib.mmh3', ['mars/lib/mmh3_src/mmh3module.cpp', 'mars/lib/mmh3_src/MurmurHash3.cpp'])]
setup_options = dict(
name='pymars',
version=version,
description='MARS: a tensor-based unified framework for large-scale data computation.',
long_description=long_description,
long_description_content_type='text/x-rst',
author='Qin Xuye',
author_email='qin@qinxuye.me',
maintainer='Qin Xuye',
maintainer_email='qin@qinxuye.me',
url='http://github.com/mars-project/mars',
license='Apache License 2.0',
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries',
],
packages=find_packages(exclude=('*.tests.*', '*.tests')),
include_package_data=True,
entry_points={'console_scripts': [
'mars-scheduler = mars.scheduler.__main__:main',
'mars-worker = mars.worker.__main__:main',
'mars-web = mars.web.__main__:main',
]},
python_requires='>=3.6',
install_requires=requirements,
ext_modules=extensions,
extras_require={
'distributed': extra_requirements,
'dev': extra_requirements + dev_requirements,
'vineyard': vineyard_requirements,
}
)
setup(**setup_options)
avg_line_length: 37.005988 | max_line_length: 107 | alphanum_fraction: 0.69288

---
hexsha: 1de29ca7efdc3a1c90102bcee38b257eb74a38bc | size: 684 | ext: py | lang: Python
max_stars: marvin_teste_engine/prediction/predictor.py @ cerliofaccojr/cancer-diagnosis-engine (08f1e8568579d7c31ec0772f214eca8c608649d8) | licenses: ["Apache-2.0"] | count: null | events: null to null
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

#!/usr/bin/env python
# coding=utf-8
"""Predictor engine action.
Use this module to add the project main code.
"""
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.engine_base import EngineBasePrediction
import numpy as np
__all__ = ['Predictor']
logger = get_logger('predictor')
class Predictor(EngineBasePrediction):
def __init__(self, **kwargs):
super(Predictor, self).__init__(**kwargs)
def execute(self, input_message, params, **kwargs):
print(input_message)
        final_prediction = self.marvin_model.predict(list(map(float, input_message["message"])))[0]
return "Maoi " + str(final_prediction)
avg_line_length: 22.8 | max_line_length: 92 | alphanum_fraction: 0.725146

---
hexsha: c6b963f29e37ffe27aab01e874254619e986a6ab | size: 7,071 | ext: py | lang: Python
max_stars: python/GafferUI/ScriptWindow.py @ davidsminor/gaffer (64f75654ce778105dd93fbaad0e4486a5577cd09) | licenses: ["BSD-3-Clause"] | count: null | events: null to null
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: null | events: null to null

##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import weakref
import IECore
import Gaffer
import GafferUI
class ScriptWindow( GafferUI.Window ) :
def __init__( self, script, **kw ) :
GafferUI.Window.__init__( self, **kw )
self.__script = script
self.__listContainer = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 2 )
menuDefinition = self.menuDefinition( script.applicationRoot() ) if script.applicationRoot() else IECore.MenuDefinition()
self.__listContainer.append( GafferUI.MenuBar( menuDefinition ) )
applicationRoot = self.__script.ancestor( Gaffer.ApplicationRoot.staticTypeId() )
layouts = GafferUI.Layouts.acquire( applicationRoot ) if applicationRoot is not None else None
if layouts is not None and "Default" in layouts.names() :
self.setLayout( layouts.create( "Default", script ) )
else :
self.setLayout( GafferUI.CompoundEditor( script ) )
self.setChild( self.__listContainer )
self.__closedConnection = self.closedSignal().connect( Gaffer.WeakMethod( self.__closed ) )
self.__scriptPlugSetConnection = script.plugSetSignal().connect( Gaffer.WeakMethod( self.__scriptPlugChanged ) )
self.__updateTitle()
ScriptWindow.__instances.append( weakref.ref( self ) )
def scriptNode( self ) :
return self.__script
def setLayout( self, compoundEditor ) :
if len( self.__listContainer ) > 1 :
del self.__listContainer[1]
assert( compoundEditor.scriptNode().isSame( self.scriptNode() ) )
self.__listContainer.append( compoundEditor, expand=True )
def getLayout( self ) :
return self.__listContainer[1]
def _acceptsClose( self ) :
if not self.__script["unsavedChanges"].getValue() :
return True
f = self.__script["fileName"].getValue()
f = f.rpartition( "/" )[2] if f else "untitled"
dialogue = GafferUI.ConfirmationDialogue(
"Discard Unsaved Changes?",
"The file %s has unsaved changes. Do you want to discard them?" % f,
confirmLabel = "Discard"
)
return dialogue.waitForConfirmation( parentWindow=self )
def __closed( self, widget ) :
scriptParent = self.__script.parent()
if scriptParent is not None :
scriptParent.removeChild( self.__script )
def __scriptPlugChanged( self, plug ) :
if plug.isSame( self.__script["fileName"] ) or plug.isSame( self.__script["unsavedChanges"] ) :
self.__updateTitle()
def __updateTitle( self ) :
f = self.__script["fileName"].getValue()
if not f :
f = "untitled"
d = ""
else :
d, n, f = f.rpartition( "/" )
d = " - " + d
u = " *" if self.__script["unsavedChanges"].getValue() else ""
self.setTitle( "Gaffer : %s%s%s" % ( f, u, d ) )
__instances = [] # weak references to all instances - used by acquire()
## Returns the ScriptWindow for the specified script, creating one
# if necessary.
@staticmethod
def acquire( script ) :
for w in ScriptWindow.__instances :
scriptWindow = w()
if scriptWindow is not None and scriptWindow.scriptNode().isSame( script ) :
return scriptWindow
return ScriptWindow( script )
## Returns an IECore.MenuDefinition which is used to define the menu bars for all ScriptWindows
# created as part of the specified application. This can be edited at any time to modify subsequently
# created ScriptWindows - typically editing would be done as part of gaffer startup.
@staticmethod
def menuDefinition( applicationOrApplicationRoot ) :
if isinstance( applicationOrApplicationRoot, Gaffer.Application ) :
applicationRoot = applicationOrApplicationRoot.root()
else :
assert( isinstance( applicationOrApplicationRoot, Gaffer.ApplicationRoot ) )
applicationRoot = applicationOrApplicationRoot
menuDefinition = getattr( applicationRoot, "_scriptWindowMenuDefinition", None )
if menuDefinition :
return menuDefinition
menuDefinition = IECore.MenuDefinition()
applicationRoot._scriptWindowMenuDefinition = menuDefinition
return menuDefinition
## This function provides the top level functionality for instantiating
# the UI. Once called, new ScriptWindows will be instantiated for each
# script added to the application, and EventLoop.mainEventLoop().stop() will
# be called when the last script is removed.
__scriptAddedConnections = []
__scriptRemovedConnections = []
@classmethod
def connect( cls, applicationRoot ) :
cls.__scriptAddedConnections.append( applicationRoot["scripts"].childAddedSignal().connect( ScriptWindow.__scriptAdded ) )
cls.__scriptRemovedConnections.append( applicationRoot["scripts"].childRemovedSignal().connect( ScriptWindow.__staticScriptRemoved ) )
__automaticallyCreatedInstances = [] # strong references to instances made by __scriptAdded()
@staticmethod
def __scriptAdded( scriptContainer, script ) :
w = ScriptWindow( script )
w.setVisible( True )
ScriptWindow.__automaticallyCreatedInstances.append( w )
@staticmethod
def __staticScriptRemoved( scriptContainer, script ) :
for w in ScriptWindow.__automaticallyCreatedInstances :
if w.scriptNode().isSame( script ) :
ScriptWindow.__automaticallyCreatedInstances.remove( w )
if not len( scriptContainer.children() ) :
GafferUI.EventLoop.mainEventLoop().stop()
avg_line_length: 36.448454 | max_line_length: 136 | alphanum_fraction: 0.721963

---
hexsha: 25457fde8d12d9b864c7b58ba528d92a8f01e1f9 | size: 16,442 | ext: py | lang: Python
max_stars: scrabble/char2ir.py @ jbkoh/Scrabble (6d64be2e9c7d0392332592c804eb15c20a3e2516) | licenses: ["BSD-3-Clause"] | count: 6 | events: 2018-11-20T13:58:58.000Z to 2020-07-10T13:43:37.000Z
max_issues: (same path/repo/head/licenses as max_stars) | count: null | events: null to null
max_forks: (same path/repo/head/licenses as max_stars) | count: 2 | events: 2018-09-05T12:16:38.000Z to 2022-03-18T07:29:41.000Z

import os
from uuid import uuid4
from operator import itemgetter
from pathlib import Path
import pycrfsuite
from bson.binary import Binary as BsonBinary
import arrow
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.metrics import precision_recall_fscore_support, f1_score
from .mongo_models import store_model, get_model, get_tags_mapping, \
get_crf_results, store_result, get_entity_results
from .base_scrabble import BaseScrabble
from .common import *
from . import eval_func
curr_dir = Path(os.path.dirname(os.path.abspath(__file__)))
def gen_uuid():
return str(uuid4())
class Char2Ir(BaseScrabble):
def __init__(self,
target_building,
target_srcids,
building_label_dict,
building_sentence_dict,
source_buildings=[],
source_sample_num_list=[],
learning_srcids=[],
config={}
):
super(Char2Ir, self).__init__(
target_building,
target_srcids,
building_label_dict,
building_sentence_dict,
{},
source_buildings,
source_sample_num_list,
learning_srcids,
config)
self.model_uuid = None
if 'crftype' in config:
self.crftype = config['crftype']
else:
self.crftype = 'crfsuite'
if 'crfalgo' in config:
self.crfalgo = config['crfalgo']
else:
self.crfalgo = 'ap'
if 'crfqs' in config:
self.query_strategy = config['crfqs']
else:
self.query_strategy = 'confidence'
        if 'use_cluster_flag' in config:
            self.use_cluster_flag = config['use_cluster_flag']
else:
self.use_cluster_flag = True
if 'available_metadata_types' in config:
self.available_metadata_types = config['available_metadata_types']
else:
self.available_metadata_types = ['VendorGivenName',
'BACnetDescription',
'BACnetName',
]
self.concatenate_sentences = False
# Note: Hardcode to disable use_brick_flag
"""
if 'use_brick_flag' in config:
self.use_brick_flag = config['use_brick_flag']
else:
self.use_brick_flag = False # Temporarily disable it
"""
self.use_brick_flag = False
self._init_data(learning_srcids)
def _init_data(self, learning_srcids=[]):
self.sentence_dict = {}
self.label_dict = {}
self.building_cluster_dict = {}
for building, source_sample_num in zip(self.source_buildings,
self.source_sample_num_list):
self.sentence_dict.update(self.building_sentence_dict[building])
one_label_dict = self.building_label_dict[building]
self.label_dict.update(one_label_dict)
if learning_srcids:
self.learning_srcids = learning_srcids
else:
sample_srcid_list = select_random_samples(
building = building,
srcids = one_label_dict.keys(),
n = source_sample_num,
use_cluster_flag = self.use_cluster_flag,
sentence_dict = self.building_sentence_dict[building],
shuffle_flag = False,
)
self.learning_srcids += sample_srcid_list
if building not in self.building_cluster_dict:
self.building_cluster_dict[building] = get_word_clusters(
self.building_sentence_dict[building])
# Construct Brick examples
brick_sentence_dict = dict()
brick_label_dict = dict()
if self.use_brick_flag:
with open(curr_dir / 'metadata/brick_tags_labels.json', 'r') as fp:
tag_label_list = json.load(fp)
for tag_labels in tag_label_list:
# Padding characters could be appended before and after each tag to keep
# it separate from its neighbours; they are commented out below to check
# whether the model works without them.
# char_tags = [' '] + list(map(itemgetter(0), tag_labels)) + [' ']
char_tags = list(map(itemgetter(0), tag_labels))
# char_labels = ['O'] + list(map(itemgetter(1), tag_labels)) + ['O']
char_labels = list(map(itemgetter(1), tag_labels))
brick_sentence_dict[''.join(char_tags)] = char_tags + ['NEWLINE']
brick_label_dict[''.join(char_tags)] = char_labels + ['O']
self.sentence_dict.update(brick_sentence_dict)
self.label_dict.update(brick_label_dict)
self.brick_srcids = list(brick_sentence_dict.keys())
def merge_sentences(self, sentences):
return '@\t@'.join(['@'.join(sentences[column]) for column in column_names
if column in sentences]).split('@')
def merge_labels(self, labels):
return '@O@'.join(['@'.join(labels[column]) for column in column_names
if column in labels]).split('@')
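# Worked example (illustrative input): with sentences {'VendorGivenName': ['R', 'M'], 'BACnetName': ['T']},
# merge_sentences returns ['R', 'M', '\t', 'T'] -- the columns are joined with a tab character --
# and merge_labels inserts an 'O' label at each join so the two lists stay aligned.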
def _add_point_to_model(self, srcid, trainer):
if self.concatenate_sentences:
sentence = self.merge_sentences(self.sentence_dict[srcid])
labels = self.merge_labels(self.label_dict[srcid])
assert len(sentence) == len(labels)
trainer.append(pycrfsuite.ItemSequence(
self._calc_features(sentence, None)), labels)
else:
for metadata_type, sentence in self.sentence_dict[srcid].items():
labels = self.label_dict[srcid][metadata_type]
trainer.append(pycrfsuite.ItemSequence(
self._calc_features(sentence, None)), labels)
def update_model(self, srcids):
assert (len(self.source_buildings) == len(self.source_sample_num_list))
self.learning_srcids += srcids
if self.crfalgo == 'prev':
crfalgo = 'ap'
else:
crfalgo = self.crfalgo
if self.crfalgo == 'default':
trainer = pycrfsuite.Trainer(verbose=False)
else:
trainer = pycrfsuite.Trainer(verbose=False, algorithm=crfalgo)
if self.crfalgo == 'ap':
#trainer.set('max_iterations', 125)
trainer.set('max_iterations', 200)
elif self.crfalgo == 'lbfgs':
trainer.set('c2', 0.02)
# algorithm: {'lbfgs', 'l2sgd', 'ap', 'pa', 'arow'}
if self.crfalgo != 'prev':
trainer.set_params({'feature.possible_states': True,
'feature.possible_transitions': True})
for srcid in self.learning_srcids:
#for metadata_type, sentence in self.sentence_dict[srcid].items():
# labels = self.label_dict[srcid][metadata_type]
# trainer.append(pycrfsuite.ItemSequence(
# self._calc_features(sentence, None)), labels)
self._add_point_to_model(srcid, trainer)
if self.use_brick_flag:
for srcid in self.brick_srcids:
# the brick examples were merged into sentence_dict/label_dict in _init_data
sentence = self.sentence_dict[srcid]
labels = self.label_dict[srcid]
trainer.append(pycrfsuite.ItemSequence(
self._calc_features(sentence, None)), labels)
model_uuid = gen_uuid()
crf_model_file = 'temp/{0}.{1}.model'.format(model_uuid, 'crfsuite')
t0 = arrow.get()
trainer.train(crf_model_file)
t1 = arrow.get()
print('training crf took: {0}'.format(t1 - t0))
with open(crf_model_file, 'rb') as fp:
model_bin = fp.read()
model = {
# 'source_list': sample_dict,
'gen_time': arrow.get().datetime,
'use_cluster_flag': self.use_cluster_flag,
'use_brick_flag': self.use_brick_flag,
'model_binary': BsonBinary(model_bin),
'source_building_count': len(self.source_buildings),
'learning_srcids': sorted(set(self.learning_srcids)),
'uuid': model_uuid,
'crftype': 'crfsuite'
}
store_model(model)
os.remove(crf_model_file)
self.model_uuid = model_uuid
@staticmethod
def _get_model(model_uuid):
model_query = {
'uuid': model_uuid
}
model = get_model(model_query)
return model
def select_informative_samples(self, sample_num):
target_sentence_dict = {srcid: self.sentence_dict[srcid]
for srcid in self.target_srcids}
model = self._get_model(self.model_uuid)
predicted_dict, score_dict = self._predict_func(model,
target_sentence_dict,
self.crftype)
#cluster_dict = get_cluster_dict(self.target_building)
cluster_dict = self.building_cluster_dict[self.target_building]
new_srcids = []
if self.query_strategy == 'confidence':
for srcid, scores in score_dict.items():
# Normalize with length
curr_score = 0
sentence_len = 0
for metadata_type, score in scores.items():
sentence = self.sentence_dict[srcid][metadata_type]
if not sentence:
continue
curr_score += np.log(score)
sentence_len += len(sentence)
score_dict[srcid] = curr_score / sentence_len
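# score_dict[srcid] now holds the average log-probability per character,
# so longer point names are not penalised when ranking by confidence.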
sorted_scores = sorted(score_dict.items(), key=itemgetter(1))
# Use the word clusters to avoid selecting overly similar samples.
added_cids = []
new_srcid_cnt = 0
for srcid, score in sorted_scores:
if srcid in self.target_srcids:
if srcid in self.learning_srcids:
continue
the_cid = None
for cid, cluster in cluster_dict.items():
if srcid in cluster:
the_cid = cid
break
if the_cid in added_cids:
continue
added_cids.append(the_cid)
new_srcids.append(srcid)
new_srcid_cnt += 1
if new_srcid_cnt == sample_num:
break
return new_srcids
def _load_crf_model_files(self, model, filename, crftype):
crf_model_file = filename
with open(crf_model_file, 'wb') as fp:
fp.write(model['model_binary'])
def _calc_features(self, sentence, building=None):
sentenceFeatures = list()
sentence = ['$' if c.isdigit() else c for c in sentence]
for i, word in enumerate(sentence):
features = {
'word.lower=' + word.lower(): 1.0,
'word.isdigit': float(word.isdigit())
}
if i == 0:
features['BOS'] = 1.0
else:
features['-1:word.lower=' + sentence[i - 1].lower()] = 1.0
if i == 0:
pass
elif i == 1:
features['SECOND'] = 1.0
else:
features['-2:word.lower=' + sentence[i - 2].lower()] = 1.0
if i<len(sentence)-1:
features['+1:word.lower='+sentence[i+1].lower()] = 1.0
else:
features['EOS'] = 1.0
sentenceFeatures.append(features)
return sentenceFeatures
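# Illustrative call: _calc_features(['A', '1']) maps the digit to '$' and returns
# [{'word.lower=a': 1.0, 'word.isdigit': 0.0, 'BOS': 1.0, '+1:word.lower=$': 1.0},
#  {'word.lower=$': 1.0, 'word.isdigit': 0.0, '-1:word.lower=a': 1.0, 'SECOND': 1.0, 'EOS': 1.0}]
# (note the digit is replaced by '$' before the isdigit feature is computed, so it is always 0.0)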
def divide_list(self, l, sep_indices):
base_idx = 0
d = defaultdict(list)
column_idx = -1
for column_idx, sep_idx in enumerate(sep_indices):
curr_metadata_type = column_names[column_idx]
for c in l[base_idx:sep_idx]:
d[curr_metadata_type].append(c)
base_idx = sep_idx
if base_idx < len(l):
curr_metadata_type = column_names[column_idx+1]
for c in l[base_idx:]:
d[curr_metadata_type].append(c)
return dict(d)
def _predict_func(self, model, sentence_dict, crftype):
crf_model_file = 'temp/{0}.{1}.model'.format(self.model_uuid, crftype)
self._load_crf_model_files(model, crf_model_file, crftype)
predicted_dict = dict()
score_dict = dict()
begin_time = arrow.get()
if crftype == 'crfsuite':
# Init tagger
tagger = pycrfsuite.Tagger()
tagger.open(crf_model_file)
# Tagging sentences with tagger
for srcid, sentences in sentence_dict.items():
predicteds = {}
scores = {}
if self.concatenate_sentences:
sentence = self.merge_sentences(sentences)
predicted = tagger.tag(self._calc_features(sentence))
score = tagger.probability(predicted)
predicteds['VendorGivenName'] = predicted
scores['VendorGivenName'] = score
else:
for metadata_type, sentence in sentences.items():
predicted = tagger.tag(self._calc_features(sentence))
score = tagger.probability(predicted)
predicteds[metadata_type] = predicted
scores[metadata_type] = score
predicted_dict[srcid] = predicteds
score_dict[srcid] = scores
return predicted_dict, score_dict
def _predict_and_proba(self, target_srcids):
# Validate that sentence data exists for every target srcid
for srcid in target_srcids:
try:
assert srcid in self.sentence_dict
except AssertionError:
# drop into the debugger when a srcid is missing its sentence data
pdb.set_trace()
target_sentence_dict = {srcid: self.sentence_dict[srcid]
for srcid in target_srcids}
model = self._get_model(self.model_uuid)
predicted_dict, score_dict = self._predict_func(model,
target_sentence_dict,
self.crftype)
# Construct output data
pred_phrase_dict = make_phrase_dict(target_sentence_dict, predicted_dict)
return predicted_dict, score_dict, pred_phrase_dict
def predict(self, target_srcids=None):
if not target_srcids:
target_srcids = self.target_srcids
predicted_dict, _, _ = self._predict_and_proba(target_srcids)
return predicted_dict
def predict_proba(self, target_srcids=None):
if not target_srcids:
target_srcids = self.target_srcids
_, score_dict, _ = self._predict_and_proba(target_srcids)
return score_dict
def learn_auto(self, iter_num=1):
pass
def evaluate(self, preds):
srcids = list(preds.keys())
pred_tags_list = [reduce(adder,
[preds[srcid][t]
for t in self.available_metadata_types])
for srcid in srcids]
true_tags_list = [reduce(adder,
[self.label_dict[srcid][t]
for t in self.available_metadata_types])
for srcid in srcids]
acc = eval_func.sequential_accuracy(true_tags_list,
pred_tags_list)
pred = [preds[srcid] for srcid in preds.keys()]
true = [self.label_dict[srcid] for srcid in preds.keys()]
mlb = MultiLabelBinarizer()
mlb.fit(pred + true)
encoded_true = mlb.transform(true)
encoded_pred = mlb.transform(pred)
macro_f1 = f1_score(encoded_true, encoded_pred, average='macro')
f1 = f1_score(encoded_true, encoded_pred, average='weighted')
res = {
'accuracy': acc,
'f1': f1,
'macro_f1': macro_f1
}
return res
| 40.398034
| 84
| 0.556502
|
8d8c7a7d53c547581dc44d4c03a4bf8c7755301c
| 5,888
|
py
|
Python
|
deprecated_nets/net_original.py
|
danielmk/pyDentateeLife2020
|
b4a9f2beaa0c74dbc9583e2cf228856612596f8a
|
[
"MIT"
] | 1
|
2022-02-24T20:39:46.000Z
|
2022-02-24T20:39:46.000Z
|
deprecated_nets/net_original.py
|
danielmk/pyDentateeLife2020
|
b4a9f2beaa0c74dbc9583e2cf228856612596f8a
|
[
"MIT"
] | null | null | null |
deprecated_nets/net_original.py
|
danielmk/pyDentateeLife2020
|
b4a9f2beaa0c74dbc9583e2cf228856612596f8a
|
[
"MIT"
] | 4
|
2020-02-18T09:25:20.000Z
|
2021-11-20T23:52:29.000Z
|
# -*- coding: utf-8 -*-
"""
This module implements the class StandardNetwork.
StandardNetwork creates a ring network as defined in Santhakumar et al. 2005
with some changes as in Yim et al. 2015.
See StandardNetwork docstring for details.
Created on Tue Nov 28 13:01:38 2017
@author: DanielM
"""
from neuron import h, gui
import ouropy
import matplotlib.pyplot as plt
import numpy as np
from granulecell import GranuleCell
from mossycell_cat import MossyCell
from basketcell import BasketCell
from hippcell import HippCell
h.nrn_load_dll(r"C:\Users\DanielM\Repos\models_dentate\dentate_gyrus_Santhakumar2005_and_Yim_patterns\dentategyrusnet2005\nrnmech.dll")
class StandardNetworkOriginal(ouropy.gennetwork.GenNetwork):
""" This model implements the ring model from Santhakumar et al. 2005.
with some changes as in Yim et al. 2015.
It features inhibition but omits the MC->GC connection.
"""
def __init__(self, seed=None, temporal_patterns=np.array([]),
spatial_patterns_gcs=np.array([]),
spatial_patterns_bcs=np.array([]), sprouting=0):
# Setup cells
self.mk_population(GranuleCell, 500)
self.mk_population(MossyCell, 15)
self.mk_population(BasketCell, 6)
self.mk_population(HippCell, 6)
# Set seed for reproducibility
if seed is not None:
self.set_numpy_seed(seed)
# Setup recordings
self.populations[0].record_aps()
self.populations[1].record_aps()
self.populations[2].record_aps()
self.populations[3].record_aps()
temporal_patterns = np.atleast_2d(temporal_patterns)
if spatial_patterns_gcs.any() and temporal_patterns.any():
spatial_patterns_gcs = np.atleast_2d(spatial_patterns_gcs)
for pat in range(len(spatial_patterns_gcs)):
ouropy.gennetwork.PerforantPathPoissonStimulation(self.populations[0],
temporal_patterns[pat],
spatial_patterns_gcs[pat],
'dd',
1.5, 5.5, 0, 2*10**(-2))
if spatial_patterns_bcs.any() and temporal_patterns.any():
spatial_patterns_bcs = np.atleast_2d(spatial_patterns_bcs)
for pat in range(len(spatial_patterns_bcs)):
# PP -> BC
ouropy.gennetwork.PerforantPathPoissonStimulation(self.populations[2],
temporal_patterns[pat],
spatial_patterns_bcs[pat],
'ddend',
2, 6.3, 0, 1*10**(-2))
# Sprouting
ouropy.gennetwork.Exp2SynConnection(self.populations[0], self.populations[0],
100, 'proxd', sprouting,
1.5, 5.5, 0, 10, 0.8, 2*10**(-3))
# GC -> MC
ouropy.gennetwork.Exp2SynConnection(self.populations[0], self.populations[1],
3, 'proxd',
1, 0.5,6.2, 0, 10, 1.5, 0.2*10**(-3))
# GC -> BC
ouropy.gennetwork.Exp2SynConnection(self.populations[0], self.populations[2],
3, 'proxd',
1, 0.3, 0.6, 0, 10, 0.8, 4.7*10**(-3))
# GC -> HC
ouropy.gennetwork.Exp2SynConnection(self.populations[0], self.populations[3],
5, 'proxd',
3, 0.3, 0.6, 0, 10, 1.5, 0.5*10**(-3))
# MC -> MC
ouropy.gennetwork.Exp2SynConnection(self.populations[1], self.populations[1],
6, 'proxd',
3, 0.45, 2.2, 0, 10, 2, 0.5*10**(-3))
# MC -> BC
ouropy.gennetwork.Exp2SynConnection(self.populations[1], self.populations[2],
3, 'proxd',
1, 0.1, 0.1, 1, 10, 3, 0.3*10**(-3))
# MC -> HC
ouropy.gennetwork.Exp2SynConnection(self.populations[1], self.populations[3],
5, 'midd',
2, 0.9, 3.6, 0, 10, 3,0.2*10**(-3))
# BC -> GC
#ORIGINAL
ouropy.gennetwork.Exp2SynConnection(self.populations[2], self.populations[0],
140, 'soma',
100, 0.26, 5.5, -70, -10, 0.85, 1.6*10**(-3))
# BC -> MC
ouropy.gennetwork.Exp2SynConnection(self.populations[2], self.populations[1],
7, 'proxd',
3, 0.3, 3.3, -70, -10, 1.5, 1.5*10**(-3))
# BC -> BC
ouropy.gennetwork.Exp2SynConnection(self.populations[2], self.populations[2],
3, 'proxd',
2, 0.16, 1.8, -70, -10, 0.8, 7.6*10**(-3))
# HC -> GC
#ORIGINAL
ouropy.gennetwork.Exp2SynConnection(self.populations[3], self.populations[0],
260, 'dd',
160, 0.5, 6, -70, 10, 1.6, 0.5*10**(-3))
# HC -> MC
ouropy.gennetwork.Exp2SynConnection(self.populations[3], self.populations[1],
5, ['mid1d', 'mid2d'],
4, 0.5, 6, -70, 10, 1, 1.5*10**(-3))
# HC -> BC
ouropy.gennetwork.Exp2SynConnection(self.populations[3], self.populations[2],
5, 'ddend',
4, 0.4, 5.8, -70, 10, 1.6, 0.5*10**(-3))
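# Minimal usage sketch (illustrative values; assumes the NEURON mechanisms above are
# compiled for this machine and that ouropy is importable):
# nw = StandardNetworkOriginal(seed=10000,
#                              temporal_patterns=np.array([[10., 20., 30.]]),
#                              spatial_patterns_gcs=np.arange(20).reshape(1, -1),
#                              spatial_patterns_bcs=np.arange(3).reshape(1, -1))
# h.run()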
| 43.614815
| 135
| 0.492018
|
bb8b400ec722626b23eec551616958c0d96ed59d
| 1,115
|
py
|
Python
|
robeep/plugin/check_device_temperature.py
|
k-nii0211/rbp_agent
|
53724797ee7b579fc98d63809b9551867929476e
|
[
"MIT"
] | null | null | null |
robeep/plugin/check_device_temperature.py
|
k-nii0211/rbp_agent
|
53724797ee7b579fc98d63809b9551867929476e
|
[
"MIT"
] | null | null | null |
robeep/plugin/check_device_temperature.py
|
k-nii0211/rbp_agent
|
53724797ee7b579fc98d63809b9551867929476e
|
[
"MIT"
] | null | null | null |
import logging
_logger = logging.getLogger(__name__)
try:
from naoqi import ALProxy
except ImportError:
ALProxy = None
_keys = [
"Head",
"Battery",
"HeadYaw",
"HeadPitch",
"LElbowYaw",
"LElbowRoll",
"RElbowYaw",
"RElbowRoll",
"LHand",
"LWristYaw",
"RHand",
"RWristYaw",
"LShoulderPitch",
"LShoulderRoll",
"RShoulderPitch",
"RShoulderRoll",
"HipRoll",
"HipPitch",
"KneePitch",
"WheelFL",
"WheelFR",
"WheelB"
]
class CheckDeviceTemperature(object):
def __call__(self):
if ALProxy is None:
_logger.warning('ALProxy is None.')
return None
memProxy = ALProxy("ALMemory", "localhost", 9559)
ret = dict()
for key in _keys:
value = memProxy.getData(
"Device/SubDeviceList/%s/Temperature/Sensor/Status" % key
)
if not value:
_logger.warning('No value returned for %s' % key)
continue
ret[key] = value
return ret
check_device_temperature = CheckDeviceTemperature
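# Usage sketch (requires a reachable NAOqi runtime on localhost:9559):
# checker = check_device_temperature()
# statuses = checker()  # e.g. {'Head': 0, 'Battery': 0, ...}, or None when ALProxy is unavailable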
| 20.272727
| 73
| 0.564126
|
e78ed0b050fc72838d1b718ba26762517b9c6605
| 6,226
|
py
|
Python
|
igibson/examples/vr/in_development/vr_hand_speed_benchmark.py
|
mamadbiabon/iGibson
|
d416a470240eb7ad86e04fee475ae4bd67263a7c
|
[
"MIT"
] | null | null | null |
igibson/examples/vr/in_development/vr_hand_speed_benchmark.py
|
mamadbiabon/iGibson
|
d416a470240eb7ad86e04fee475ae4bd67263a7c
|
[
"MIT"
] | null | null | null |
igibson/examples/vr/in_development/vr_hand_speed_benchmark.py
|
mamadbiabon/iGibson
|
d416a470240eb7ad86e04fee475ae4bd67263a7c
|
[
"MIT"
] | null | null | null |
""" This demo can be used to benchmark how speedily the VR hand
can be used. The aim is to put all the objects into the box on the left
side of the table.
You can use the left and right controllers to start/stop/reset the timer,
as well as show/hide its display. The "overlay toggle" action and its
corresponding button index mapping can be found in the vr_config.yaml file in the igibson folder.
"""
import os
import pybullet as p
import pybullet_data
import igibson
from igibson.objects.articulated_object import ArticulatedObject
from igibson.objects.ycb_object import YCBObject
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import VrSettings
from igibson.robots.behavior_robot import BehaviorRobot
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
from igibson.utils.vr_utils import VrTimer
# Set to true to use viewer manipulation instead of VR
# Set to false by default so this benchmark task can be performed in VR
VIEWER_MANIP = False
# Set to true to print out render, physics and overall frame FPS
PRINT_STATS = False
# Set to true to use gripper instead of VR hands
USE_GRIPPER = False
# HDR files for PBR rendering
hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
light_modulation_map_filename = os.path.join(
igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
)
background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
def main():
# VR rendering settings
vr_rendering_settings = MeshRendererSettings(
optimized=True,
fullscreen=False,
env_texture_filename=hdr_texture,
env_texture_filename2=hdr_texture2,
env_texture_filename3=background_texture,
light_modulation_map_filename=light_modulation_map_filename,
enable_shadow=True,
enable_pbr=True,
msaa=True,
light_dimming_factor=1.0,
)
vr_settings = VrSettings()
if VIEWER_MANIP:
s = Simulator(
mode="gui_interactive",
image_width=512,
image_height=512,
rendering_settings=vr_rendering_settings,
)
vr_settings.turn_off_vr_mode()
else:
s = Simulator(mode="vr", rendering_settings=vr_rendering_settings, vr_settings=vr_settings)
scene = InteractiveIndoorScene("Rs_int")
scene._set_first_n_objects(2)
s.import_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
if not VIEWER_MANIP:
vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER)
objects = [
("jenga/jenga.urdf", (1.300000, -0.700000, 0.750000), (0.000000, 0.707107, 0.000000, 0.707107)),
("jenga/jenga.urdf", (1.200000, -0.700000, 0.750000), (0.000000, 0.707107, 0.000000, 0.707107)),
("jenga/jenga.urdf", (1.100000, -0.700000, 0.750000), (0.000000, 0.707107, 0.000000, 0.707107)),
("jenga/jenga.urdf", (1.000000, -0.700000, 0.750000), (0.000000, 0.707107, 0.000000, 0.707107)),
("jenga/jenga.urdf", (0.900000, -0.700000, 0.750000), (0.000000, 0.707107, 0.000000, 0.707107)),
("jenga/jenga.urdf", (0.800000, -0.700000, 0.750000), (0.000000, 0.707107, 0.000000, 0.707107)),
("table/table.urdf", (1.000000, -0.200000, 0.000000), (0.000000, 0.000000, 0.707107, 0.707107)),
("duck_vhacd.urdf", (1.050000, -0.500000, 0.700000), (0.000000, 0.000000, 0.707107, 0.707107)),
("duck_vhacd.urdf", (0.950000, -0.100000, 0.700000), (0.000000, 0.000000, 0.707107, 0.707107)),
("sphere_small.urdf", (0.850000, -0.400000, 0.700000), (0.000000, 0.000000, 0.707107, 0.707107)),
("duck_vhacd.urdf", (0.850000, -0.400000, 1.00000), (0.000000, 0.000000, 0.707107, 0.707107)),
]
for item in objects:
fpath = item[0]
pos = item[1]
orn = item[2]
item_ob = ArticulatedObject(fpath, scale=1, renderer_params={"use_pbr": False, "use_pbr_mapping": False})
s.import_object(item_ob)
item_ob.set_position(pos)
item_ob.set_orientation(orn)
for i in range(3):
obj = YCBObject("003_cracker_box")
s.import_object(obj)
obj.set_position_orientation([1.100000 + 0.12 * i, -0.300000, 0.750000], [0, 0, 0, 1])
obj = ArticulatedObject(
os.path.join(
igibson.ig_dataset_path,
"objects",
"basket",
"e3bae8da192ab3d4a17ae19fa77775ff",
"e3bae8da192ab3d4a17ae19fa77775ff.urdf",
),
scale=2,
)
s.import_object(obj)
obj.set_position_orientation([1.1, 0.300000, 1.0], [0, 0, 0, 1])
# Time how long demo takes
time_text = s.add_vr_overlay_text(
text_data="Current time: NOT STARTED", font_size=100, font_style="Bold", color=[0, 0, 0], pos=[100, 100]
)
timer = VrTimer()
# Main simulation loop
while True:
s.step(print_stats=PRINT_STATS)
if not VIEWER_MANIP:
# Events that manage timer functionality
r_toggle = s.query_vr_event("right_controller", "overlay_toggle")
l_toggle = s.query_vr_event("left_controller", "overlay_toggle")
# Overlay toggle action on right controller is used to start/stop timer
if r_toggle and not l_toggle:
if timer.is_timer_running():
timer.stop_timer()
else:
timer.start_timer()
# Overlay toggle action on left controller is used to show/hide timer
elif l_toggle and not r_toggle:
time_text.set_show_state(not time_text.get_show_state())
# Reset timer if both toggle buttons are pressed at once
elif r_toggle and l_toggle:
timer.refresh_timer()
# Update timer value
time_text.set_text("Current time: {}".format(round(timer.get_timer_val(), 1)))
# Update VR agent
vr_agent.apply_action()
s.disconnect()
if __name__ == "__main__":
main()
| 40.69281
| 113
| 0.667523
|
2970f80fc9528c0b1b13373f13a8346aaead3d12
| 793
|
py
|
Python
|
json_checker/app.py
|
DKorytkin/JsonChecker
|
1a803c3d873db5a1139a55b7efb3b40d9c1b6891
|
[
"MIT"
] | 5
|
2017-08-17T11:24:13.000Z
|
2017-08-21T09:45:29.000Z
|
json_checker/app.py
|
DKorytkin/JsonChecker
|
1a803c3d873db5a1139a55b7efb3b40d9c1b6891
|
[
"MIT"
] | 179
|
2017-09-05T09:18:41.000Z
|
2022-03-31T17:20:03.000Z
|
json_checker/app.py
|
DKorytkin/json_checker
|
1a803c3d873db5a1139a55b7efb3b40d9c1b6891
|
[
"MIT"
] | null | null | null |
import logging
from typing import Any
from json_checker.core.base import Base
from json_checker.core.exceptions import CheckerError
from json_checker.core.checkers import Validator
from json_checker.core.reports import Report
log = logging.getLogger(__name__)
class Checker(Base):
def validate(self, data: Any) -> Any:
log.debug(
"Checker settings: ignore_extra_keys=%s, soft=%s"
% (self.ignore_extra_keys, self.soft)
)
report = Report(self.soft)
checker = Validator(
expected_data=self.expected_data,
report=report,
ignore_extra_keys=self.ignore_extra_keys,
)
checker.validate(data)
if report.has_errors():
raise CheckerError(report)
return data
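# Usage sketch (constructor arguments assumed from Base; the schema is illustrative):
# checker = Checker({'id': int, 'name': str}, soft=True)
# checker.validate({'id': 1, 'name': 'test'})  # returns the data; raises CheckerError on mismatch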
| 26.433333
| 61
| 0.662043
|
5ab7a8594e1a34cca29b9d8d2339ca4f22dc603b
| 5,423
|
py
|
Python
|
src/olympia/abuse/migrations/0001_initial.py
|
dante381/addons-server
|
9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/abuse/migrations/0001_initial.py
|
dante381/addons-server
|
9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/abuse/migrations/0001_initial.py
|
dante381/addons-server
|
9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2.5 on 2019-09-12 13:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import django.utils.timezone
import olympia.amo.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('addons', '__first__'),
]
operations = [
migrations.CreateModel(
name='AbuseReport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('country_code', models.CharField(default=None, max_length=2, null=True)),
('guid', models.CharField(max_length=255, null=True)),
('message', models.TextField(blank=True)),
('state', models.PositiveSmallIntegerField(choices=[(1, 'Untriaged'), (2, 'Valid'), (3, 'Suspicious'), (4, 'Deleted')], default=1)),
('client_id', models.CharField(blank=True, default=None, max_length=64, null=True)),
('addon_name', models.CharField(blank=True, default=None, max_length=255, null=True)),
('addon_summary', models.CharField(blank=True, default=None, max_length=255, null=True)),
('addon_version', models.CharField(blank=True, default=None, max_length=255, null=True)),
('addon_signature', models.PositiveSmallIntegerField(blank=True, choices=[(None, 'None'), (1, 'Curated and partner'), (2, 'Curated'), (3, 'Partner'), (4, 'Non-curated'), (5, 'Unsigned'), (6, 'Broken'), (7, 'Unknown'), (8, 'Missing'), (9, 'Preliminary'), (10, 'Signed'), (11, 'System'), (12, 'Privileged')], default=None, null=True)),
('application', models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Firefox'), (61, 'Firefox for Android')], default=1, null=True)),
('application_version', models.CharField(blank=True, default=None, max_length=255, null=True)),
('application_locale', models.CharField(blank=True, default=None, max_length=255, null=True)),
('operating_system', models.CharField(blank=True, default=None, max_length=255, null=True)),
('operating_system_version', models.CharField(blank=True, default=None, max_length=255, null=True)),
('install_date', models.DateTimeField(blank=True, default=None, null=True)),
('reason', models.PositiveSmallIntegerField(blank=True, choices=[(None, 'None'), (1, 'Damages computer and/or data'), (2, 'Creates spam or advertising'), (3, 'Changes search / homepage / new tab page without informing user'), (5, 'Doesn’t work, breaks websites, or slows Firefox down'), (6, 'Hateful, violent, or illegal content'), (7, 'Pretends to be something it’s not'), (9, "Wasn't wanted / impossible to get rid of"), (127, 'Other')], default=None, null=True)),
('addon_install_origin', models.CharField(blank=True, default=None, max_length=255, null=True)),
('addon_install_method', models.PositiveSmallIntegerField(blank=True, choices=[(None, 'None'), (1, 'Add-on Manager Web API'), (2, 'Direct link'), (3, 'Install Trigger'), (4, 'From File'), (5, 'Webext management API'), (6, 'Drag & Drop'), (7, 'Sideload'), (8, 'File URL'), (9, 'Enterprise Policy'), (10, 'Included in build'), (11, 'System Add-on'), (12, 'Temporary Add-on'), (13, 'Sync'), (14, 'URL'), (127, 'Other')], default=None, null=True)),
('addon_install_source', models.PositiveSmallIntegerField(blank=True, choices=[(None, 'None'), (1, 'Add-ons Manager'), (2, 'Add-ons Debugging'), (3, 'Preferences'), (4, 'AMO'), (5, 'App Profile'), (6, 'Disco Pane'), (7, 'Included in build'), (8, 'Extension'), (9, 'Enterprise Policy'), (10, 'File URL'), (11, 'GMP Plugin'), (12, 'Internal'), (13, 'Plugin'), (14, 'Return to AMO'), (15, 'Sync'), (16, 'System Add-on'), (17, 'Temporary Add-on'), (18, 'Unknown'), (127, 'Other')], default=None, null=True)),
('report_entry_point', models.PositiveSmallIntegerField(blank=True, choices=[(None, 'None'), (1, 'Uninstall'), (2, 'Menu'), (3, 'Toolbar context menu')], default=None, null=True)),
('addon', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='abuse_reports', to='addons.Addon')),
('reporter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='abuse_reported', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='abuse_reports', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'abuse_reports',
'base_manager_name': 'unfiltered',
},
bases=(olympia.amo.models.SaveUpdateMixin, models.Model),
managers=[
('unfiltered', django.db.models.manager.Manager()),
],
),
migrations.AddIndex(
model_name='abusereport',
index=models.Index(fields=['created'], name='created_idx'),
),
]
| 83.430769
| 520
| 0.628434
|
8a3ed843c9190aeb81eaa94c4bab49a1c3b8dbef
| 344
|
py
|
Python
|
test/test_config.py
|
cbjuan/test-textgenrnn
|
dc92d6b86f86a2d4811ecee64ccf3f9f8a8ca4c0
|
[
"Apache-2.0"
] | null | null | null |
test/test_config.py
|
cbjuan/test-textgenrnn
|
dc92d6b86f86a2d4811ecee64ccf3f9f8a8ca4c0
|
[
"Apache-2.0"
] | 4
|
2018-09-20T12:27:42.000Z
|
2018-10-15T15:29:37.000Z
|
test/test_config.py
|
GarridoLabs/test-textgenrnn
|
dc92d6b86f86a2d4811ecee64ccf3f9f8a8ca4c0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
@license
Copyright(c) 2018, GarridoLabs and the project's contributors.
This source code is licensed under the Apache License, Version 2.0 found in
the LICENSE.txt file in the root directory of this source tree.
'''
"""
Configuration Variables to use in test mode
"""
skip_integration_test = True
| 22.933333
| 79
| 0.703488
|
ccb4e835fb72f8ed6b931718ae0341a54abcd02a
| 75
|
py
|
Python
|
stilio/persistence/constants.py
|
fakegit/stilio
|
cf198b8ccadc7dcadc462ce83b801af00ef4e2f2
|
[
"Apache-2.0"
] | 71
|
2019-10-09T17:18:12.000Z
|
2022-02-26T12:15:53.000Z
|
stilio/persistence/constants.py
|
fakegit/stilio
|
cf198b8ccadc7dcadc462ce83b801af00ef4e2f2
|
[
"Apache-2.0"
] | 3
|
2019-10-16T17:52:48.000Z
|
2021-12-01T16:50:18.000Z
|
stilio/persistence/constants.py
|
fakegit/stilio
|
cf198b8ccadc7dcadc462ce83b801af00ef4e2f2
|
[
"Apache-2.0"
] | 11
|
2020-01-21T09:09:14.000Z
|
2022-03-27T12:05:36.000Z
|
from stilio.persistence.torrents.models import Torrent
MODELS = [Torrent]
| 18.75
| 54
| 0.813333
|
07d0f9c11a9c48e3107d1ff8042d89d0c44e2d28
| 432
|
py
|
Python
|
youtube/Config.py
|
youtube-py/youtube.py
|
9f8db2a4b831361088d6ca818cef4468be90117b
|
[
"MIT"
] | 1
|
2020-12-14T11:57:22.000Z
|
2020-12-14T11:57:22.000Z
|
youtube/Config.py
|
youtube-py/youtube.py
|
9f8db2a4b831361088d6ca818cef4468be90117b
|
[
"MIT"
] | 41
|
2021-01-15T13:18:46.000Z
|
2022-03-28T14:22:54.000Z
|
youtube/Config.py
|
youtube-py/youtube.py
|
9f8db2a4b831361088d6ca818cef4468be90117b
|
[
"MIT"
] | 5
|
2020-12-06T18:06:05.000Z
|
2021-07-07T00:49:37.000Z
|
# -*- coding: utf-8 -*-
"""
youtube.py is really lite python liberary with fast downloader
"""
__title__ = "youtube.py"
__version__ = "2.1.0"
__author__ = "Mayank Gupta"
__license__ = "MIT License"
__copyright__ = "Copyright 2020 YouTube.py (Mayank Gupta)"
__github__ = "https://github.com/youtube-py/youtube.py"
__issues__ = "https://github.com/youtube-py/youtube.py/issues"
__docs__ = "https://youtube-python.mayankfawkes.xyz/"
| 28.8
| 62
| 0.726852
|
ab5a645adac2c8489ae71b5085f0c444b06e8bc0
| 4,395
|
py
|
Python
|
app/models.py
|
LaurierCS/Pod4
|
1dd2fd34c874ff216fb181c9afc084be34630630
|
[
"MIT"
] | 1
|
2022-03-29T16:41:33.000Z
|
2022-03-29T16:41:33.000Z
|
app/models.py
|
LaurierCS/Pod4
|
1dd2fd34c874ff216fb181c9afc084be34630630
|
[
"MIT"
] | null | null | null |
app/models.py
|
LaurierCS/Pod4
|
1dd2fd34c874ff216fb181c9afc084be34630630
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.db.models.fields import *
from django.template.defaultfilters import slugify
from django import forms
from django.contrib.auth.models import User
class Profile(models.Model):
user = models.OneToOneField(User, related_name = 'profile', null=True, on_delete=models.CASCADE)
first_name = models.CharField(max_length=100, blank=False, default="John")
last_name = models.CharField(max_length=100, blank=False, default="Doe")
bio = models.TextField(max_length=500, blank=True)
image = models.ImageField(
default="images/smiley.jpg", upload_to='images/', blank=True)
# 👇 THE "tech_roadmap" RELATIONSHIP BELOW IS THE TECHNOLOGIES THAT THE USER WANTS TO ACHIEVE
# (if a tech is in this list, it will show up on the tree graph)
# This is all the tech they either want to work with or have already achieved experience with
# This will allow us to make the graph unique to each persons tech goals, instead of showing nodes for dozens of different
# technologies they aren't even aiming to achieve
# tech_roadmap = models.ManyToManyField("Technology", blank=True, null=True)
date_created = models.DateTimeField(auto_now_add=True, null=True)
email = models.EmailField(max_length=256, blank=True, null=True)
twitter = models.URLField(max_length=200, blank=True, null=True)
linkedin = models.URLField(max_length=200, blank=True, null=True)
github = models.URLField(max_length=200, blank=True, null=True)
website = models.URLField(max_length=200, blank=True, null=True)
def __str__(self):
return self.user.username
"""
Condensed skill tree and markers into one unit
Skill tree elements of type N will now represent skills
Skill tree elements of type C will be parents to at least one node of type N
"""
class Skill(models.Model):
node_type_choices = (
("C", "Category"), ("N", "Node"), ("U", "User"))
# Foreign key fields
parentId = models.ForeignKey("Skill", on_delete=models.CASCADE, null=True)
# Text fields
id = models.CharField(primary_key=True, max_length=30)
name = models.CharField(max_length=30)
icon_HREF = models.URLField(max_length=200)
node_type = models.CharField(max_length=40, choices=node_type_choices, default="C")
def __str__(self):
return self.name
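# Illustrative tree construction (hypothetical ids and URLs): a category with one child skill.
# web = Skill.objects.create(id="web", name="Web Dev", icon_HREF="https://example.com/web.svg", node_type="C")
# react = Skill.objects.create(id="react", name="React", icon_HREF="https://example.com/react.svg", node_type="N", parentId=web)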
"""
DesiredSkills list will contain all skills that a user is either currently proficient in or
desires to be proficient in
There will be different degrees of proficiency on a 0 to 5 scale
Each desired skill will have a set of experiences with it that can be nullable
"""
class DesiredSkill(models.Model):
proficiency_choices = [
(0, "Aiming to Learn"),
(1, "Some Understanding"),
(2, "Some Proficiency"),
(3, "Capable"),
(4, "Able to Use Professionally"),
(5, "Expert"),
]
# Foreign Key Fields
user_id = models.ForeignKey("Profile", on_delete=models.CASCADE)
skill = models.ForeignKey("Skill", on_delete=models.CASCADE)
# User Input Fields
proficiency = models.FloatField(choices=proficiency_choices, default=0)
description = models.TextField(max_length=1000)
def __str__(self):
return self.skill.__str__()
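# Illustrative usage (assumes the Skill example above and an existing Profile instance):
# DesiredSkill.objects.create(user_id=profile, skill=react, proficiency=2,
#                             description="Built two small SPAs with React")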
class Experience(models.Model):
# technologies = models.ManyToManyField("Technology")
EXPERIENCE_TYPE = (
('E', 'Exploration'),
('P', 'Project'),
('L', 'Learning'),
('H', 'Hackathon'),
('Ev', 'Event'),
)
# Foreign Key Fields
profile = models.ForeignKey("Profile", on_delete=models.CASCADE, null=True)
skills = models.ManyToManyField(DesiredSkill)
# Text Fields
name = models.CharField(max_length=200)
kind = models.CharField(max_length=40, choices=EXPERIENCE_TYPE, default="E" )
description = models.TextField(null=True, blank=True)
# Other Fields
likes_amount = models.IntegerField(default=0)
start_date = models.DateField(null=True, blank=True) # nullable until the widget is set up for date input submission
end_date = models.DateField(null=True, blank=True) # nullable until the widget is set up for date input submission
project_link = models.URLField(max_length=2000, null=True, blank=True)
image = models.ImageField(upload_to='images/', blank=True)
def __str__(self):
return self.name
| 40.694444
| 147
| 0.721502
|
465128237c532d09d7eda02e6715109147a6013c
| 252
|
py
|
Python
|
code_all/day16/student_system/main.py
|
testcg/python
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
[
"MIT"
] | null | null | null |
code_all/day16/student_system/main.py
|
testcg/python
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
[
"MIT"
] | null | null | null |
code_all/day16/student_system/main.py
|
testcg/python
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
[
"MIT"
] | null | null | null |
from bll import StudentController
from usl import StudentView
# Run the entry-point logic only when executed as the main module
if __name__ == '__main__':
try:
controller = StudentController()
view = StudentView(controller)
view.main()
except Exception:
print("The program encountered an error")
| 21
| 40
| 0.654762
|
c18dccb908c2874c766331541dd8a0693ded4144
| 12,518
|
py
|
Python
|
classification/imaterialist_challenge_furniture_2018/models/inceptionresnetv2_ssd_like.py
|
vfdev-5/ignite-examples
|
fb15b59e2b159e1e2bc4628f8756055e9154f5c8
|
[
"MIT"
] | 11
|
2018-04-07T17:49:58.000Z
|
2022-03-15T07:18:18.000Z
|
classification/imaterialist_challenge_furniture_2018/models/inceptionresnetv2_ssd_like.py
|
vfdev-5/ignite-examples
|
fb15b59e2b159e1e2bc4628f8756055e9154f5c8
|
[
"MIT"
] | null | null | null |
classification/imaterialist_challenge_furniture_2018/models/inceptionresnetv2_ssd_like.py
|
vfdev-5/ignite-examples
|
fb15b59e2b159e1e2bc4628f8756055e9154f5c8
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from pretrainedmodels.models.inceptionresnetv2 import inceptionresnetv2
class FurnitureInceptionResNetV4350SSDLike(nn.Module):
def __init__(self, num_classes, pretrained='imagenet'):
super(FurnitureInceptionResNetV4350SSDLike, self).__init__()
self.extractor = Extractor350(pretrained=pretrained)
self.num_classes = num_classes
self.num_anchors = (1, 1, 1, 1)
self.in_channels = self.extractor.channels
self.cls_layers = nn.ModuleList()
for i in range(len(self.in_channels)):
self.cls_layers += [
nn.Conv2d(self.in_channels[i], self.num_anchors[i] * self.num_classes,
kernel_size=3, padding=1)
]
n_boxes = sum([i ** 2 for i in self.extractor.featuremap_sizes])
self.boxes_to_classes = []
for i in range(num_classes):
self.boxes_to_classes.append(nn.Linear(n_boxes, 1))
self.boxes_to_classes = nn.ModuleList(self.boxes_to_classes)
self.relu = nn.ReLU(inplace=True)
self.drop = nn.Dropout(p=0.4)
self.final_classifier = nn.Linear(num_classes, num_classes)
def forward(self, x):
cls_preds = []
xs = self.extractor(x)
for i, x in enumerate(xs):
cls_pred = self.cls_layers[i](x)
cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous()
cls_preds.append(cls_pred.view(cls_pred.size(0), -1, self.num_classes))
cls_preds = torch.cat(cls_preds, 1)
merged_cls_preds = []
for i, m in enumerate(self.boxes_to_classes):
merged_cls_preds.append(m(cls_preds[:, :, i]))
merged_cls_preds = torch.cat(merged_cls_preds, 1)
out = self.relu(merged_cls_preds)
out = self.drop(out)
out = self.final_classifier(out)
return out
class Extractor350(nn.Module):
featuremap_sizes = (20, 9, 1)
channels = (256, 320, 256)
def __init__(self, pretrained):
super(Extractor350, self).__init__()
model = inceptionresnetv2(pretrained=pretrained)
self.stem = nn.Sequential(
model.conv2d_1a,
model.conv2d_2a,
model.conv2d_2b,
model.maxpool_3a,
model.conv2d_3b,
model.conv2d_4a,
model.maxpool_5a,
)
self.low_features_a = nn.Sequential(
model.mixed_5b,
model.repeat,
)
self.low_features_b = nn.Sequential(
model.mixed_6a,
model.repeat_1
)
self.mid_features = nn.Sequential(
model.mixed_7a,
model.repeat_2,
model.block8
)
self.top_features = nn.Sequential(
model.conv2d_7b,
model.avgpool_1a,
)
self.smooth2 = nn.Conv2d(1088, 256, kernel_size=3, stride=1, padding=1)
self.smooth3 = nn.Conv2d(2080, 320, kernel_size=3, stride=1, padding=1)
self.top_smooth = nn.Sequential(
nn.Conv2d(1536, 256, kernel_size=1, stride=1, padding=0),
nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
)
# aliases
self.smooth_layers = nn.ModuleList([
self.smooth2,
self.smooth3,
self.top_smooth,
])
def forward(self, x):
out = []
x = self.stem(x)
x = self.low_features_a(x)
x = self.low_features_b(x)
out.append(self.smooth2(x))
x = self.mid_features(x)
out.append(self.smooth3(x))
x = self.top_features(x)
out.append(self.top_smooth(x))
return out
class FurnitureInceptionResNetV4350SSDLike_v2(nn.Module):
def __init__(self, num_classes, pretrained='imagenet'):
super(FurnitureInceptionResNetV4350SSDLike_v2, self).__init__()
self.extractor = Extractor350_v2(pretrained=pretrained)
self.num_classes = num_classes
self.num_anchors = (1, 1, 1)
self.in_channels = self.extractor.channels
self.cls_layers = nn.ModuleList()
for i in range(len(self.in_channels)):
self.cls_layers += [
nn.Conv2d(self.in_channels[i], self.num_anchors[i] * self.num_classes,
kernel_size=3, padding=1),
nn.Sigmoid()
]
n_levels = len(self.extractor.featuremap_sizes)
self.boxes_to_classes = []
for i in range(num_classes):
self.boxes_to_classes.append(nn.Linear(n_levels, 1))
self.boxes_to_classes = nn.ModuleList(self.boxes_to_classes)
self.inner_classifier = nn.Linear(n_levels * num_classes, num_classes)
self.relu = nn.ReLU()
self.drop = nn.Dropout(p=0.4)
self.final_classifier = nn.Linear(2 * num_classes, num_classes)
def forward(self, x):
cls_preds = []
xs = self.extractor(x)
# Transform output feature maps to bbox predictions
for i, x in enumerate(xs):
cls_pred = self.cls_layers[i](x)
cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous()
cls_pred = cls_pred.view(cls_pred.size(0), -1, self.num_classes)
# Sum all predictions of all boxes at single level
cls_pred = torch.sum(cls_pred, dim=1).unsqueeze(1)
cls_preds.append(cls_pred)
# Two ways to aggregate
# A) Predictions from each bbox level are transformed with FC to a single probability
# for each target class
cls_preds_a = torch.cat(cls_preds, dim=1)
merged_cls_preds = []
for i, m in enumerate(self.boxes_to_classes):
merged_cls_preds.append(m(cls_preds_a[:, :, i]))
merged_cls_preds = torch.cat(merged_cls_preds, 1)
out_a = self.relu(merged_cls_preds)
# B) Predictions from each bbox level are transformed with FC to a vector of output probabilities
cls_preds_b = torch.cat(cls_preds, dim=2).squeeze(1)
out_b = self.inner_classifier(cls_preds_b)
out_b = self.relu(out_b)
# Aggregate results:
out = torch.cat([out_a, out_b], dim=1)
out = self.drop(out)
out = self.final_classifier(out)
return out
class Extractor350_v2(nn.Module):
featuremap_sizes = (20, 9, 1)
channels = (256, 256, 256)
def __init__(self, pretrained):
super(Extractor350_v2, self).__init__()
model = inceptionresnetv2(pretrained=pretrained)
self.stem = nn.Sequential(
model.conv2d_1a,
model.conv2d_2a,
model.conv2d_2b,
model.maxpool_3a,
model.conv2d_3b,
model.conv2d_4a,
model.maxpool_5a,
)
self.low_features_a = nn.Sequential(
model.mixed_5b,
model.repeat,
)
self.low_features_b = nn.Sequential(
model.mixed_6a,
model.repeat_1
)
self.mid_features = nn.Sequential(
model.mixed_7a,
model.repeat_2,
model.block8
)
self.top_features = nn.Sequential(
model.conv2d_7b,
model.avgpool_1a,
)
self.smooth2 = nn.Sequential(
nn.Conv2d(1088, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU()
)
self.smooth3 = nn.Sequential(
nn.Conv2d(2080, 320, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(320, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU()
)
self.top_smooth = nn.Sequential(
nn.Conv2d(1536, 256, kernel_size=1, stride=1, padding=0),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0),
nn.ReLU()
)
# aliases
self.smooth_layers = nn.ModuleList([
self.smooth2,
self.smooth3,
self.top_smooth,
])
def forward(self, x):
out = []
x = self.stem(x)
x = self.low_features_a(x)
x = self.low_features_b(x)
out.append(self.smooth2(x))
x = self.mid_features(x)
out.append(self.smooth3(x))
x = self.top_features(x)
out.append(self.top_smooth(x))
return out
class FurnitureInceptionResNetV4350SSDLike_v3(nn.Module):
def __init__(self, num_classes, pretrained='imagenet'):
super(FurnitureInceptionResNetV4350SSDLike_v3, self).__init__()
self.extractor = Extractor350_v3(pretrained=pretrained)
self.num_classes = num_classes
self.num_anchors = (1, 1, 1)
self.in_channels = self.extractor.channels
self.cls_layers = nn.ModuleList()
for i in range(len(self.in_channels)):
self.cls_layers += [
nn.Conv2d(self.in_channels[i], self.num_anchors[i] * self.num_classes,
kernel_size=3, padding=1),
]
n_levels = len(self.extractor.featuremap_sizes)
self.boxes_to_classes = []
for i in range(num_classes):
self.boxes_to_classes.append(nn.Linear(n_levels, 1))
self.boxes_to_classes = nn.ModuleList(self.boxes_to_classes)
self.relu = nn.ReLU()
self.drop = nn.Dropout(p=0.4)
self.final_classifier = nn.Linear(num_classes, num_classes)
def forward(self, x):
cls_preds = []
xs = self.extractor(x)
# Transform output feature maps to bbox predictions
for i, x in enumerate(xs):
cls_pred = self.cls_layers[i](x)
cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous()
cls_pred = cls_pred.view(cls_pred.size(0), -1, self.num_classes)
# Sum all predictions of all boxes at single level
cls_pred = torch.sum(cls_pred, dim=1).unsqueeze(1)
cls_preds.append(cls_pred)
cls_preds = torch.cat(cls_preds, dim=1)
merged_cls_preds = []
for i, m in enumerate(self.boxes_to_classes):
merged_cls_preds.append(m(cls_preds[:, :, i]))
merged_cls_preds = torch.cat(merged_cls_preds, 1)
out = self.relu(merged_cls_preds)
out = self.drop(out)
out = self.final_classifier(out)
return out
class Extractor350_v3(nn.Module):
featuremap_sizes = (20, 9, 1)
channels = (256, 320, 256)
def __init__(self, pretrained):
super(Extractor350_v3, self).__init__()
model = inceptionresnetv2(pretrained=pretrained)
self.stem = nn.Sequential(
model.conv2d_1a,
model.conv2d_2a,
model.conv2d_2b,
model.maxpool_3a,
model.conv2d_3b,
model.conv2d_4a,
model.maxpool_5a,
)
self.low_features_a = nn.Sequential(
model.mixed_5b,
model.repeat,
)
self.low_features_b = nn.Sequential(
model.mixed_6a,
model.repeat_1
)
self.mid_features = nn.Sequential(
model.mixed_7a,
model.repeat_2,
model.block8
)
self.top_features = nn.Sequential(
model.conv2d_7b,
nn.ReLU(),
model.avgpool_1a,
)
self.smooth2 = nn.Sequential(
nn.Conv2d(1088, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
)
self.smooth3 = nn.Sequential(
nn.Conv2d(2080, 320, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
)
self.top_smooth = nn.Sequential(
nn.Conv2d(1536, 256, kernel_size=1, stride=1, padding=0),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0),
nn.ReLU()
)
# aliases
self.smooth_layers = nn.ModuleList([
self.smooth2,
self.smooth3,
self.top_smooth,
])
def forward(self, x):
out = []
x = self.stem(x)
x = self.low_features_a(x)
x = self.low_features_b(x)
out.append(self.smooth2(x))
x = self.mid_features(x)
out.append(self.smooth3(x))
x = self.top_features(x)
out.append(self.top_smooth(x))
return out
| 30.985149
| 105
| 0.581802
|
3f36cd1d051526c362e038da1090288b1b754e97
| 1,890
|
py
|
Python
|
tests/causal_estimators/test_linear_regression_estimator.py
|
leo-ware/dowhy
|
3a2a79e2159a7f29456dd419a3c90395a384364e
|
[
"MIT"
] | null | null | null |
tests/causal_estimators/test_linear_regression_estimator.py
|
leo-ware/dowhy
|
3a2a79e2159a7f29456dd419a3c90395a384364e
|
[
"MIT"
] | null | null | null |
tests/causal_estimators/test_linear_regression_estimator.py
|
leo-ware/dowhy
|
3a2a79e2159a7f29456dd419a3c90395a384364e
|
[
"MIT"
] | null | null | null |
import pytest
from dowhy.causal_estimators.linear_regression_estimator import LinearRegressionEstimator
from .base import TestEstimator
@pytest.mark.usefixtures("fixed_seed")
class TestLinearRegressionEstimator(object):
@pytest.mark.parametrize(
[
"error_tolerance", "Estimator", "num_common_causes", "num_instruments",
"num_effect_modifiers", "num_treatments", "treatment_is_binary",
"treatment_is_category", "outcome_is_binary"
],
[
(0.1, LinearRegressionEstimator, [0,1], [0,1], [0,], [1, 2],
[True,], [False,], [False,]),
(0.1, LinearRegressionEstimator, [0,1], [0,1], [0,], [1, 2],
[False,], [True,], [False,]),
(0.1, LinearRegressionEstimator, [0,1], [0,1], [0,], [1, 2],
[False,], [False,], [False,]),
]
)
def test_average_treatment_effect(self, error_tolerance, Estimator,
num_common_causes, num_instruments, num_effect_modifiers,
num_treatments, treatment_is_binary, treatment_is_category,
outcome_is_binary
):
estimator_tester = TestEstimator(error_tolerance, Estimator)
estimator_tester.average_treatment_effect_testsuite(
num_common_causes=num_common_causes,
num_instruments = num_instruments,
num_effect_modifiers = num_effect_modifiers,
num_treatments=num_treatments,
treatment_is_binary=treatment_is_binary,
treatment_is_category=treatment_is_category,
outcome_is_binary=outcome_is_binary,
confidence_intervals=[True,],
test_significance=[True,],
method_params={
'num_ci_simulations': 10,
'num_null_simulations': 10
}
)
| 42
| 89
| 0.602646
|
27621b7222946c3a8da985800cb9df45d7463200
| 2,445
|
py
|
Python
|
project-obj/project_obj.py
|
HakkaTjakka/earth-reverse-engineering_github
|
6c52e69fcb33c5c06f634db874785d2454fa32a6
|
[
"Unlicense"
] | null | null | null |
project-obj/project_obj.py
|
HakkaTjakka/earth-reverse-engineering_github
|
6c52e69fcb33c5c06f634db874785d2454fa32a6
|
[
"Unlicense"
] | null | null | null |
project-obj/project_obj.py
|
HakkaTjakka/earth-reverse-engineering_github
|
6c52e69fcb33c5c06f634db874785d2454fa32a6
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import sys
from projection import projection
EARTH_RADIUS = 6371000
if len(sys.argv)==3:
print ('converting ' + sys.argv[1] + ' to ' + sys.argv[2])
with open(sys.argv[1]) as fd:
lines = fd.read().splitlines()
elif len(sys.argv)==1:
with open("in.obj") as fd:
lines = fd.read().splitlines()
else:
sys.exit('usage: project_obj.py [input.obj output.obj]')
#lines = np.array(lines, dtype=np.str)
lines = np.array(lines, dtype=str)
#lines = lines[np.logical_not(np.char.startswith(lines, "vn "))] # delete vertex normals
offset_x=3899275.0
offset_y=348997.0
offset_z=5026376.0
#v 3268023.6848134077 27.84330720361322 -5319793.639094348
# extract vertices
idx = np.where(np.char.startswith(lines, "v "))
v = lines[idx]
v = np.char.split(v, " ")
v = np.array(list(v))[:, 1:].astype(float)
o = v
v[:, 0]+=offset_x
v[:, 1]+=offset_y
v[:, 2]+=offset_z
# convert to lat/lon/ele
rad = np.linalg.norm(v, axis=1)[None, :]
lat = np.arcsin(v[:, 2]/rad)*180/np.pi
lon = (np.arctan2(v[:, 1], v[:, 0])*180/np.pi)[None, :]
rad -= EARTH_RADIUS # TODO: find the correct way to get elevation (this is bad but ellipsoid was worse)
v = np.array([lat, lon, rad]).transpose()[:, 0]
# pick the first point, and use it as the origin to find the local transformation matrix
old_origin = v[0, :2]
new_origin = np.array(projection.fromGeo(old_origin[1], old_origin[0]))
i = np.array(projection.fromGeo(old_origin[1], old_origin[0] + 0.01)) - new_origin
j = np.array(projection.fromGeo(old_origin[1] + 0.01, old_origin[0])) - new_origin
basis = 100*np.array((i, j))
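# Each basis row approximates the projected displacement per whole degree of
# latitude/longitude: the 0.01-degree finite-difference step is scaled back up by 100.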
# apply the transformation to every lat,lon in the array
v[:, :2] -= old_origin
v[:, :2] = np.einsum("ij,ni->nj", basis, v[:, :2])
v[:, :2] += new_origin
# swap y and z because minecraft is sideways
v[:, 2], v[:, 1] = v[:, 1].copy(), v[:, 2].copy()
o[:, 2]=v[:, 1]
o_out = []
for i in range(len(o)):
o_out.append("v {} {} {}".format(o[i, 0]-offset_x, o[i, 1]-offset_y, o[i, 2]))
o_out = np.array(o_out, dtype=str)
lines[idx] = o_out
if len(sys.argv)==3:
outfile=sys.argv[2]
elif len(sys.argv)==1:
outfile='out.obj'
#with open("out2.obj", "w") as fd:
with open(outfile, "w") as fd:
fd.write("\n".join(lines))
## convert to string
#v_out = []
#for i in range(len(v)):
# v_out.append("v {} {} {}".format(v[i, 0], v[i, 1], v[i, 2]))
#v_out = np.array(v_out, dtype=str)
#
#lines[idx] = v_out
#with open("out.obj", "w") as fd:
# fd.write("\n".join(lines))
| 26.576087
| 104
| 0.631084
|
dc334b8d5d354ff6b9bed9e3e1b0df4380def93c
| 2,872
|
py
|
Python
|
tests/taint_runner_test.py
|
kyriediculous/mythril
|
0c63d749fbb60f679e91fb6a45baa4bd232db608
|
[
"MIT"
] | 1
|
2020-08-07T01:16:48.000Z
|
2020-08-07T01:16:48.000Z
|
tests/taint_runner_test.py
|
reserve-protocol/mythril
|
5d2de5d744243254cee2a2b8c5cb83ed060e312e
|
[
"MIT"
] | null | null | null |
tests/taint_runner_test.py
|
reserve-protocol/mythril
|
5d2de5d744243254cee2a2b8c5cb83ed060e312e
|
[
"MIT"
] | 1
|
2018-08-12T17:28:34.000Z
|
2018-08-12T17:28:34.000Z
|
import mock
import pytest
from pytest_mock import mocker
from mythril.laser.ethereum.taint_analysis import *
from mythril.laser.ethereum.svm import GlobalState, Node, Edge, LaserEVM, MachineState
def test_execute_state(mocker):
record = TaintRecord()
record.stack = [True, False, True]
state = GlobalState(None, None)
state.mstate.stack = [1, 2, 3]
mocker.patch.object(state, 'get_current_instruction')
state.get_current_instruction.return_value = {"opcode": "ADD"}
# Act
new_record = TaintRunner.execute_state(record, state)
# Assert
assert new_record.stack == [True, True]
assert record.stack == [True, False, True]
def test_execute_node(mocker):
record = TaintRecord()
record.stack = [True, True, False, False]
state_1 = GlobalState(None, None)
state_1.mstate.stack = [1, 2, 3]
state_1.mstate.pc = 1
mocker.patch.object(state_1, 'get_current_instruction')
state_1.get_current_instruction.return_value = {"opcode": "SWAP1"}
state_2 = GlobalState(None, 1)
state_2.mstate.stack = [1, 2, 4, 1]
mocker.patch.object(state_2, 'get_current_instruction')
state_2.get_current_instruction.return_value = {"opcode": "ADD"}
node = Node("Test contract")
node.states = [state_1, state_2]
# Act
records = TaintRunner.execute_node(node, record)
# Assert
assert len(records) == 2
assert records[0].stack == [True, True, False, False]
assert records[1].stack == [True, True, False]
assert state_2 in records[0].states
assert state_1 in record.states
def test_execute(mocker):
state_1 = GlobalState(None, None, MachineState(gas=10000000))
state_1.mstate.stack = [1, 2]
mocker.patch.object(state_1, 'get_current_instruction')
state_1.get_current_instruction.return_value = {"opcode": "PUSH"}
state_2 = GlobalState(None, None, MachineState(gas=10000000))
state_2.mstate.stack = [1, 2, 3]
mocker.patch.object(state_2, 'get_current_instruction')
state_2.get_current_instruction.return_value = {"opcode": "ADD"}
node_1 = Node("Test contract")
node_1.states = [state_1, state_2]
state_3 = GlobalState(None, None, MachineState(gas=10000000))
state_3.mstate.stack = [1, 2]
mocker.patch.object(state_3, 'get_current_instruction')
state_3.get_current_instruction.return_value = {"opcode": "ADD"}
node_2 = Node("Test contract")
node_2.states = [state_3]
edge = Edge(node_1.uid, node_2.uid)
statespace = LaserEVM(None)
statespace.edges = [edge]
statespace.nodes[node_1.uid] = node_1
statespace.nodes[node_2.uid] = node_2
# Act
result = TaintRunner.execute(statespace, node_1, state_1, [True, True])
# Assert
print(result)
assert len(result.records) == 3
assert result.records[2].states == []
assert state_3 in result.records[1].states
| 30.231579
| 86
| 0.694638
|
1b4d5c752e1bc303e7b313bad36a3936ab9353f7
| 2,571
|
py
|
Python
|
app/models/users.py
|
geekspeng/51-read
|
31d9697b50f57e66319870f403717f95a42cdc57
|
[
"MIT"
] | 6
|
2018-10-02T13:01:28.000Z
|
2021-11-21T10:05:06.000Z
|
app/models/users.py
|
geekspeng/51-read
|
31d9697b50f57e66319870f403717f95a42cdc57
|
[
"MIT"
] | 2
|
2020-03-16T14:06:27.000Z
|
2020-12-12T04:08:43.000Z
|
app/models/users.py
|
geekspeng/51-read
|
31d9697b50f57e66319870f403717f95a42cdc57
|
[
"MIT"
] | 3
|
2021-06-28T13:27:08.000Z
|
2021-11-21T10:06:29.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2018/8/30 15:50
# @Author : geekspeng
# @Email : geekspeng@icloud.com
from datetime import datetime
from hashlib import md5
from time import time
import jwt
from flask import current_app
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login_manager
class Users(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), index=True, unique=True)
kindle_email = db.Column(db.String(120), default="")
password_hash = db.Column(db.String(128))
member_since = db.Column(db.DateTime(), default=datetime.utcnow)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
confirmed = db.Column(db.Boolean(), default=False)
def __repr__(self):
return '<User %r>' % self.email
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def avatar(self, size=20):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
digest, size)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
db.session.commit()
def generate_confirmation_token(self, expires_in=600):
return jwt.encode(
{'confirmation': self.id, 'exp': time() + expires_in},
current_app.config['SECRET_KEY'],
algorithm='HS256').decode('utf-8')
@staticmethod
def check_confirmation_token(token):
try:
id = jwt.decode(token, current_app.config['SECRET_KEY'],
algorithms=['HS256'])['confirmation']
except Exception:
return None
return Users.query.get(id)
def generate_reset_password_token(self, expires_in=600):
return jwt.encode(
{'reset_password': self.id, 'exp': time() + expires_in},
current_app.config['SECRET_KEY'],
algorithm='HS256').decode('utf-8')
@staticmethod
def check_reset_password_token(token):
try:
id = jwt.decode(token, current_app.config['SECRET_KEY'],
algorithms=['HS256'])['reset_password']
except Exception:
return None
return Users.query.get(id)
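# Token round-trip sketch (illustrative):
# token = user.generate_reset_password_token()
# assert Users.check_reset_password_token(token) == user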
@login_manager.user_loader
def load_user(id):
return Users.query.get(int(id))
| 32.544304
| 76
| 0.644107
|
e4e73929c8ce3b10c26b6bd2bd78112e54f10ee5
| 40,212
|
py
|
Python
|
vyper/parser/expr.py
|
bumplzz69/vyper
|
ac52a4a4ad02f09a20facf0b31a85c0e8e72001b
|
[
"MIT"
] | null | null | null |
vyper/parser/expr.py
|
bumplzz69/vyper
|
ac52a4a4ad02f09a20facf0b31a85c0e8e72001b
|
[
"MIT"
] | null | null | null |
vyper/parser/expr.py
|
bumplzz69/vyper
|
ac52a4a4ad02f09a20facf0b31a85c0e8e72001b
|
[
"MIT"
] | 3
|
2021-04-05T15:25:53.000Z
|
2021-04-05T15:26:13.000Z
|
import ast
import warnings
from vyper.exceptions import (
InvalidLiteralException,
NonPayableViolationException,
StructureException,
TypeMismatchException,
VariableDeclarationException,
ParserException
)
from vyper.parser.lll_node import LLLnode
from vyper.parser import self_call
from vyper.parser import external_call
from vyper.parser.parser_utils import (
getpos,
unwrap_location,
get_original_if_0_prefixed,
get_number_as_fraction,
add_variable_offset,
)
from vyper.utils import (
MemoryPositions,
SizeLimits,
bytes_to_int,
string_to_bytes,
DECIMAL_DIVISOR,
checksum_encode,
is_varname_valid,
)
from vyper.types import (
BaseType,
ByteArrayType,
ContractType,
ListType,
MappingType,
NullType,
StructType,
TupleType,
)
from vyper.types import (
is_base_type,
)
from vyper.types import (
are_units_compatible,
is_numeric_type,
combine_units
)
class Expr(object):
# TODO: Once other refactors are made reevaluate all inline imports
def __init__(self, expr, context):
self.expr = expr
self.context = context
self.expr_table = {
LLLnode: self.get_expr,
ast.Num: self.number,
ast.Str: self.string,
ast.NameConstant: self.constants,
ast.Name: self.variables,
ast.Attribute: self.attribute,
ast.Subscript: self.subscript,
ast.BinOp: self.arithmetic,
ast.Compare: self.compare,
ast.BoolOp: self.boolean_operations,
ast.UnaryOp: self.unary_operations,
ast.Call: self.call,
ast.List: self.list_literals,
ast.Tuple: self.tuple_literals,
ast.Dict: self.dict_fail,
}
expr_type = self.expr.__class__
if expr_type in self.expr_table:
self.lll_node = self.expr_table[expr_type]()
else:
raise Exception("Unsupported operator: %r" % ast.dump(self.expr))
def get_expr(self):
return self.expr
def number(self):
orignum = get_original_if_0_prefixed(self.expr, self.context)
if orignum is None and isinstance(self.expr.n, int):
            # Literal (most likely) becomes int128
if SizeLimits.in_bounds('int128', self.expr.n) or self.expr.n < 0:
return LLLnode.from_list(self.expr.n, typ=BaseType('int128', unit=None, is_literal=True), pos=getpos(self.expr))
            # Literal is large enough, so it (most likely) becomes uint256.
else:
return LLLnode.from_list(self.expr.n, typ=BaseType('uint256', unit=None, is_literal=True), pos=getpos(self.expr))
elif isinstance(self.expr.n, float):
numstring, num, den = get_number_as_fraction(self.expr, self.context)
# if not SizeLimits.in_bounds('decimal', num // den):
# if not SizeLimits.MINDECIMAL * den <= num <= SizeLimits.MAXDECIMAL * den:
if not (SizeLimits.MINNUM * den < num < SizeLimits.MAXNUM * den):
raise InvalidLiteralException("Number out of range: " + numstring, self.expr)
if DECIMAL_DIVISOR % den:
raise InvalidLiteralException("Too many decimal places: " + numstring, self.expr)
return LLLnode.from_list(num * DECIMAL_DIVISOR // den, typ=BaseType('decimal', unit=None), pos=getpos(self.expr))
# Binary literal.
elif orignum[:2] == '0b':
str_val = orignum[2:]
total_bits = len(orignum[2:])
total_bits = total_bits if total_bits % 8 == 0 else total_bits + 8 - (total_bits % 8) # ceil8 to get byte length.
            if len(orignum[2:]) != total_bits:  # Support only fully formed bit definitions.
raise InvalidLiteralException("Bit notation requires a multiple of 8 bits / 1 byte. {} bit(s) are missing.".format(total_bits - len(orignum[2:])), self.expr)
byte_len = int(total_bits / 8)
placeholder = self.context.new_placeholder(ByteArrayType(byte_len))
seq = []
seq.append(['mstore', placeholder, byte_len])
for i in range(0, total_bits, 256):
section = str_val[i:i + 256]
int_val = int(section, 2) << (256 - len(section)) # bytes are right padded.
seq.append(
['mstore', ['add', placeholder, i + 32], int_val])
return LLLnode.from_list(['seq'] + seq + [placeholder],
typ=ByteArrayType(byte_len), location='memory', pos=getpos(self.expr), annotation='Create ByteArray (Binary literal): %s' % str_val)
elif len(orignum) == 42:
if checksum_encode(orignum) != orignum:
raise InvalidLiteralException("""Address checksum mismatch. If you are sure this is the
right address, the correct checksummed form is: %s""" % checksum_encode(orignum), self.expr)
return LLLnode.from_list(self.expr.n, typ=BaseType('address', is_literal=True), pos=getpos(self.expr))
elif len(orignum) == 66:
return LLLnode.from_list(self.expr.n, typ=BaseType('bytes32', is_literal=True), pos=getpos(self.expr))
else:
raise InvalidLiteralException("Cannot read 0x value with length %d. Expecting 42 (address incl 0x) or 66 (bytes32 incl 0x)"
% len(orignum), self.expr)
# Byte array literals
def string(self):
bytez, bytez_length = string_to_bytes(self.expr.s)
placeholder = self.context.new_placeholder(ByteArrayType(bytez_length))
seq = []
seq.append(['mstore', placeholder, bytez_length])
for i in range(0, len(bytez), 32):
seq.append(['mstore', ['add', placeholder, i + 32], bytes_to_int((bytez + b'\x00' * 31)[i: i + 32])])
return LLLnode.from_list(['seq'] + seq + [placeholder],
typ=ByteArrayType(bytez_length), location='memory', pos=getpos(self.expr), annotation='Create ByteArray: %s' % bytez)
# True, False, None constants
def constants(self):
if self.expr.value is True:
return LLLnode.from_list(1, typ=BaseType('bool', is_literal=True), pos=getpos(self.expr))
elif self.expr.value is False:
return LLLnode.from_list(0, typ=BaseType('bool', is_literal=True), pos=getpos(self.expr))
elif self.expr.value is None:
return LLLnode.from_list(None, typ=NullType(), pos=getpos(self.expr))
else:
raise Exception("Unknown name constant: %r" % self.expr.value.value)
# Variable names
def variables(self):
builtin_constants = {
'EMPTY_BYTES32': LLLnode.from_list(
[0],
typ=BaseType('bytes32', None, is_literal=True),
pos=getpos(self.expr)
),
'ZERO_ADDRESS': LLLnode.from_list(
[0],
typ=BaseType('address', None, is_literal=True),
pos=getpos(self.expr)
),
'MAX_INT128': LLLnode.from_list(
[SizeLimits.MAXNUM],
typ=BaseType('int128', None, is_literal=True),
pos=getpos(self.expr)
),
'MIN_INT128': LLLnode.from_list(
[SizeLimits.MINNUM],
typ=BaseType('int128', None, is_literal=True),
pos=getpos(self.expr)
),
'MAX_DECIMAL': LLLnode.from_list(
[SizeLimits.MAXDECIMAL],
typ=BaseType('decimal', None, is_literal=True),
pos=getpos(self.expr)
),
'MIN_DECIMAL': LLLnode.from_list(
[SizeLimits.MINDECIMAL],
typ=BaseType('decimal', None, is_literal=True),
pos=getpos(self.expr)
),
'MAX_UINT256': LLLnode.from_list(
[SizeLimits.MAX_UINT256],
typ=BaseType('uint256', None, is_literal=True),
pos=getpos(self.expr)
),
}
if self.expr.id == 'self':
return LLLnode.from_list(['address'], typ='address', pos=getpos(self.expr))
elif self.expr.id in self.context.vars:
var = self.context.vars[self.expr.id]
return LLLnode.from_list(var.pos, typ=var.typ, location='memory', pos=getpos(self.expr), annotation=self.expr.id, mutable=var.mutable)
elif self.expr.id in builtin_constants:
return builtin_constants[self.expr.id]
elif self.expr.id in self.context.constants:
            # check if the constant's value is compatible with its declared type
const = self.context.constants[self.expr.id]
if isinstance(const, ast.AnnAssign): # Handle ByteArrays.
expr = Expr(const.value, self.context).lll_node
return expr
            # Other types are already unwrapped, no need to re-parse them
return self.context.constants[self.expr.id]
else:
raise VariableDeclarationException("Undeclared variable: " + self.expr.id, self.expr)
# x.y or x[5]
def attribute(self):
# x.balance: balance of address x
if self.expr.attr == 'balance':
addr = Expr.parse_value_expr(self.expr.value, self.context)
if not is_base_type(addr.typ, 'address'):
raise TypeMismatchException("Type mismatch: balance keyword expects an address as input", self.expr)
return LLLnode.from_list(['balance', addr], typ=BaseType('uint256', {'wei': 1}), location=None, pos=getpos(self.expr))
# x.codesize: codesize of address x
elif self.expr.attr == 'codesize' or self.expr.attr == 'is_contract':
addr = Expr.parse_value_expr(self.expr.value, self.context)
if not is_base_type(addr.typ, 'address'):
raise TypeMismatchException("Type mismatch: codesize keyword expects an address as input", self.expr)
if self.expr.attr == 'codesize':
eval_code = ['extcodesize', addr]
output_type = 'int128'
else:
eval_code = ['gt', ['extcodesize', addr], 0]
output_type = 'bool'
return LLLnode.from_list(eval_code, typ=BaseType(output_type), location=None, pos=getpos(self.expr))
# self.x: global attribute
elif isinstance(self.expr.value, ast.Name) and self.expr.value.id == "self":
if self.expr.attr not in self.context.globals:
raise VariableDeclarationException("Persistent variable undeclared: " + self.expr.attr, self.expr)
var = self.context.globals[self.expr.attr]
return LLLnode.from_list(var.pos, typ=var.typ, location='storage', pos=getpos(self.expr), annotation='self.' + self.expr.attr)
# Reserved keywords
elif isinstance(self.expr.value, ast.Name) and self.expr.value.id in ("msg", "block", "tx"):
key = self.expr.value.id + "." + self.expr.attr
if key == "msg.sender":
if self.context.is_private:
raise ParserException("msg.sender not allowed in private functions.", self.expr)
return LLLnode.from_list(['caller'], typ='address', pos=getpos(self.expr))
elif key == "msg.value":
if not self.context.is_payable:
raise NonPayableViolationException("Cannot use msg.value in a non-payable function", self.expr)
return LLLnode.from_list(['callvalue'], typ=BaseType('uint256', {'wei': 1}), pos=getpos(self.expr))
elif key == "msg.gas":
return LLLnode.from_list(['gas'], typ='uint256', pos=getpos(self.expr))
elif key == "block.difficulty":
return LLLnode.from_list(['difficulty'], typ='uint256', pos=getpos(self.expr))
elif key == "block.timestamp":
return LLLnode.from_list(['timestamp'], typ=BaseType('uint256', {'sec': 1}, True), pos=getpos(self.expr))
elif key == "block.coinbase":
return LLLnode.from_list(['coinbase'], typ='address', pos=getpos(self.expr))
elif key == "block.number":
return LLLnode.from_list(['number'], typ='uint256', pos=getpos(self.expr))
elif key == "block.prevhash":
return LLLnode.from_list(['blockhash', ['sub', 'number', 1]], typ='bytes32', pos=getpos(self.expr))
elif key == "tx.origin":
return LLLnode.from_list(['origin'], typ='address', pos=getpos(self.expr))
else:
raise Exception("Unsupported keyword: " + key)
# Other variables
else:
sub = Expr.parse_variable_location(self.expr.value, self.context)
# contract type
if isinstance(sub.typ, ContractType):
return sub
if not isinstance(sub.typ, StructType):
raise TypeMismatchException("Type mismatch: member variable access not expected", self.expr.value)
attrs = sorted(sub.typ.members.keys())
if self.expr.attr not in attrs:
raise TypeMismatchException("Member %s not found. Only the following available: %s" % (self.expr.attr, " ".join(attrs)), self.expr)
return add_variable_offset(sub, self.expr.attr, pos=getpos(self.expr))
def subscript(self):
sub = Expr.parse_variable_location(self.expr.value, self.context)
if isinstance(sub.typ, (MappingType, ListType)):
if 'value' not in vars(self.expr.slice):
raise StructureException("Array access must access a single element, not a slice", self.expr)
index = Expr.parse_value_expr(self.expr.slice.value, self.context)
elif isinstance(sub.typ, TupleType):
if not isinstance(self.expr.slice.value, ast.Num) or self.expr.slice.value.n < 0 or self.expr.slice.value.n >= len(sub.typ.members):
raise TypeMismatchException("Tuple index invalid", self.expr.slice.value)
index = self.expr.slice.value.n
else:
raise TypeMismatchException("Bad subscript attempt", self.expr.value)
o = add_variable_offset(sub, index, pos=getpos(self.expr))
o.mutable = sub.mutable
return o
def arithmetic_get_reference(self, item):
item_lll = Expr.parse_value_expr(item, self.context)
if isinstance(item, ast.Call):
# We only want to perform call statements once.
placeholder = self.context.new_placeholder(item_lll.typ)
pre_alloc = ['mstore', placeholder, item_lll]
return pre_alloc, LLLnode.from_list(['mload', placeholder], location='memory', typ=item_lll.typ)
else:
return None, item_lll
def arithmetic(self):
pre_alloc_left, left = self.arithmetic_get_reference(self.expr.left)
pre_alloc_right, right = self.arithmetic_get_reference(self.expr.right)
if not is_numeric_type(left.typ) or not is_numeric_type(right.typ):
raise TypeMismatchException("Unsupported types for arithmetic op: %r %r" % (left.typ, right.typ), self.expr)
arithmetic_pair = {left.typ.typ, right.typ.typ}
# Special Case: Simplify any literal to literal arithmetic at compile time.
if left.typ.is_literal and right.typ.is_literal and \
isinstance(right.value, int) and isinstance(left.value, int):
if isinstance(self.expr.op, ast.Add):
val = left.value + right.value
elif isinstance(self.expr.op, ast.Sub):
val = left.value - right.value
elif isinstance(self.expr.op, ast.Mult):
val = left.value * right.value
elif isinstance(self.expr.op, ast.Div):
val = left.value // right.value
elif isinstance(self.expr.op, ast.Mod):
val = left.value % right.value
elif isinstance(self.expr.op, ast.Pow):
val = left.value ** right.value
else:
raise ParserException('Unsupported literal operator: %s' % str(type(self.expr.op)), self.expr)
num = ast.Num(val)
num.source_code = self.expr.source_code
num.lineno = self.expr.lineno
num.col_offset = self.expr.col_offset
return Expr.parse_value_expr(num, self.context)
        # Special case with uint256 where an int literal may be cast.
if arithmetic_pair == {'uint256', 'int128'}:
# Check right side literal.
if right.typ.is_literal and SizeLimits.in_bounds('uint256', right.value):
right = LLLnode.from_list(right.value, typ=BaseType('uint256', None, is_literal=True), pos=getpos(self.expr))
arithmetic_pair = {left.typ.typ, right.typ.typ}
# Check left side literal.
elif left.typ.is_literal and SizeLimits.in_bounds('uint256', left.value):
left = LLLnode.from_list(left.value, typ=BaseType('uint256', None, is_literal=True), pos=getpos(self.expr))
arithmetic_pair = {left.typ.typ, right.typ.typ}
# Only allow explicit conversions to occur.
if left.typ.typ != right.typ.typ:
raise TypeMismatchException("Cannot implicitly convert {} to {}.".format(left.typ.typ, right.typ.typ), self.expr)
ltyp, rtyp = left.typ.typ, right.typ.typ
if isinstance(self.expr.op, (ast.Add, ast.Sub)):
if left.typ.unit != right.typ.unit and left.typ.unit is not None and right.typ.unit is not None:
raise TypeMismatchException("Unit mismatch: %r %r" % (left.typ.unit, right.typ.unit), self.expr)
if left.typ.positional and right.typ.positional and isinstance(self.expr.op, ast.Add):
raise TypeMismatchException("Cannot add two positional units!", self.expr)
new_unit = left.typ.unit or right.typ.unit
new_positional = left.typ.positional ^ right.typ.positional # xor, as subtracting two positionals gives a delta
op = 'add' if isinstance(self.expr.op, ast.Add) else 'sub'
if ltyp == 'uint256' and isinstance(self.expr.op, ast.Add):
o = LLLnode.from_list(['seq',
# Checks that: a + b >= a
['assert', ['ge', ['add', left, right], left]],
['add', left, right]], typ=BaseType('uint256', new_unit, new_positional), pos=getpos(self.expr))
elif ltyp == 'uint256' and isinstance(self.expr.op, ast.Sub):
o = LLLnode.from_list(['seq',
# Checks that: a >= b
['assert', ['ge', left, right]],
['sub', left, right]], typ=BaseType('uint256', new_unit, new_positional), pos=getpos(self.expr))
elif ltyp == rtyp:
o = LLLnode.from_list([op, left, right], typ=BaseType(ltyp, new_unit, new_positional), pos=getpos(self.expr))
else:
raise Exception("Unsupported Operation '%r(%r, %r)'" % (op, ltyp, rtyp))
elif isinstance(self.expr.op, ast.Mult):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot multiply positional values!", self.expr)
new_unit = combine_units(left.typ.unit, right.typ.unit)
if ltyp == rtyp == 'uint256':
o = LLLnode.from_list(['if', ['eq', left, 0], [0],
['seq', ['assert', ['eq', ['div', ['mul', left, right], left], right]],
['mul', left, right]]], typ=BaseType('uint256', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp == 'int128':
o = LLLnode.from_list(['mul', left, right], typ=BaseType('int128', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp == 'decimal':
o = LLLnode.from_list(['with', 'r', right, ['with', 'l', left,
['with', 'ans', ['mul', 'l', 'r'],
['seq',
['assert', ['or', ['eq', ['sdiv', 'ans', 'l'], 'r'], ['iszero', 'l']]],
['sdiv', 'ans', DECIMAL_DIVISOR]]]]], typ=BaseType('decimal', new_unit), pos=getpos(self.expr))
else:
raise Exception("Unsupported Operation 'mul(%r, %r)'" % (ltyp, rtyp))
elif isinstance(self.expr.op, ast.Div):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot divide positional values!", self.expr)
new_unit = combine_units(left.typ.unit, right.typ.unit, div=True)
if ltyp == rtyp == 'uint256':
o = LLLnode.from_list(['seq',
# Checks that: b != 0
['assert', right],
['div', left, right]], typ=BaseType('uint256', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp == 'int128':
o = LLLnode.from_list(['sdiv', left, ['clamp_nonzero', right]], typ=BaseType('int128', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp == 'decimal':
o = LLLnode.from_list(['with', 'l', left, ['with', 'r', ['clamp_nonzero', right],
['sdiv', ['mul', 'l', DECIMAL_DIVISOR], 'r']]],
typ=BaseType('decimal', new_unit), pos=getpos(self.expr))
else:
raise Exception("Unsupported Operation 'div(%r, %r)'" % (ltyp, rtyp))
elif isinstance(self.expr.op, ast.Mod):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot use positional values as modulus arguments!", self.expr)
if left.typ.unit != right.typ.unit and left.typ.unit is not None and right.typ.unit is not None:
raise TypeMismatchException("Modulus arguments must have same unit", self.expr)
new_unit = left.typ.unit or right.typ.unit
if ltyp == rtyp == 'uint256':
o = LLLnode.from_list(['seq',
['assert', right],
['mod', left, right]], typ=BaseType('uint256', new_unit), pos=getpos(self.expr))
elif ltyp == rtyp:
o = LLLnode.from_list(['smod', left, ['clamp_nonzero', right]], typ=BaseType(ltyp, new_unit), pos=getpos(self.expr))
else:
raise Exception("Unsupported Operation 'mod(%r, %r)'" % (ltyp, rtyp))
elif isinstance(self.expr.op, ast.Pow):
if left.typ.positional or right.typ.positional:
raise TypeMismatchException("Cannot use positional values as exponential arguments!", self.expr)
if right.typ.unit:
raise TypeMismatchException("Cannot use unit values as exponents", self.expr)
if ltyp != 'int128' and ltyp != 'uint256' and isinstance(self.expr.right, ast.Name):
raise TypeMismatchException("Cannot use dynamic values as exponents, for unit base types", self.expr)
if ltyp == rtyp == 'uint256':
o = LLLnode.from_list(['seq',
['assert', ['or', ['or', ['eq', right, 1], ['iszero', right]],
['lt', left, ['exp', left, right]]]],
['exp', left, right]], typ=BaseType('uint256'), pos=getpos(self.expr))
elif ltyp == rtyp == 'int128':
new_unit = left.typ.unit
if left.typ.unit and not isinstance(self.expr.right, ast.Name):
new_unit = {left.typ.unit.copy().popitem()[0]: self.expr.right.n}
o = LLLnode.from_list(['exp', left, right], typ=BaseType('int128', new_unit), pos=getpos(self.expr))
else:
raise TypeMismatchException('Only whole number exponents are supported', self.expr)
else:
raise Exception("Unsupported binop: %r" % self.expr.op)
p = ['seq']
if pre_alloc_left:
p.append(pre_alloc_left)
if pre_alloc_right:
p.append(pre_alloc_right)
if o.typ.typ == 'int128':
p.append(['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]])
return LLLnode.from_list(p, typ=o.typ, pos=getpos(self.expr))
elif o.typ.typ == 'decimal':
p.append(['clamp', ['mload', MemoryPositions.MINDECIMAL], o, ['mload', MemoryPositions.MAXDECIMAL]])
return LLLnode.from_list(p, typ=o.typ, pos=getpos(self.expr))
if o.typ.typ == 'uint256':
p.append(o)
return LLLnode.from_list(p, typ=o.typ, pos=getpos(self.expr))
else:
raise Exception("%r %r" % (o, o.typ))
def build_in_comparator(self):
from vyper.parser.parser import make_setter
left = Expr(self.expr.left, self.context).lll_node
right = Expr(self.expr.comparators[0], self.context).lll_node
if left.typ.typ != right.typ.subtype.typ:
raise TypeMismatchException("%s cannot be in a list of %s" % (left.typ.typ, right.typ.subtype.typ))
result_placeholder = self.context.new_placeholder(BaseType('bool'))
setter = []
# Load nth item from list in memory.
if right.value == 'multi':
# Copy literal to memory to be compared.
tmp_list = LLLnode.from_list(
obj=self.context.new_placeholder(ListType(right.typ.subtype, right.typ.count)),
typ=ListType(right.typ.subtype, right.typ.count),
location='memory'
)
setter = make_setter(tmp_list, right, 'memory', pos=getpos(self.expr))
load_i_from_list = ['mload', ['add', tmp_list, ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]]]]
elif right.location == "storage":
load_i_from_list = ['sload', ['add', ['sha3_32', right], ['mload', MemoryPositions.FREE_LOOP_INDEX]]]
else:
load_i_from_list = ['mload', ['add', right, ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]]]]
# Condition repeat loop has to break on.
break_loop_condition = [
'if',
['eq', unwrap_location(left), load_i_from_list],
['seq',
['mstore', '_result', 1], # store true.
'break']
]
# Repeat loop to loop-compare each item in the list.
for_loop_sequence = [
['mstore', result_placeholder, 0],
['with', '_result', result_placeholder,
['repeat', MemoryPositions.FREE_LOOP_INDEX, 0, right.typ.count, break_loop_condition]],
['mload', result_placeholder]
]
# Save list to memory, so one can iterate over it,
# used when literal was created with tmp_list.
if setter:
compare_sequence = ['seq', setter] + for_loop_sequence
else:
compare_sequence = ['seq'] + for_loop_sequence
# Compare the result of the repeat loop to 1, to know if a match was found.
o = LLLnode.from_list([
'eq', 1,
compare_sequence],
typ='bool',
annotation="in comporator"
)
return o
@staticmethod
    def _signed_to_unsigned_comparison_op(op):
translation_map = {
'sgt': 'gt',
'sge': 'ge',
'sle': 'le',
'slt': 'lt',
}
if op in translation_map:
return translation_map[op]
else:
return op
def compare(self):
left = Expr.parse_value_expr(self.expr.left, self.context)
right = Expr.parse_value_expr(self.expr.comparators[0], self.context)
if isinstance(right.typ, NullType):
raise InvalidLiteralException('Comparison to None is not allowed, compare against a default value.', self.expr)
if isinstance(left.typ, ByteArrayType) and isinstance(right.typ, ByteArrayType):
if left.typ.maxlen != right.typ.maxlen:
raise TypeMismatchException('Can only compare bytes of the same length', self.expr)
if left.typ.maxlen > 32 or right.typ.maxlen > 32:
raise ParserException('Can only compare bytes of length shorter than 32 bytes', self.expr)
elif isinstance(self.expr.ops[0], ast.In) and \
isinstance(right.typ, ListType):
if not are_units_compatible(left.typ, right.typ.subtype) and not are_units_compatible(right.typ.subtype, left.typ):
raise TypeMismatchException("Can't use IN comparison with different types!", self.expr)
return self.build_in_comparator()
else:
if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ):
raise TypeMismatchException("Can't compare values with different units!", self.expr)
if len(self.expr.ops) != 1:
raise StructureException("Cannot have a comparison with more than two elements", self.expr)
if isinstance(self.expr.ops[0], ast.Gt):
op = 'sgt'
elif isinstance(self.expr.ops[0], ast.GtE):
op = 'sge'
elif isinstance(self.expr.ops[0], ast.LtE):
op = 'sle'
elif isinstance(self.expr.ops[0], ast.Lt):
op = 'slt'
elif isinstance(self.expr.ops[0], ast.Eq):
op = 'eq'
elif isinstance(self.expr.ops[0], ast.NotEq):
op = 'ne'
else:
raise Exception("Unsupported comparison operator")
# Compare (limited to 32) byte arrays.
        if isinstance(left.typ, ByteArrayType) and isinstance(right.typ, ByteArrayType):
left = Expr(self.expr.left, self.context).lll_node
right = Expr(self.expr.comparators[0], self.context).lll_node
def load_bytearray(side):
if side.location == 'memory':
return ['mload', ['add', 32, side]]
elif side.location == 'storage':
return ['sload', ['add', 1, ['sha3_32', side]]]
return LLLnode.from_list(
[op, load_bytearray(left), load_bytearray(right)], typ='bool', pos=getpos(self.expr))
# Compare other types.
if not is_numeric_type(left.typ) or not is_numeric_type(right.typ):
if op not in ('eq', 'ne'):
raise TypeMismatchException("Invalid type for comparison op", self.expr)
left_type, right_type = left.typ.typ, right.typ.typ
# Special Case: comparison of a literal integer. If in valid range allow it to be compared.
if {left_type, right_type} == {'int128', 'uint256'} and {left.typ.is_literal, right.typ.is_literal} == {True, False}:
comparison_allowed = False
if left.typ.is_literal and SizeLimits.in_bounds(right_type, left.value):
comparison_allowed = True
elif right.typ.is_literal and SizeLimits.in_bounds(left_type, right.value):
comparison_allowed = True
            op = self._signed_to_unsigned_comparison_op(op)
if comparison_allowed:
return LLLnode.from_list([op, left, right], typ='bool', pos=getpos(self.expr))
elif {left_type, right_type} == {'uint256', 'uint256'}:
            op = self._signed_to_unsigned_comparison_op(op)
elif (left_type in ('decimal', 'int128') or right_type in ('decimal', 'int128')) and left_type != right_type:
raise TypeMismatchException(
'Implicit conversion from {} to {} disallowed, please convert.'.format(left_type, right_type),
self.expr
)
if left_type == right_type:
return LLLnode.from_list([op, left, right], typ='bool', pos=getpos(self.expr))
else:
raise TypeMismatchException("Unsupported types for comparison: %r %r" % (left_type, right_type), self.expr)
def boolean_operations(self):
# Iterate through values
for value in self.expr.values:
# Check for calls at assignment
if self.context.in_assignment and isinstance(value, ast.Call):
raise StructureException("Boolean operations with calls may not be performed on assignment", self.expr)
# Check for boolean operations with non-boolean inputs
_expr = Expr.parse_value_expr(value, self.context)
if not is_base_type(_expr.typ, 'bool'):
raise TypeMismatchException("Boolean operations can only be between booleans!", self.expr)
# TODO: Handle special case of literals and simplify at compile time
# Check for valid ops
if isinstance(self.expr.op, ast.And):
op = 'and'
elif isinstance(self.expr.op, ast.Or):
op = 'or'
else:
raise Exception("Unsupported bool op: " + self.expr.op)
# Handle different numbers of inputs
count = len(self.expr.values)
if count < 2:
raise StructureException("Expected at least two arguments for a bool op", self.expr)
elif count == 2:
left = Expr.parse_value_expr(self.expr.values[0], self.context)
right = Expr.parse_value_expr(self.expr.values[1], self.context)
return LLLnode.from_list([op, left, right], typ='bool', pos=getpos(self.expr))
else:
left = Expr.parse_value_expr(self.expr.values[0], self.context)
right = Expr.parse_value_expr(self.expr.values[1], self.context)
p = ['seq', [op, left, right]]
values = self.expr.values[2:]
while len(values) > 0:
value = Expr.parse_value_expr(values[0], self.context)
p = [op, value, p]
values = values[1:]
return LLLnode.from_list(p, typ='bool', pos=getpos(self.expr))
# Unary operations (only "not" supported)
def unary_operations(self):
operand = Expr.parse_value_expr(self.expr.operand, self.context)
if isinstance(self.expr.op, ast.Not):
if isinstance(operand.typ, BaseType) and operand.typ.typ == 'bool':
return LLLnode.from_list(["iszero", operand], typ='bool', pos=getpos(self.expr))
else:
raise TypeMismatchException("Only bool is supported for not operation, %r supplied." % operand.typ, self.expr)
elif isinstance(self.expr.op, ast.USub):
if not is_numeric_type(operand.typ):
raise TypeMismatchException("Unsupported type for negation: %r" % operand.typ, operand)
if operand.typ.is_literal and 'int' in operand.typ.typ:
num = ast.Num(0 - operand.value)
num.source_code = self.expr.source_code
num.lineno = self.expr.lineno
num.col_offset = self.expr.col_offset
return Expr.parse_value_expr(num, self.context)
return LLLnode.from_list(["sub", 0, operand], typ=operand.typ, pos=getpos(self.expr))
else:
raise StructureException("Only the 'not' unary operator is supported")
# Function calls
def call(self):
from vyper.functions import (
dispatch_table,
)
if isinstance(self.expr.func, ast.Name):
function_name = self.expr.func.id
if function_name in dispatch_table:
return dispatch_table[function_name](self.expr, self.context)
# Struct constructors do not need `self` prefix.
elif function_name in self.context.structs:
if not self.context.in_assignment:
raise StructureException("Struct constructor must be called in RHS of assignment.", self.expr)
args = self.expr.args
if len(args) != 1:
raise StructureException("Struct constructor is called with one argument only", self.expr)
arg = args[0]
if not isinstance(arg, ast.Dict):
raise TypeMismatchException("Struct can only be constructed with a dict", self.expr)
sub = Expr.struct_literals(arg, self.context)
if sub.typ.name is not None:
raise TypeMismatchException("Struct can only be constructed with a dict", self.expr)
typ = StructType(sub.typ.members, function_name)
# OR:
# sub.typ = typ
# return sub
return LLLnode(sub.value, typ=typ, args=sub.args, location=sub.location, pos=getpos(self.expr), add_gas_estimate=sub.add_gas_estimate, valency=sub.valency, annotation=function_name)
else:
err_msg = "Not a top-level function: {}".format(function_name)
if function_name in [x.split('(')[0] for x, _ in self.context.sigs['self'].items()]:
err_msg += ". Did you mean self.{}?".format(function_name)
raise StructureException(err_msg, self.expr)
elif isinstance(self.expr.func, ast.Attribute) and isinstance(self.expr.func.value, ast.Name) and self.expr.func.value.id == "self":
return self_call.make_call(self.expr, self.context)
else:
return external_call.make_external_call(self.expr, self.context)
def list_literals(self):
if not len(self.expr.elts):
raise StructureException("List must have elements", self.expr)
o = []
out_type = None
for elt in self.expr.elts:
o.append(Expr(elt, self.context).lll_node)
if not out_type:
out_type = o[-1].typ
previous_type = o[-1].typ.subtype.typ if hasattr(o[-1].typ, 'subtype') else o[-1].typ
current_type = out_type.subtype.typ if hasattr(out_type, 'subtype') else out_type
if len(o) > 1 and previous_type != current_type:
raise TypeMismatchException("Lists may only contain one type", self.expr)
return LLLnode.from_list(["multi"] + o, typ=ListType(out_type, len(o)), pos=getpos(self.expr))
def dict_fail(self):
warnings.warn(
"Anonymous structs have been removed in"
" favor of named structs, see VIP300",
DeprecationWarning
)
raise InvalidLiteralException("Invalid literal: %r" % ast.dump(self.expr), self.expr)
def struct_literals(expr, context):
o = {}
members = {}
for key, value in zip(expr.keys, expr.values):
if not isinstance(key, ast.Name) or not is_varname_valid(key.id, context.custom_units, context.structs):
raise TypeMismatchException("Invalid member variable for struct: %r" % vars(key).get('id', key), key)
if key.id in o:
raise TypeMismatchException("Member variable duplicated: " + key.id, key)
o[key.id] = Expr(value, context).lll_node
members[key.id] = o[key.id].typ
return LLLnode.from_list(["multi"] + [o[key] for key in sorted(list(o.keys()))], typ=StructType(members, None), pos=getpos(expr))
def tuple_literals(self):
if not len(self.expr.elts):
raise StructureException("Tuple must have elements", self.expr)
o = []
for elt in self.expr.elts:
o.append(Expr(elt, self.context).lll_node)
return LLLnode.from_list(["multi"] + o, typ=TupleType(o), pos=getpos(self.expr))
# Parse an expression that results in a value
def parse_value_expr(expr, context):
return unwrap_location(Expr(expr, context).lll_node)
# Parse an expression that represents an address in memory or storage
def parse_variable_location(expr, context):
o = Expr(expr, context).lll_node
if not o.location:
raise Exception("Looking for a variable location, instead got a value")
return o
| 51.225478
| 197
| 0.590321
|
ede0ea0a1955d40cf19fd9faa1243c25c3c9690f
| 5,471
|
py
|
Python
|
redshift_paper/code/conversions.py
|
jkadowaki/paper_plots
|
d52a4d8a5676d5d2e00fbda008c9ad7978e68d8c
|
[
"MIT"
] | null | null | null |
redshift_paper/code/conversions.py
|
jkadowaki/paper_plots
|
d52a4d8a5676d5d2e00fbda008c9ad7978e68d8c
|
[
"MIT"
] | null | null | null |
redshift_paper/code/conversions.py
|
jkadowaki/paper_plots
|
d52a4d8a5676d5d2e00fbda008c9ad7978e68d8c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from collections.abc import Iterable
import numpy as np
# Constants
RAD2SEC = 206265 # Arcseconds in a Radian
DEG2RAD = np.pi / 180 # Radians in a Degree
MIN2SEC = 60 # (Arc)Seconds in a(n) (Arc)Minute
DEG2SEC = 3600 # Arc-seconds in a Degree
HOUR2DEG = 360/24 # Degrees in an Hour
NUM_DECIMAL_PLACES = 3 # User-defined rounding preference
################################################################################
"""
CONVERSIONS.py
List of useful methods for common unit conversions in astronomy for the
local universe, where Hubble's Law (v = cz = H0*d) applies.
Methods:
(1) GET_ABSOLUTE_MAGNITUDE: Converts apparent to absolute magnitudes.
(2) COORD2DEG: Converts coordinates in HH:MM:SS/DD:MM:SS format to degrees.
(3) GET_ANGULAR_SIZE: Computes angular distance between 2 coordinates.
(4) GET_PHYSICAL_SIZE: Computes physical distance given angular size.
"""
################################################################################
def get_absolute_magnitude(app_magnitude, velocity, extinction=0, H0=70):
"""
Computes an object's absolute magnitude given its apparent magnitude,
    its recessional velocity, and an assumed Hubble's constant.
ARGS:
app_magnitude (float): Object's Apparent Magnitude
velocity (float): Recessional Velocity in (km/s) Attributed to the
Universe's Expansion Rate
extinction (float): The amount of extinction due to line-of-sight dust
H0 (float): Hubble's Constant [(km/s) / Mpc]
RETURNS:
abs_magnitude (float): Object's Absolute Magnitude
"""
abs_magnitude = app_magnitude - 5 * np.log10(velocity / H0) - 25 - extinction
return np.round(abs_magnitude, NUM_DECIMAL_PLACES)
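# --- Hedged worked example (illustrative numbers, not from any dataset) ---
# v = 7000 km/s with H0 = 70 gives d = v/H0 = 100 Mpc; the distance modulus
# is 5*log10(100) + 25 = 35, so m = 15 corresponds to M = 15 - 35 = -20.
if __name__ == '__main__':
    assert get_absolute_magnitude(15.0, 7000.0) == -20.0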
################################################################################
def convert_single_coord(coord, right_ascension):
"""
Converts 1 RA or Declination coordinate to degrees.
ARGS:
coord (float/str): Right ascension or declination in fractional
degrees or in HH:MM:SS (RA) / DD:MM:SS (dec).
right_ascension (bool): Flag to indicate right ascension (True)
or declination (False).
RETURNS:
degree (float)
"""
try:
# Return if coord is already in units of fractional degrees
return float(coord)
    except (ValueError, TypeError):
if right_ascension:
# Converts HH:MM:SS --> degree
hour, min, sec = coord.split(':')
ra = (int(hour) + int(min)/MIN2SEC + float(sec)/DEG2SEC) * HOUR2DEG
return ra
        else:
            # Converts DD:MM:SS --> degree (a leading '-' applies to the whole value)
            deg, min, sec = coord.split(':')
            sign = -1 if coord.strip().startswith('-') else 1
            dec = sign * (abs(int(deg)) + int(min)/MIN2SEC + float(sec)/DEG2SEC)
            return dec
#------------------------------------------------------------------------------#
def coord2degree(coord, right_ascension=True):
"""
Converts a single or an iterable of coords to degrees.
ARGS:
coord (float/str -or- array of floats/strs)
RETURNS:
(float -or- array of floats): Converted coordinate(s) in degrees
"""
# Checks if coord is a single value or an iterable
if not isinstance(coord, Iterable):
return convert_single_coord(coord, right_ascension)
return np.array([convert_single_coord(c, right_ascension) for c in coord])
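# --- Hedged worked example: '12:30:00' in right ascension is
# (12 + 30/60) hours = 12.5 * 15 = 187.5 degrees. ---
if __name__ == '__main__':
    assert coord2degree('12:30:00', right_ascension=True) == 187.5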
################################################################################
def get_angular_size(ra1, dec1, ra2, dec2):
"""
Computes the projected angular size/separation between two points.
ARGS:
ra1 (float or string): 1st Object's Right Ascension
dec1 (float or string): 1st Object's Declination
ra2 (float or string): 2nd Object's Right Ascension
dec2 (float or string): 2nd Object's Declination
RETURNS:
angular_size (float): Angular size/separation (arcsec) between 2 points.
"""
ra1 = coord2degree(ra1, right_ascension=True)
ra2 = coord2degree(ra2, right_ascension=True)
dec1 = coord2degree(dec1, right_ascension=False)
dec2 = coord2degree(dec2, right_ascension=False)
# Computes RA & Dec Offsets (in arcsec)
ra_offset = DEG2SEC * (ra2-ra1) * np.cos((dec1+dec2)/2 * DEG2RAD)
dec_offset = DEG2SEC * (dec2-dec1)
# Computes Angular Separation (in arcsec)
angular_size = np.sqrt( ra_offset**2 + dec_offset**2 )
return np.round(angular_size, NUM_DECIMAL_PLACES)
################################################################################
def get_physical_size(angular_size, velocity, H0=70):
"""
Computes the projected physical size/separation for a given angular size or
separation in the local universe.
ARGS:
angular_size (float): Angular size/separation (arcsec) between 2 points.
        velocity (float): Recessional Velocity (km/s) due to expansion
H0 (float): Hubble's Constant [(km/s) / Mpc]
RETURNS:
physical_size (float): Projected physical size/separation (Mpc)
between two points.
"""
# Computes Physical Separation (in Mpc)
physical_size = angular_size * velocity / RAD2SEC / H0
return np.round(physical_size, NUM_DECIMAL_PLACES)
################################################################################
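# --- Hedged worked example for the size helper (illustrative numbers) ---
# At v = 7000 km/s (d = 100 Mpc for H0 = 70), an angular size of
# 206.265 arcsec is 206.265 / RAD2SEC = 1e-3 rad, i.e. 0.1 Mpc projected.
if __name__ == '__main__':
    assert get_physical_size(206.265, 7000.0) == 0.1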
| 36.231788
| 81
| 0.579602
|
7904bc1044928c7eaf3de1c8ab22b76106971ace
| 945
|
py
|
Python
|
saw-remote-api/python/tests/saw/test_llvm_array_swap.py
|
msaaltink/saw-script
|
2e4fc0603da85bb1b188d4739a3386e25eea50ab
|
[
"BSD-3-Clause"
] | 411
|
2015-06-09T22:00:47.000Z
|
2022-03-30T11:41:23.000Z
|
saw-remote-api/python/tests/saw/test_llvm_array_swap.py
|
msaaltink/saw-script
|
2e4fc0603da85bb1b188d4739a3386e25eea50ab
|
[
"BSD-3-Clause"
] | 1,151
|
2015-06-12T20:46:31.000Z
|
2022-03-23T02:56:32.000Z
|
saw-remote-api/python/tests/saw/test_llvm_array_swap.py
|
msaaltink/saw-script
|
2e4fc0603da85bb1b188d4739a3386e25eea50ab
|
[
"BSD-3-Clause"
] | 65
|
2015-06-10T17:52:26.000Z
|
2022-02-10T18:17:06.000Z
|
from pathlib import Path
import unittest
from saw_client import *
from saw_client.llvm import Contract, array, array_ty, void, i32
class ArraySwapContract(Contract):
def specification(self):
a0 = self.fresh_var(i32, "a0")
a1 = self.fresh_var(i32, "a1")
a = self.alloc(array_ty(2, i32),
points_to=array(a0, a1))
self.execute_func(a)
self.points_to(a[0], a1)
self.points_to(a[1], a0)
self.returns(void)
class LLVMArraySwapTest(unittest.TestCase):
def test_llvm_array_swap(self):
connect(reset_server=True)
if __name__ == "__main__": view(LogResults())
bcname = str(Path('tests','saw','test-files', 'llvm_array_swap.bc'))
mod = llvm_load_module(bcname)
result = llvm_verify(mod, 'array_swap', ArraySwapContract())
self.assertIs(result.is_success(), True)
if __name__ == "__main__":
unittest.main()
| 27.794118
| 76
| 0.639153
|
dede81c59fbf6021e5fbcb63af84e1cff4e04d5d
| 1,107
|
py
|
Python
|
linear_regression_scripts.py
|
lucas-mascena/Numerical_Methods
|
e17a8564ed96e2ed7826de21c8340b597047b750
|
[
"MIT"
] | null | null | null |
linear_regression_scripts.py
|
lucas-mascena/Numerical_Methods
|
e17a8564ed96e2ed7826de21c8340b597047b750
|
[
"MIT"
] | null | null | null |
linear_regression_scripts.py
|
lucas-mascena/Numerical_Methods
|
e17a8564ed96e2ed7826de21c8340b597047b750
|
[
"MIT"
] | null | null | null |
'''
For-loop method to search for the parameters
a and b of a straight line equation.
'''
#data points and constants:
x = [3,4,5,6,7,8]
y = [0,7,17,26,35,45]
n = len(x)
#initial value of summation variables:
sumx = sumxy = sumx2 = sumy = 0
for i in range(n):
sumx += x[i]
sumy += y[i]
sumx2 += x[i]**2
sumxy += x[i]*y[i]
xm = sumx/n
ym = sumy/n
#Calculates a and b:
a = (ym*sumx2 - xm*sumxy)/(sumx2 - n*xm**2)
b = (sumxy-xm*sumy)/(sumx2 - n*xm**2)
#Results:
print('The straight line equation:')
print('y = (%.3f) + (%.3f)x' % (a,b))
'''
Numpy library based method to search for parameters
a and b of a straight line equation.
'''
a = b = 0
#import numpy as np
from numpy import array, sum, mean
#arrays and constants:
x = array([3,4,5,6,7,8], float)
y = array([0,7,17,26,35,45], float)
n = len(x)
#Calculates a and b parameters:
a = (mean(y)*sum(x**2) - mean(x)*sum(x*y))/(sum(x**2)-(n*mean(x)**2))
b = (sum(x*y) - (mean(x)*sum(y)))/(sum(x**2) - (n*mean(x)**2))
#Results:
print('The straight line equation:')
print('y = (%.3f) + (%.3f)x' % (a,b))
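'''
Hedged cross-check (not part of the original exercise): numpy.polyfit with
degree 1 returns the coefficients highest power first, i.e. [b, a] in this
script's y = a + b*x notation.
'''
from numpy import polyfit
b_chk, a_chk = polyfit(x, y, 1)
print('polyfit cross-check:')
print('y = (%.3f) + (%.3f)x' % (a_chk, b_chk))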
| 26.357143
| 70
| 0.573622
|
156f6b186c546851ed61e1aebae622f907861fb4
| 1,535
|
py
|
Python
|
src/geocoding/slf/__init__.py
|
msgis/ngsi-timeseries-api
|
5cc7a8beab748cecfd5fba61740f3730361d4e31
|
[
"MIT"
] | null | null | null |
src/geocoding/slf/__init__.py
|
msgis/ngsi-timeseries-api
|
5cc7a8beab748cecfd5fba61740f3730361d4e31
|
[
"MIT"
] | null | null | null |
src/geocoding/slf/__init__.py
|
msgis/ngsi-timeseries-api
|
5cc7a8beab748cecfd5fba61740f3730361d4e31
|
[
"MIT"
] | 1
|
2020-06-14T19:57:59.000Z
|
2020-06-14T19:57:59.000Z
|
"""
Support for working with NGSI Simple Location Format (SLF) data.
SLF is a lightweight format to represent simple 2D geometric figures such as
points, lines and polygons that is used to encode NGSI entity locations as well
as figures in NGSI geographical queries. You can read about it in the
*"Geospatial properties of entities"* and *"Geographical Queries"* sections of
the NGSI spec: http://fiware.github.io/specifications/ngsiv2/stable/.
Note that SLF uses the WGS84 coordinate system
(https://en.wikipedia.org/wiki/World_Geodetic_System#WGS84) and so points are
specified as ``(latitude, longitude)`` pairs whereas in GeoJSON the first
coordinate of a point is the longitude and the second is the latitude.
The ``geotypes`` module provides data types for all the SLF figures and the
``querytypes`` module builds on those types to provide data types to represent
NGSI geographical queries. The ``queryparser`` module provides parsing of NGSI
query strings into ASTs of SLF data types. The ``jsoncodec`` module serialises
SLF data type instances to GeoJSON whereas ``wktcodec`` serialises to WKT.
Additionally, the ``locparser`` module extracts location information from NGSI
entities to build SLF data type instances.
Below is a bird's-eye view of the components in the ``slf`` package.
.. image:: slf-components.png
"""
from .geotypes import *
from .jsoncodec import encode
from .locparser import from_location_attribute
from .queryparser import from_geo_params
from .querytypes import *
from .wktcodec import encode_as_wkt
| 43.857143
| 79
| 0.793485
|
6e6b039c08ad9b6869a1d6170ea98dc65b860f76
| 37,210
|
py
|
Python
|
Competition-Solutions/Audio/GIZ NLP Agricultural Keyword Spotter/Solution 1/model_pl_A_cleaned.py
|
ZindiAfrica/Natural-Language-Processing-NLP-
|
41763b83677f1a4853af397a34d8a82fa9ac45fc
|
[
"MIT"
] | null | null | null |
Competition-Solutions/Audio/GIZ NLP Agricultural Keyword Spotter/Solution 1/model_pl_A_cleaned.py
|
ZindiAfrica/Natural-Language-Processing-NLP-
|
41763b83677f1a4853af397a34d8a82fa9ac45fc
|
[
"MIT"
] | null | null | null |
Competition-Solutions/Audio/GIZ NLP Agricultural Keyword Spotter/Solution 1/model_pl_A_cleaned.py
|
ZindiAfrica/Natural-Language-Processing-NLP-
|
41763b83677f1a4853af397a34d8a82fa9ac45fc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 00:08:10 2020
@author: Shiro
all augmentations, no sampler, EfficientNet-B7 + pseudo labels
"""
import librosa
print(librosa.__version__)
import scipy.io.wavfile
from efficientnet_pytorch import EfficientNet
from audiomentations import Compose, AddGaussianNoise, TimeStretch, PitchShift, Shift, Gain
from torch import nn
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import *
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast, GradScaler
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torch.nn.utils.rnn import *
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast, GradScaler
import torchvision
import torchvision.models as models
import librosa
import librosa.display
import os
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
from tqdm import tqdm
from sklearn.metrics import f1_score
import random
from sklearn.model_selection import StratifiedKFold
SEED = 42
def seed_everything(seed_value):
random.seed(seed_value)
np.random.seed(seed_value)
torch.manual_seed(seed_value)
os.environ['PYTHONHASHSEED'] = str(seed_value)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed_everything(SEED)
### --- SpectAugment --- ###
class DropStripes(nn.Module):
def __init__(self, dim, drop_width, stripes_num):
"""Drop stripes.
Args:
dim: int, dimension along which to drop
drop_width: int, maximum width of stripes to drop
stripes_num: int, how many stripes to drop
"""
super(DropStripes, self).__init__()
assert dim in [2, 3] # dim 2: time; dim 3: frequency
self.dim = dim
self.drop_width = drop_width
self.stripes_num = stripes_num
def forward(self, input, replacement):
"""input: (batch_size, channels, time_steps, freq_bins)"""
assert input.ndimension() == 4
if self.training is False:
return input
else:
batch_size = input.shape[0]
total_width = input.shape[self.dim]
for n in range(batch_size):
self.transform_slice(input[n], total_width, replacement=replacement[n])
return input
def transform_slice(self, e, total_width, replacement=0.):
"""e: (channels, time_steps, freq_bins)"""
for _ in range(self.stripes_num):
distance = torch.randint(low=0, high=self.drop_width, size=(1,))[0]
bgn = torch.randint(low=0, high=total_width - distance, size=(1,))[0]
#print(replacement.shape)
if self.dim == 2:
e[:, bgn : bgn + distance, :] = replacement
elif self.dim == 3:
e[:, :, bgn : bgn + distance] = replacement
class SpecAugmentation(nn.Module):
def __init__(self, time_drop_width, time_stripes_num, freq_drop_width,
freq_stripes_num, replace="mean"):
"""Spec augmetation.
[ref] Park, D.S., Chan, W., Zhang, Y., Chiu, C.C., Zoph, B., Cubuk, E.D.
and Le, Q.V., 2019. Specaugment: A simple data augmentation method
for automatic speech recognition. arXiv preprint arXiv:1904.08779.
Args:
time_drop_width: int
time_stripes_num: int
freq_drop_width: int
freq_stripes_num: int
"""
super(SpecAugmentation, self).__init__()
self.time_dropper = DropStripes(dim=2, drop_width=time_drop_width,
stripes_num=time_stripes_num)
self.freq_dropper = DropStripes(dim=3, drop_width=freq_drop_width,
stripes_num=freq_stripes_num)
self.replace = replace
def forward(self, input):
#print(input.shape)
if self.replace == "zero":
replacement = torch.zeros(len(input)).to(input.device)
else:
replacement = input.mean(-1).mean(-1)
x = self.time_dropper(input, replacement=replacement)
x = self.freq_dropper(x, replacement=replacement)
return x
### --- MixUp --- ###
class Mixup(object):
def __init__(self, mixup_alpha, random_seed=1234):
"""Mixup coefficient generator.
"""
self.mixup_alpha = mixup_alpha
self.random_state = np.random.RandomState(random_seed)
def get_lambda(self, batch_size):
"""Get mixup random coefficients.
Args:
batch_size: int
Returns:
mixup_lambdas: (batch_size,)
"""
mixup_lambdas = []
for n in range(0, batch_size, 2):
lam = self.random_state.beta(self.mixup_alpha, self.mixup_alpha, 1)[0]
mixup_lambdas.append(lam)
mixup_lambdas.append(1. - lam)
return np.array(mixup_lambdas)
def do_mixup(x, mixup_lambda):
"""Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
(1, 3, 5, ...).
Args:
x: (batch_size * 2, ...)
mixup_lambda: (batch_size * 2,)
Returns:
out: (batch_size, ...)
"""
out = (x[0 :: 2].transpose(0, -1) * mixup_lambda[0 :: 2] + \
x[1 :: 2].transpose(0, -1) * mixup_lambda[1 :: 2]).transpose(0, -1)
return out
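# --- Hedged sanity check (not part of the original training script) ---
# Mixing a zeros/ones pair with lambda = (0.25, 0.75) should give a single
# mixed sample equal to 0.25*0 + 0.75*1 = 0.75 everywhere.
if __name__ == "__main__":
    _x = torch.stack([torch.zeros(4), torch.ones(4)])  # (batch_size*2, data)
    _lam = torch.tensor([0.25, 0.75])
    assert torch.allclose(do_mixup(_x, _lam), torch.full((1, 4), 0.75))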
### --- MODEL --- ###
class AudioClassifier(nn.Module):
def __init__(self, backbone, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
"""Classifier for a new task using pretrained Cnn14 as a sub module.
"""
super(AudioClassifier, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
self.bn = nn.BatchNorm2d(3)
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.backbone = backbone
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, length, data_length)
"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
with autocast(False):
x = self.logmel_extractor(x)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.bn(torch.cat([x,x,x], dim=1))
x = self.backbone(x)
return x
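# --- Hedged shape check (illustrative; a tiny dummy backbone stands in for
# the EfficientNet the real script uses elsewhere) ---
if __name__ == "__main__":
    _dummy = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(3, 193))
    _clf = AudioClassifier(_dummy, sample_rate=32000, window_size=1024,
                           hop_size=320, mel_bins=64, fmin=50, fmax=14000,
                           classes_num=193).eval()
    with torch.no_grad():
        print(_clf(torch.randn(2, 32000)).shape)  # expected: torch.Size([2, 193])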
class AudioClassifierHub(nn.Module):
def __init__(self, backbone, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
"""Classifier for a new task using pretrained Cnn14 as a sub module.
"""
super(AudioClassifierHub, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
self.bn = nn.BatchNorm2d(3)
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.backbone = backbone #nn.Sequential(*list(EfficientNet.from_pretrained('efficientnet-b5').children())[:-2])
in_feat = backbone.classifier.in_features
self.backbone.classifier = nn.Linear(in_feat, classes_num)
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, length, data_length)
"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
with autocast(False):
x = self.logmel_extractor(x)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.bn(torch.cat([x,x,x], dim=1))
x = self.backbone(x)
return x
### --- Sampler ---###
from torch.utils.data import Dataset, Sampler, DistributedSampler, DataLoader
class AgrinetDatasetSampler(Sampler):
def __init__(self, dataset):
self.num_samples = len(dataset)
self.indices = list(range(self.num_samples))
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
def _get_label(self, dataset, idx):
label = dataset.get_label(idx)
return label
### --- Dataset and Dataloader --- ####
class AudioGeneratorDataset(torch.utils.data.Dataset):
def __init__(self, path_audio, y, resample_freq = 32000, max_length=3, augmentation=[], validation=False, num_class=264, pseudo_labels=None):
self.labels2idx = {'Pump': 0, 'Spinach': 1, 'abalimi': 2, 'afukirira': 3, 'agriculture': 4, 'akammwanyi': 5, 'akamonde': 6, 'akasaanyi': 7, 'akatunda': 8, 'akatungulu': 9,
'akawuka': 10, 'amakoola': 11, 'amakungula': 12, 'amalagala': 13, 'amappapaali': 14, 'amatooke': 15, 'banana': 16, 'beans': 17, 'bibala': 18, 'bulimi': 19, 'butterfly': 20, 'cabbages': 21,
'cassava': 22, 'caterpillar': 23, 'caterpillars': 24, 'coffee': 25, 'crop': 26, 'ddagala': 27, 'dig': 28, 'disease': 29, 'doodo': 30, 'drought': 31, 'ebbugga': 32, 'ebibala': 33, 'ebigimusa': 34,
'ebijanjaalo': 35, 'ebijjanjalo': 36, 'ebikajjo': 37, 'ebikolo': 38, 'ebikongoliro': 39, 'ebikoola': 40, 'ebimera': 41, 'ebinyebwa': 42, 'ebirime': 43, 'ebisaanyi': 44, 'ebisooli': 45,
'ebisoolisooli': 46, 'ebitooke': 47, 'ebiwojjolo': 48, 'ebiwuka': 49, 'ebyobulimi': 50, 'eddagala': 51, 'eggobe': 52, 'ejjobyo': 53, 'ekibala': 54, 'ekigimusa': 55, 'ekijanjaalo': 56,
'ekikajjo': 57, 'ekikolo': 58, 'ekikoola': 59, 'ekimera': 60, 'ekirime': 61, 'ekirwadde': 62, 'ekisaanyi': 63, 'ekitooke': 64, 'ekiwojjolo': 65, 'ekyeya': 66, 'emboga': 67, 'emicungwa': 68,
'emisiri': 69, 'emiyembe': 70, 'emmwanyi': 71, 'endagala': 72, 'endokwa': 73, 'endwadde': 74, 'enkota': 75, 'ennima': 76, 'ennimiro': 77, 'ennyaanya': 78, 'ensigo': 79, 'ensiringanyi': 80, 'ensujju': 81,
'ensuku': 82, 'ensukusa': 83, 'enva endiirwa': 84, 'eppapaali': 85, 'faamu': 86, 'farm': 87, 'farmer': 88, 'farming instructor': 89, 'fertilizer': 90, 'fruit': 91, 'fruit picking': 92,
'garden': 93, 'greens': 94, 'ground nuts': 95, 'harvest': 96, 'harvesting': 97, 'insect': 98, 'insects': 99, 'irish potatoes': 100, 'irrigate': 101, 'kaamulali': 102, 'kasaanyi': 103, 'kassooli': 104,
'kikajjo': 105, 'kikolo': 106, 'kisaanyi': 107, 'kukungula': 108, 'leaf': 109, 'leaves': 110, 'lumonde': 111, 'lusuku': 112, 'maize': 113, 'maize stalk borer': 114, 'maize streak virus': 115, 'mango': 116, 'mangoes': 117, 'matooke': 118,
'matooke seedlings': 119, 'medicine': 120, 'miceere': 121, 'micungwa': 122, 'mpeke': 123, 'muceere': 124, 'mucungwa': 125, 'mulimi': 126, 'munyeera': 127, 'muwogo': 128,
'nakavundira': 129, 'nambaale': 130, 'namuginga': 131, 'ndwadde': 132, 'nfukirira': 133, 'nnakati': 134, 'nnasale beedi': 135, 'nnimiro': 136, 'nnyaanya': 137, 'npk': 138, 'nursery bed': 139,
'obulimi': 140, 'obulwadde': 141, 'obumonde': 142, 'obusaanyi': 143, 'obutunda': 144, 'obutungulu': 145, 'obuwuka': 146, 'okufukirira': 147, 'okufuuyira': 148, 'okugimusa': 149, 'okukkoola': 150,
'okukungula': 151, 'okulima': 152, 'okulimibwa': 153, 'okunnoga': 154, 'okusaasaana': 155, 'okusaasaanya': 156, 'okusiga': 157,
'okusimba': 158, 'okuzifuuyira': 159, 'olusuku': 160, 'omuceere': 161, 'omucungwa': 162, 'omulimi': 163, 'omulimisa': 164, 'omusiri': 165, 'omuyembe': 166,
'onion': 167, 'orange': 168, 'pampu': 169, 'passion fruit': 170, 'pawpaw': 171, 'pepper': 172, 'plant': 173, 'plantation': 174, 'ppaapaali': 175, 'pumpkin': 176, 'rice': 177, 'seed': 178,
'sikungula': 179, 'sow': 180, 'spray': 181, 'spread': 182, 'suckers': 183, 'sugarcane': 184, 'sukumawiki': 185, 'super grow': 186, 'sweet potatoes': 187, 'tomatoes': 188, 'vegetables': 189,
'watermelon': 190, 'weeding': 191, 'worm': 192}
self.idx2labels = {k:v for v,k in self.labels2idx.items()}
identity = np.eye(num_class)
self.augmentation = set(augmentation)
self.samples = path_audio
self.max_length = max_length # 99% are shorter than 3 sec
self.resample_freq=resample_freq
self.validation = validation
        self.y = np.array([identity[self.labels2idx[t]] for t in y]).astype(np.float32) # convert into one-hot labels to match the dimension of the pseudo-label data
self.num_class = num_class
self.noise = Compose([AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.6),
TimeStretch(min_rate=0.8, max_rate=1.25, p=0.6),
PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
Shift(min_fraction=-0.5, max_fraction=0.5, p=0.5),
Gain(min_gain_in_db=-12, max_gain_in_db=12, p=0.6),
])
if pseudo_labels is not None:
self.add_pl(pseudo_labels[0], pseudo_labels[1])
def load_raw_audio(self, x):
signal_f = np.zeros((self.max_length*self.resample_freq)).astype(np.float32)
signal, sr_orig = librosa.load(x, sr=self.resample_freq)
if sr_orig != self.resample_freq:
signal = librosa.resample(signal, orig_sr=sr_orig, target_sr=self.resample_freq, res_type="kaiser_best")
shape = len(signal)
if self.validation:
signal = signal[:self.max_length*self.resample_freq]
signal_f[:len(signal)] = signal
else:
if shape > self.max_length*self.resample_freq:
start = np.random.randint(0, shape - self.max_length*self.resample_freq)
signal_f = signal[start:start+self.max_length*self.resample_freq]
elif shape == self.max_length*self.resample_freq:
signal = signal[:self.max_length*self.resample_freq]
signal_f[:len(signal)] = signal
else:
start = np.random.randint(0, self.max_length*self.resample_freq-shape)
shape = len(signal[start:start+self.max_length*self.resample_freq])
signal_f[start:start+shape] = signal[start:start+self.max_length*self.resample_freq]
return signal_f.astype(np.float32)
## add pseudo label to the samples variable
def add_pl(self, path_test, pseudo_labels):
print("#old ", len(self.y), len(self.samples))
self.y = np.concatenate((self.y, pseudo_labels), axis=0)
self.samples.extend(path_test)
print("#new ", len(self.y), len(self.samples))
def __getitem__(self, index):
l = []
# label
labels_one_hot = torch.as_tensor(self.y[index]).type(torch.float32)
# load signal
signal_raw = self.load_raw_audio(self.samples[index] )
# add Environment Noise
if "noise" in self.augmentation:
signal_raw = self.noise(samples=signal_raw, sample_rate=self.resample_freq)
l.append( torch.tensor(signal_raw) )
l.append(labels_one_hot)
l.append(torch.tensor(index))
return tuple(l)
def __len__(self):
return len(self.samples)
def get_label(self, idx):
label = self.y[idx]
#label = self.parse_label(label)
return label
class AudioGeneratorDatasetTest(torch.utils.data.Dataset):
def __init__(self, path_audio, resample_freq = 32000, max_length=3, num_class=264, ):
self.labels2idx = {'Pump': 0, 'Spinach': 1, 'abalimi': 2, 'afukirira': 3, 'agriculture': 4, 'akammwanyi': 5, 'akamonde': 6, 'akasaanyi': 7, 'akatunda': 8, 'akatungulu': 9,
'akawuka': 10, 'amakoola': 11, 'amakungula': 12, 'amalagala': 13, 'amappapaali': 14, 'amatooke': 15, 'banana': 16, 'beans': 17, 'bibala': 18, 'bulimi': 19, 'butterfly': 20, 'cabbages': 21,
'cassava': 22, 'caterpillar': 23, 'caterpillars': 24, 'coffee': 25, 'crop': 26, 'ddagala': 27, 'dig': 28, 'disease': 29, 'doodo': 30, 'drought': 31, 'ebbugga': 32, 'ebibala': 33, 'ebigimusa': 34,
'ebijanjaalo': 35, 'ebijjanjalo': 36, 'ebikajjo': 37, 'ebikolo': 38, 'ebikongoliro': 39, 'ebikoola': 40, 'ebimera': 41, 'ebinyebwa': 42, 'ebirime': 43, 'ebisaanyi': 44, 'ebisooli': 45,
'ebisoolisooli': 46, 'ebitooke': 47, 'ebiwojjolo': 48, 'ebiwuka': 49, 'ebyobulimi': 50, 'eddagala': 51, 'eggobe': 52, 'ejjobyo': 53, 'ekibala': 54, 'ekigimusa': 55, 'ekijanjaalo': 56,
'ekikajjo': 57, 'ekikolo': 58, 'ekikoola': 59, 'ekimera': 60, 'ekirime': 61, 'ekirwadde': 62, 'ekisaanyi': 63, 'ekitooke': 64, 'ekiwojjolo': 65, 'ekyeya': 66, 'emboga': 67, 'emicungwa': 68,
'emisiri': 69, 'emiyembe': 70, 'emmwanyi': 71, 'endagala': 72, 'endokwa': 73, 'endwadde': 74, 'enkota': 75, 'ennima': 76, 'ennimiro': 77, 'ennyaanya': 78, 'ensigo': 79, 'ensiringanyi': 80, 'ensujju': 81,
'ensuku': 82, 'ensukusa': 83, 'enva endiirwa': 84, 'eppapaali': 85, 'faamu': 86, 'farm': 87, 'farmer': 88, 'farming instructor': 89, 'fertilizer': 90, 'fruit': 91, 'fruit picking': 92,
'garden': 93, 'greens': 94, 'ground nuts': 95, 'harvest': 96, 'harvesting': 97, 'insect': 98, 'insects': 99, 'irish potatoes': 100, 'irrigate': 101, 'kaamulali': 102, 'kasaanyi': 103, 'kassooli': 104,
'kikajjo': 105, 'kikolo': 106, 'kisaanyi': 107, 'kukungula': 108, 'leaf': 109, 'leaves': 110, 'lumonde': 111, 'lusuku': 112, 'maize': 113, 'maize stalk borer': 114, 'maize streak virus': 115, 'mango': 116, 'mangoes': 117, 'matooke': 118,
'matooke seedlings': 119, 'medicine': 120, 'miceere': 121, 'micungwa': 122, 'mpeke': 123, 'muceere': 124, 'mucungwa': 125, 'mulimi': 126, 'munyeera': 127, 'muwogo': 128,
'nakavundira': 129, 'nambaale': 130, 'namuginga': 131, 'ndwadde': 132, 'nfukirira': 133, 'nnakati': 134, 'nnasale beedi': 135, 'nnimiro': 136, 'nnyaanya': 137, 'npk': 138, 'nursery bed': 139,
'obulimi': 140, 'obulwadde': 141, 'obumonde': 142, 'obusaanyi': 143, 'obutunda': 144, 'obutungulu': 145, 'obuwuka': 146, 'okufukirira': 147, 'okufuuyira': 148, 'okugimusa': 149, 'okukkoola': 150,
'okukungula': 151, 'okulima': 152, 'okulimibwa': 153, 'okunnoga': 154, 'okusaasaana': 155, 'okusaasaanya': 156, 'okusiga': 157,
'okusimba': 158, 'okuzifuuyira': 159, 'olusuku': 160, 'omuceere': 161, 'omucungwa': 162, 'omulimi': 163, 'omulimisa': 164, 'omusiri': 165, 'omuyembe': 166,
'onion': 167, 'orange': 168, 'pampu': 169, 'passion fruit': 170, 'pawpaw': 171, 'pepper': 172, 'plant': 173, 'plantation': 174, 'ppaapaali': 175, 'pumpkin': 176, 'rice': 177, 'seed': 178,
'sikungula': 179, 'sow': 180, 'spray': 181, 'spread': 182, 'suckers': 183, 'sugarcane': 184, 'sukumawiki': 185, 'super grow': 186, 'sweet potatoes': 187, 'tomatoes': 188, 'vegetables': 189,
'watermelon': 190, 'weeding': 191, 'worm': 192}
        self.idx2labels = {idx: label for label, idx in self.labels2idx.items()}  # index -> label
self.samples = path_audio
self.max_length = max_length # 99% are shorter than 3 sec
self.resample_freq=resample_freq
self.num_class = num_class
def load_raw_audio(self, x):
signal_f = np.zeros((self.max_length*self.resample_freq)).astype(np.float32)
signal, sr_orig = librosa.load(x, sr=self.resample_freq)
if sr_orig != self.resample_freq:
signal = librosa.resample(signal, orig_sr=sr_orig, target_sr=self.resample_freq, res_type="kaiser_best")
shape = len(signal)
signal = signal[:self.max_length*self.resample_freq]
signal_f[:len(signal)] = signal
return signal_f.astype(np.float32)
def __getitem__(self, index):
l = []
# load signal
signal_raw = self.load_raw_audio(self.samples[index] )
l.append( torch.tensor(signal_raw) )
l.append(torch.tensor(index))
return tuple(l)
def __len__(self):
return len(self.samples)
## -- Loop functions -- ##
# train one epoch
def train_fn(model, dataloader, optimizer, loss_fn, cfg, accumulation=2, l_mixup=1.0,verbose=False):
model.train()
total_loss = 0.
t=tqdm(dataloader, disable=not verbose )
scaler = GradScaler()
optimizer.zero_grad()
N = 0.
if l_mixup>0:
mixup = Mixup(mixup_alpha=l_mixup, random_seed=SEED)
for i, batch in enumerate(t):
inputs, labels, indices = batch
inputs = inputs.to(cfg.device, dtype=torch.float)
labels = labels.to(cfg.device, dtype=torch.float)
lambda_mixup = None
if l_mixup>0:
lambda_mixup = torch.as_tensor(mixup.get_lambda(batch_size=len(inputs))).to(cfg.device, dtype=torch.float)
labels = do_mixup(labels, lambda_mixup)
labels[labels>1.0] = 1.0
with autocast(cfg.use_apex):
outputs = model(inputs, lambda_mixup)
#outputs = torch.clamp(outputs,0.0,1.0)
#labels = torch.clamp(labels,0.0,1.0)
loss = loss_fn(outputs, labels )
N += len(inputs)
#print(loss.shape)
if len(loss.shape) == 2:
loss = loss.sum(1).mean()
else:
loss = loss.mean()
if torch.isnan(loss):
print("loss error")
print(torch.isnan(outputs).sum())
loss[torch.isnan(loss)] = 0.0
total_loss += loss.item()
else:
total_loss += loss.item()
if cfg.use_apex:
loss = loss/accumulation
scaler.scale(loss).backward()
else:
loss = loss/accumulation
loss.backward()
        if (i+1)%accumulation == 0 or i+1 == len(t):  # step on every accumulation boundary and on the final batch
if cfg.use_apex:
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
t.set_description("Loss : {0}".format(total_loss/(i+1)))
t.refresh()
return total_loss/N
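# For reference, a minimal sketch of the mixup transform used above. The real
# Mixup/do_mixup helpers come from earlier in this script (PANNs-style
# utilities), so _do_mixup_sketch below is an illustrative assumption only:
# each sample is mixed with its batch-reversed counterpart using per-sample
# Beta-distributed weights.
def _do_mixup_sketch(x, lam):
    # x: (batch, ...) tensor, lam: (batch,) mixing weights in [0, 1]
    lam = lam.view(-1, *([1] * (x.dim() - 1)))  # broadcast over trailing dims
    return lam * x + (1.0 - lam) * torch.flip(x, dims=[0])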
# evaluation
def evals_fn(model, dataloader, optimizer, cfg, loss_fn, activation=False, verbose=False):
total_loss = 0.
    t = tqdm(dataloader, disable=not verbose)
y_true = []
y_preds = []
model.eval()
device = cfg.device
with torch.no_grad():
for i, batch in enumerate(t):
inputs, labels, indices = batch
inputs = inputs.to(device, dtype=torch.float)
labels = labels.to(device, dtype=torch.float)
outputs = model(inputs, None)
if activation:
                outputs = torch.softmax(outputs, dim=-1)
#outputs = torch.clamp(outputs,0.0,1.0)
#labels = torch.clamp(labels,0.0,1.0)
loss = loss_fn(outputs, labels )
#print(loss.shape)
if len(loss.shape) == 2:
loss = loss.sum(1).mean()
else:
loss = loss.mean()
total_loss += loss.item()
t.set_description("Loss : {0}".format(total_loss/(i+1)))
t.refresh()
y_true.append(labels.detach().cpu().numpy())
y_preds.append( outputs.cpu().detach().numpy())
return np.concatenate(y_preds), np.concatenate(y_true), total_loss/(i+1)
# inference on test
def inference_fn(model, dataloader, optimizer, cfg,activation=False, verbose=False):
total_loss = 0.
    t = tqdm(dataloader, disable=not verbose)
y_true = []
y_preds = []
model.eval()
device = cfg.device
with torch.no_grad():
for i, batch in enumerate(t):
inputs, indices = batch
inputs = inputs.to(device, dtype=torch.float)
outputs = model(inputs, None)
#outputs = torch.clamp(outputs,0.0,1.0)
#labels = torch.clamp(labels,0.0,1.0)
#print(loss.shape)
y_preds.append( outputs.cpu().detach().numpy())
return np.concatenate(y_preds)
## -- Loss Function -- ##
def bce(outputs, targets):
eps = 1e-5
p1=targets*(torch.log(outputs+eps))
p0=(1-targets)*torch.log(1-outputs+eps)
loss = p0 + p1
return -loss
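# Note: bce() above expects probabilities in (0, 1), while the training loop
# feeds raw logits to nn.BCEWithLogitsLoss, which folds the sigmoid into the
# loss and is numerically safer. Toy sanity check (illustrative values only):
#   y = torch.tensor([1.0]); z = torch.tensor([2.0])
#   bce(torch.sigmoid(z), y) matches
#   nn.BCEWithLogitsLoss(reduction="none")(z, y) up to the eps smoothing.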
# get additional data paths
def get_additional_data(path):
labels = []
all_paths = []
for name in os.listdir(path):
pname = os.path.join(path, name)
for filename in os.listdir(pname):
fname = os.path.join(pname, filename)
all_paths.append(fname)
labels.append(name)
return all_paths, labels
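# Hypothetical layout for the directories passed below: one sub-folder per
# keyword, e.g.
#   AdditionalUtterances-48000/latest_keywords/maize/clip_001.wav
#   AdditionalUtterances-48000/latest_keywords/maize/clip_002.wav
#   AdditionalUtterances-48000/latest_keywords/beans/clip_003.wav
# would return (['.../maize/clip_001.wav', '.../maize/clip_002.wav',
# '.../beans/clip_003.wav'], ['maize', 'maize', 'beans']).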
class Config():
def __init__(self):
self.num_class = 193
self.resample_freq=48000
self.max_length=3 # seconds
self.device = "cuda:0"
self.use_apex =True
self.verbose=False
self.epochs = 205
self.accumulation = 1
self.batch_size = 16
self.l_mixup=1.0
self.background = False
self.kfold=5
self.name = "model_pl_A"
self.save_name = f"{self.name}-GIZ-{self.kfold}kfolds"
import time
cfg = Config()
labels2idx = {'Pump': 0, 'Spinach': 1, 'abalimi': 2, 'afukirira': 3, 'agriculture': 4, 'akammwanyi': 5, 'akamonde': 6, 'akasaanyi': 7, 'akatunda': 8, 'akatungulu': 9,
'akawuka': 10, 'amakoola': 11, 'amakungula': 12, 'amalagala': 13, 'amappapaali': 14, 'amatooke': 15, 'banana': 16, 'beans': 17, 'bibala': 18, 'bulimi': 19, 'butterfly': 20, 'cabbages': 21,
'cassava': 22, 'caterpillar': 23, 'caterpillars': 24, 'coffee': 25, 'crop': 26, 'ddagala': 27, 'dig': 28, 'disease': 29, 'doodo': 30, 'drought': 31, 'ebbugga': 32, 'ebibala': 33, 'ebigimusa': 34,
'ebijanjaalo': 35, 'ebijjanjalo': 36, 'ebikajjo': 37, 'ebikolo': 38, 'ebikongoliro': 39, 'ebikoola': 40, 'ebimera': 41, 'ebinyebwa': 42, 'ebirime': 43, 'ebisaanyi': 44, 'ebisooli': 45,
'ebisoolisooli': 46, 'ebitooke': 47, 'ebiwojjolo': 48, 'ebiwuka': 49, 'ebyobulimi': 50, 'eddagala': 51, 'eggobe': 52, 'ejjobyo': 53, 'ekibala': 54, 'ekigimusa': 55, 'ekijanjaalo': 56,
'ekikajjo': 57, 'ekikolo': 58, 'ekikoola': 59, 'ekimera': 60, 'ekirime': 61, 'ekirwadde': 62, 'ekisaanyi': 63, 'ekitooke': 64, 'ekiwojjolo': 65, 'ekyeya': 66, 'emboga': 67, 'emicungwa': 68,
'emisiri': 69, 'emiyembe': 70, 'emmwanyi': 71, 'endagala': 72, 'endokwa': 73, 'endwadde': 74, 'enkota': 75, 'ennima': 76, 'ennimiro': 77, 'ennyaanya': 78, 'ensigo': 79, 'ensiringanyi': 80, 'ensujju': 81,
'ensuku': 82, 'ensukusa': 83, 'enva endiirwa': 84, 'eppapaali': 85, 'faamu': 86, 'farm': 87, 'farmer': 88, 'farming instructor': 89, 'fertilizer': 90, 'fruit': 91, 'fruit picking': 92,
'garden': 93, 'greens': 94, 'ground nuts': 95, 'harvest': 96, 'harvesting': 97, 'insect': 98, 'insects': 99, 'irish potatoes': 100, 'irrigate': 101, 'kaamulali': 102, 'kasaanyi': 103, 'kassooli': 104,
'kikajjo': 105, 'kikolo': 106, 'kisaanyi': 107, 'kukungula': 108, 'leaf': 109, 'leaves': 110, 'lumonde': 111, 'lusuku': 112, 'maize': 113, 'maize stalk borer': 114, 'maize streak virus': 115, 'mango': 116, 'mangoes': 117, 'matooke': 118,
'matooke seedlings': 119, 'medicine': 120, 'miceere': 121, 'micungwa': 122, 'mpeke': 123, 'muceere': 124, 'mucungwa': 125, 'mulimi': 126, 'munyeera': 127, 'muwogo': 128,
'nakavundira': 129, 'nambaale': 130, 'namuginga': 131, 'ndwadde': 132, 'nfukirira': 133, 'nnakati': 134, 'nnasale beedi': 135, 'nnimiro': 136, 'nnyaanya': 137, 'npk': 138, 'nursery bed': 139,
'obulimi': 140, 'obulwadde': 141, 'obumonde': 142, 'obusaanyi': 143, 'obutunda': 144, 'obutungulu': 145, 'obuwuka': 146, 'okufukirira': 147, 'okufuuyira': 148, 'okugimusa': 149, 'okukkoola': 150,
'okukungula': 151, 'okulima': 152, 'okulimibwa': 153, 'okunnoga': 154, 'okusaasaana': 155, 'okusaasaanya': 156, 'okusiga': 157,
'okusimba': 158, 'okuzifuuyira': 159, 'olusuku': 160, 'omuceere': 161, 'omucungwa': 162, 'omulimi': 163, 'omulimisa': 164, 'omusiri': 165, 'omuyembe': 166,
'onion': 167, 'orange': 168, 'pampu': 169, 'passion fruit': 170, 'pawpaw': 171, 'pepper': 172, 'plant': 173, 'plantation': 174, 'ppaapaali': 175, 'pumpkin': 176, 'rice': 177, 'seed': 178,
'sikungula': 179, 'sow': 180, 'spray': 181, 'spread': 182, 'suckers': 183, 'sugarcane': 184, 'sukumawiki': 185, 'super grow': 186, 'sweet potatoes': 187, 'tomatoes': 188, 'vegetables': 189,
'watermelon': 190, 'weeding': 191, 'worm': 192}
# Options for Logmel
mel_bins = 64
fmin = 20
fmax = 24000
window_size = 1024
hop_size = 320
audioset_classes_num = cfg.num_class
# data aug
augmentations = ["noise"]
# load train/test data and pseudo labels
pseudo_labels=pd.read_csv("submission_ensemblingv5.csv")
train = pd.read_csv("Train.csv")
test = pd.read_csv("SampleSubmission.csv")
train["fn"] = train["fn"].apply(lambda x: x.replace("audio_files", "audio_files-48000"))
test["fn"] = test["fn"].apply(lambda x: x.replace("audio_files", "audio_files-48000"))
pseudo_labels["fn"] = pseudo_labels["fn"].apply(lambda x: x.replace("audio_files", "audio_files-48000"))
# gather the additional data paths
paths_add, labels_add = get_additional_data("AdditionalUtterances-48000/latest_keywords")
paths_add2, labels_add2 = get_additional_data("nlp_keywords-48000")
# loss function
loss_fnt = nn.BCEWithLogitsLoss(reduction="none")  # alternatives considered: CELoss, bce
loss_bce = nn.BCEWithLogitsLoss(reduction="none")
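# With reduction="none" these losses return a (batch, num_class) matrix;
# train_fn/evals_fn then sum over the class axis and average over the batch,
# i.e. a per-sample multi-label BCE.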
skf = StratifiedKFold(n_splits=cfg.kfold, random_state=42, shuffle=True)
# merge all paths and labels
paths = np.array(train.fn.values.tolist() + paths_add + paths_add2)
targets_names = np.array(train.label.values.tolist() + labels_add+ labels_add2)
# create test dataset
dataset_test = AudioGeneratorDatasetTest(test.fn.values.tolist(), resample_freq=cfg.resample_freq, max_length=cfg.max_length, num_class=cfg.num_class )
test_dataloader = DataLoader(dataset_test, batch_size=cfg.batch_size, shuffle=False, num_workers=0)
columns_labels = [dataset_test.idx2labels[i] for i in range(cfg.num_class)]  # label names in index order
if __name__ == "__main__":
oof_preds = []
oof_targets = []
oof_loss = []
for fold, (train_idx, val_idx) in enumerate(skf.split(np.zeros(len(paths)), targets_names)):
print(f"## FOLD {fold}")
start = time.time()
# train/val dataset and dataloader + pseudo labels
dataset = AudioGeneratorDataset(paths[train_idx].tolist(), targets_names[train_idx],
resample_freq=cfg.resample_freq, max_length=cfg.max_length,
augmentation=augmentations, validation=False, num_class=cfg.num_class, pseudo_labels=(pseudo_labels.fn.values.tolist(), pseudo_labels[columns_labels].values) )
dataset_val = AudioGeneratorDataset(paths[val_idx].tolist(), targets_names[val_idx],
resample_freq=cfg.resample_freq, max_length=cfg.max_length, augmentation=[], validation=True,
num_class=cfg.num_class )
#train_dataloader = DataLoader(dataset, batch_size=cfg.batch_size, shuffle=False, num_workers=8, drop_last=True, sampler=AgrinetDatasetSampler( dataset))
train_dataloader = DataLoader(dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=8, drop_last=True)
val_dataloader = DataLoader(dataset_val, batch_size=cfg.batch_size, shuffle=False, num_workers=1)
## -- LOAD MODEL -- ##
backbone = EfficientNet.from_pretrained('efficientnet-b7', num_classes=cfg.num_class)
model = AudioClassifier(backbone, cfg.resample_freq, window_size, hop_size, mel_bins, fmin, fmax, cfg.num_class).to(cfg.device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, amsgrad=False, weight_decay=1e-5) # torch.optim.SGD(model.parameters(), lr=1e-3, momentum=5e-4, nesterov=True)#
reducer = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=7, verbose=True, min_lr=1e-6)
best_score_bce= np.inf
best_score_ce = np.inf
for e in range(cfg.epochs):
# train one epoch
train_fn(model, train_dataloader, optimizer, loss_fnt, cfg, accumulation=cfg.accumulation, l_mixup=cfg.l_mixup, verbose=cfg.verbose)
# eval
            preds5, targets5, val_loss_bce = evals_fn(model, val_dataloader, optimizer, cfg, loss_bce, activation=False)
reducer.step(val_loss_bce)
if best_score_bce > val_loss_bce :
best_score_bce = val_loss_bce
torch.save(model.state_dict(), cfg.save_name + f"-fold{fold}-bce.pth")
#torch.save(optimizer.state_dict(), "optimizer-"+ cfg.save_name)
print("bceloss - score improved : ", val_loss_bce, round(time.time()-start))
else:
print("bceloss score not improve : ", val_loss_bce, " Best : ", best_score_bce, round(time.time()-start))
# save best loss/predictions
model.load_state_dict(torch.load(cfg.save_name + f"-fold{fold}-bce.pth"))
    preds, targets, val_loss_bce = evals_fn(model, val_dataloader, optimizer, cfg, loss_bce, activation=False)
oof_preds.append(preds)
oof_targets.append(targets)
oof_loss.append(val_loss_bce)
oof_preds = np.concatenate(oof_preds, axis=0)
oof_targets = np.concatenate(oof_targets)
print("final loss : ", oof_loss)
# compute oof validation
results = nn.BCEWithLogitsLoss(reduction="none")(torch.as_tensor(oof_preds), torch.as_tensor(oof_targets)).sum(1).mean().item()
print(results)
# load model and do prediction for each fold
preds_test = []
for fold, (train_idx, val_idx) in enumerate(skf.split(np.zeros(len(paths)), targets_names)):
print(f"## FOLD {fold}")
model.load_state_dict(torch.load(cfg.save_name + f"-fold{fold}-bce.pth"))
        preds_test_fold = inference_fn(model, test_dataloader, optimizer, cfg)
preds_test.append(torch.sigmoid(torch.as_tensor(preds_test_fold)).numpy())
preds_test = np.stack(preds_test)
ids = np.array([dataset_test.labels2idx[x] for x in test.columns[1:]])
# average prediction of folds
preds_test_withids = preds_test.mean(0)[:, ids]
    # write the averaged predictions into a fresh copy of the sample submission
    test = pd.read_csv("SampleSubmission.csv")
    test[test.columns[1:]] = preds_test_withids
if cfg.l_mixup == 0:
#test.to_csv(f"{cfg.name}-{cfg.kfold}folds-CV-{round(results,5)}-seed{SEED}-bs{cfg.batch_size}.csv", index=False)
test.to_csv(f"{cfg.name}-{cfg.kfold}folds-CV-seed{SEED}-bs{cfg.batch_size}.csv", index=False)
else:
#test.to_csv(f"{cfg.name}-{cfg.kfold}folds-CV-{round(results,5)}-seed{SEED}-bs{cfg.batch_size}-mixup.csv", index=False)
test.to_csv(f"{cfg.name}-{cfg.kfold}folds-CV-seed{SEED}-bs{cfg.batch_size}-mixup.csv", index=False)
| 45.322777
| 243
| 0.61556
|
a9c0e3ce940993472b58862516dbaf497869f0d8
| 5,521
|
py
|
Python
|
prediction.py
|
jfmalloy1/UltraMarathon_Prediction
|
8eef7bd2860ce255994d32a0150c09b3b655cee7
|
[
"MIT"
] | null | null | null |
prediction.py
|
jfmalloy1/UltraMarathon_Prediction
|
8eef7bd2860ce255994d32a0150c09b3b655cee7
|
[
"MIT"
] | null | null | null |
prediction.py
|
jfmalloy1/UltraMarathon_Prediction
|
8eef7bd2860ce255994d32a0150c09b3b655cee7
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
from datetime import timedelta
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from torch import Tensor
from torch.nn import Linear
from torch.nn import Sigmoid
from torch.nn import Module
from torch.optim import SGD
from torch.nn import MSELoss
from torch.nn.init import xavier_uniform_
from tqdm import tqdm
def remove_GSheets_labels(weeks):
""" Removes the annoying " - Sheet1" file extension given through GSheets
Args:
weeks (list): list of all weeks to be included in data cleaning
"""
for week in weeks:
#Targets
targets = os.listdir("Data/Targets_" + week)
for race in targets:
os.rename("Data/Targets_" + week + "/" + race,
"Data/Targets_" + week + "/" + race[:-13] + ".csv")
#Results
results = os.listdir("Data/Results_" + week)
for race in results:
os.rename("Data/Results_" + week + "/" + race,
"Data/Results_" + week + "/" + race[:-13] + ".csv")
def merge(weeks):
""" Merges target & results dataframes into one dataframe based on names
Args:
weeks (list): weeks (well, technically weekends) to merge
"""
    ### Create one merged dataframe of all targets + results, joined on First & Last name for each race
merged_df = pd.DataFrame()
for week in weeks:
print(week)
for race in tqdm(os.listdir("Data/Targets_" + week)):
if race.endswith(".csv"):
#print(race)
target_df = pd.read_csv("Data/Targets_" + week + "/" + race)
results_df = pd.read_csv("Data/Results_" + week + "/" + race)
                merged_df = pd.concat(
                    [merged_df, pd.merge(target_df, results_df, on=["First", "Last"])])
merged_df.to_csv("Data/fullRaceData.csv")
def get_seconds(t):
""" Turn the string target & finish times into seconds
Args:
t (str): time, in HH:MM:SS form
Returns:
float: number of seconds denoted by time string
"""
t = [int(x) for x in t.split(":")]
    d = timedelta(hours=t[0], minutes=t[1], seconds=t[2])
    return d.total_seconds()  # total_seconds() keeps >24 h finishes intact; .seconds would wrap at a day
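# e.g. get_seconds("01:30:15") -> 5415.0, and a 30-hour ultra finish
# "30:00:00" -> 108000.0 instead of wrapping past midnight.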
def clean_data(weeks):
""" Takes separate target & result data, merges it, and
calculates the seconds elapsed for both target and result data
"""
### Merge targets & results dataframe - should only be run once
merge(weeks)
#Change HH:MM:SS finish times to seconds
df = pd.read_csv("Data/fullRaceData.csv")
df["Target_Seconds"] = df["Target"].apply(get_seconds)
df["Time_Seconds"] = df["Time"].apply(get_seconds)
df.to_csv("Data/fullRaceData.csv")
def split(df, percent_test):
    """ Randomly hold out percent_test of the rows as a test set """
    test_size = round(len(df) * percent_test)
    train_size = len(df) - test_size
    print(test_size, train_size)
    return random_split(df, [train_size, test_size])
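# e.g. split(df, 0.33) on a 100-row frame prints "33 67" and returns
# (train, test) Subsets of 67 and 33 rows respectively.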
# model definition
class MLP(Module):
"""
Defines the NN model - in this case, there are 3 hidden layers,
8 inputs (defined by data) in the 1st, 6 inputs in the second,
and 6 in the 3rd (with 1 output). The first two are activated by a sigmoid function,
weighted by a xavier initalization scheme.
"""
# define model elements
def __init__(self, n_inputs):
super(MLP, self).__init__()
# input to first hidden layer
self.hidden1 = Linear(n_inputs, n_inputs)
xavier_uniform_(self.hidden1.weight)
self.act1 = Sigmoid()
# second hidden layer
self.hidden2 = Linear(n_inputs, n_inputs - 2)
xavier_uniform_(self.hidden2.weight)
self.act2 = Sigmoid()
        # third hidden layer
        self.hidden3 = Linear(n_inputs - 2, n_inputs - 3)
        xavier_uniform_(self.hidden3.weight)
        self.act3 = Sigmoid()
        # output layer
        self.hidden4 = Linear(n_inputs - 3, 1)
xavier_uniform_(self.hidden4.weight)
# forward propagate input
def forward(self, X):
# input to first hidden layer
X = self.hidden1(X)
X = self.act1(X)
# second hidden layer
X = self.hidden2(X)
X = self.act2(X)
        # third hidden layer
        X = self.hidden3(X)
        X = self.act3(X)
        # output layer
        X = self.hidden4(X)
return X
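# A minimal sketch (never called) of how this MLP could be trained once the
# rows are wrapped in a proper Dataset -- random_split subsets of a raw
# DataFrame cannot be indexed positionally by a DataLoader. RaceDataset and
# train_sketch are illustrative assumptions, not part of the original
# pipeline.
class RaceDataset(Dataset):
    """ Wraps the cleaned race DataFrame as (features, target) tensor pairs """
    def __init__(self, df, target="Time_Seconds"):
        self.X = Tensor(df.drop(columns=[target]).values)
        self.y = Tensor(df[[target]].values)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]

def train_sketch(model, train_dl, epochs=10):
    """ Plain MSE regression loop, e.g. train_sketch(MLP(8), DataLoader(RaceDataset(df), batch_size=32)) """
    criterion = MSELoss()
    optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
    for _ in range(epochs):
        for inputs, targets in train_dl:
            optimizer.zero_grad()
            loss = criterion(model(inputs), targets)
            loss.backward()
            optimizer.step()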
def main():
# #Remove " - Sheet1" from specific weeks - ###NOTE: should only be run once per file
# weeks = ["April09"]
# remove_GSheets_labels(weeks)
# ### Clean data - should only be needed once every time a new week is added
# weeks = ["March05", "March12", "March19", "March26", "April09"]
# clean_data(weeks)
df = pd.read_csv("Data/fullRaceData.csv",
usecols=[
"Rank_x", "Age Rank", "Results", "Finishes", "Age_y",
"GP", "Rank_y", "Target_Seconds", "Time_Seconds"
])
#Make sure data is only numbers
df["Rank_x"] = df["Rank_x"].str[:-1]
df["Age Rank"] = df["Age Rank"].str[:-1]
df = df.apply(pd.to_numeric)
train, test = split(df, 0.33)
#Prepare data loaders
train_dl = DataLoader(train, batch_size=32, shuffle=True)
test_dl = DataLoader(test, batch_size=1024, shuffle=False)
#Define the NN
model = MLP(8)
# # train the model
# for i, (inputs, targets) in enumerate(train_dl):
# print(i)
if __name__ == "__main__":
main()
| 31.016854
| 104
| 0.61094
|
2beab4fbd6c3f30291ee717f53a4dc0085faed8a
| 131,671
|
py
|
Python
|
TEST3D/GUI/0010801_page_skelbdy/log.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 31
|
2015-04-01T15:59:36.000Z
|
2022-03-18T20:21:47.000Z
|
TEST3D/GUI/0010801_page_skelbdy/log.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 3
|
2015-02-06T19:30:24.000Z
|
2017-05-25T14:14:31.000Z
|
TEST3D/GUI/0010801_page_skelbdy/log.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 7
|
2015-01-23T15:19:22.000Z
|
2021-06-09T09:03:59.000Z
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
import tests
#Testing the different ways of creating a skeleton boundary
#From Nodes, From Segments, From Faces, From Elements
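# (Auto-recorded GUI session: the findWidget/setComboBox calls and the bare
# "checkpoint ..." lines below are replayed and synchronized by the OOF3D GUI
# test harness rather than run as ordinary Python.)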
findWidget('OOF3D').resize(550, 350)
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
findWidget('OOF3D:Microstructure Page:Pane').set_position(156)
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint microstructure page sensitized
findMenu(findWidget('OOF3D:MenuBar'), 'File:Load:Data').activate()
checkpoint toplevel widget mapped Dialog-Data
findWidget('Dialog-Data').resize(190, 67)
findWidget('Dialog-Data:filename').set_text('TEST_DATA/two_walls.skeleton')
findWidget('Dialog-Data:gtk-ok').clicked()
findWidget('OOF3D:Microstructure Page:Pane').set_position(159)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint active area status updated
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint Field page sensitized
checkpoint Materials page updated
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint pinnodes page sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint Solver page sensitized
checkpoint microstructure page sensitized
findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint Field page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page sensitized
checkpoint pinnodes page sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint Solver page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint pinnodes page sensitized
checkpoint pinnodes page sensitized
checkpoint pinnodes page sensitized
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint toplevel widget mapped OOF3D Activity Viewer
checkpoint boundary page updated
findWidget('OOF3D Activity Viewer').resize(400, 300)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.File.Load.Data
findMenu(findWidget('OOF3D:MenuBar'), 'Windows:Graphics:New').activate()
checkpoint Move Node toolbox info updated
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint toplevel widget mapped OOF3D Graphics 1
findWidget('OOF3D Graphics 1').resize(1000, 800)
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Windows.Graphics.New
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D').resize(550, 350)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 5.0000000000000e+00)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 1.0000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 1.5000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 2.0000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 2.5000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 3.0000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 3.5000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 4.0000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 4.5000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 5.0000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 5.5000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 6.0000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 6.5000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 7.0000000000000e+01)
findWidget('OOF3D Graphics 1:Pane0:LayerScroll:LayerList').get_selection().select_path((13,))
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 7.5000000000000e+01)
checkpoint OOF.Graphics_1.Layer.Select
findWidget('OOF3D Graphics 1:Pane0:LayerScroll').get_vadjustment().set_value( 7.2000000000000e+01)
tree=findWidget('OOF3D Graphics 1:Pane0:LayerScroll:LayerList')
column = tree.get_column(2)
tree.row_activated((13,), column)
checkpoint toplevel widget mapped Dialog-Edit Graphics Layer
findWidget('Dialog-Edit Graphics Layer').resize(291, 191)
setComboBox(findWidget('Dialog-Edit Graphics Layer:how:Bitmap:filter:Chooser'), 'Not')
findWidget('Dialog-Edit Graphics Layer').resize(336, 221)
setComboBox(findWidget('Dialog-Edit Graphics Layer:how:Bitmap:filter:Not:a:Chooser'), 'Group')
findWidget('Dialog-Edit Graphics Layer').resize(368, 249)
setComboBox(findWidget('Dialog-Edit Graphics Layer:how:Bitmap:filter:Not:a:Group:group'), '#000000')
findWidget('Dialog-Edit Graphics Layer:gtk-ok').clicked()
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint OOF.Graphics_1.Layer.Edit
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.9300000000000e+02,y= 3.4700000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9300000000000e+02,y= 3.4600000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9400000000000e+02,y= 3.4500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9500000000000e+02,y= 3.4000000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9500000000000e+02,y= 3.3900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9500000000000e+02,y= 3.3500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9500000000000e+02,y= 3.3400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9700000000000e+02,y= 3.3200000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9700000000000e+02,y= 3.3100000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9700000000000e+02,y= 3.3000000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9800000000000e+02,y= 3.2700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.0100000000000e+02,y= 3.2300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.0200000000000e+02,y= 3.1900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.0400000000000e+02,y= 3.1500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.0500000000000e+02,y= 3.1500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.0700000000000e+02,y= 3.1100000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.0900000000000e+02,y= 3.0900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1100000000000e+02,y= 3.0500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1300000000000e+02,y= 3.0000000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1400000000000e+02,y= 3.0000000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1500000000000e+02,y= 2.9700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1500000000000e+02,y= 2.9400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1600000000000e+02,y= 2.9100000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1600000000000e+02,y= 2.8900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1600000000000e+02,y= 2.8400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1600000000000e+02,y= 2.8300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1700000000000e+02,y= 2.8300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1700000000000e+02,y= 2.8100000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1700000000000e+02,y= 2.8000000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1700000000000e+02,y= 2.7900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1700000000000e+02,y= 2.7900000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
setComboBox(findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBChooser'), 'Skeleton Selection')
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0300000000000e+02,y= 3.8000000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.0300000000000e+02,y= 3.8000000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Messages 1').resize(543, 200)
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Element.Single_Element
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Selection')
checkpoint page installed Skeleton Selection
findWidget('OOF3D:Skeleton Selection Page:Pane').set_position(227)
findWidget('OOF3D:Skeleton Selection Page:Mode:Face').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page selection sensitized
findWidget('OOF3D').resize(550, 376)
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
setComboBox(findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:FaceAction:Chooser'), 'Select from Selected Elements')
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:FaceHistory:OK').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.FaceSelection.Select_from_Selected_Elements
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.6400000000000e+02,y= 1.6900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.4500000000000e+02,y= 1.7200000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.4500000000000e+02,y= 1.7200000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Boundaries')
checkpoint page installed Skeleton Boundaries
findWidget('OOF3D:Skeleton Boundaries Page:Pane').set_position(300)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Face boundary from faces')
findWidget('Dialog-New Boundary').resize(368, 153)
assert tests.BoundaryNewDialogCheck0('Face boundary from faces','Outward','Inward',)
assert tests.BoundaryNewDialogCheck1('Face boundary from faces','<selection>','Outward')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.4400000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Face boundary from faces:direction'), 'Inward')
assert tests.BoundaryNewDialogCheck0('Face boundary from faces','Outward','Inward',)
assert tests.BoundaryNewDialogCheck1('Face boundary from faces','<selection>','Inward')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.4400000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.6800000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Point boundary from faces')
assert tests.BoundaryNewDialogCheck0('Point boundary from faces')
assert tests.BoundaryNewDialogCheck1('Point boundary from faces','<selection>')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.6800000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 4.7100000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.1000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 4.7100000000000e+02)
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Selection')
checkpoint page installed Skeleton Selection
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:Clear').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.FaceSelection.Clear
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Boundaries')
checkpoint page installed Skeleton Boundaries
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Element:Clear').clicked()
checkpoint OOF.Graphics_1.Toolbox.Select_Element.Clear
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.1100000000000e+02,y= 2.5700000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 1.2600000000000e+02,y= 2.7900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.2600000000000e+02,y= 2.7900000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Element:Method:Chooser'), 'ByDominantPixel')
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 8.9000000000000e+01,y= 2.3300000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 8.9000000000000e+01,y= 2.3300000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Messages 1').resize(553, 200)
checkpoint OOF.Graphics_1.Toolbox.Select_Element.ByDominantPixel
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Selection')
checkpoint page installed Skeleton Selection
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:FaceHistory:OK').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.FaceSelection.Select_from_Selected_Elements
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Boundaries')
checkpoint page installed Skeleton Boundaries
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Face boundary from faces')
assert tests.BoundaryNewDialogCheck0('Face boundary from faces')
assert tests.BoundaryNewDialogCheck1('Face boundary from faces','<selection>')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.9200000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.9200000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Face boundary from faces:direction'), 'Outward')
assert tests.BoundaryNewDialogCheck0('Face boundary from faces','Outward','Inward',)
assert tests.BoundaryNewDialogCheck1('Face boundary from faces','<selection>','Outward')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.9200000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.1600000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
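# Clear the face selection and build "Face boundary from elements"
# boundaries from the still-selected elements (default direction, then Inward).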
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Selection')
checkpoint page installed Skeleton Selection
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:Clear').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.FaceSelection.Clear
checkpoint skeleton selection page updated
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Boundaries')
checkpoint page installed Skeleton Boundaries
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(404, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Face boundary from elements')
assert tests.BoundaryNewDialogCheck0('Face boundary from elements','Outward','Inward',)
assert tests.BoundaryNewDialogCheck1('Face boundary from elements','<selection>','Outward')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Face boundary from elements:direction'), 'Inward')
assert tests.BoundaryNewDialogCheck0('Face boundary from elements','Outward','Inward',)
assert tests.BoundaryNewDialogCheck1('Face boundary from elements','<selection>','Inward')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.6400000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
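# Segment mode: derive the segment selection from the selected elements and
# make a point boundary from it; the edge-boundary dialog reports
# "No edge sequence" for this selection, so it is cancelled.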
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Selection')
checkpoint page installed Skeleton Selection
findWidget('OOF3D:Skeleton Selection Page:Mode:Segment').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
setComboBox(findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:SegmentAction:Chooser'), 'Select from Selected Elements')
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:SegmentHistory:OK').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.SegmentSelection.Select_from_Selected_Elements
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Element:Clear').clicked()
checkpoint OOF.Graphics_1.Toolbox.Select_Element.Clear
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Boundaries')
checkpoint page installed Skeleton Boundaries
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(404, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Point boundary from segments')
assert tests.BoundaryNewDialogCheck0('Point boundary from segments')
assert tests.BoundaryNewDialogCheck1('Point boundary from segments','<selection>')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 5.9100000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 5.9100000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from segments')
assert tests.BoundaryNewDialogCheck2('Edge boundary from segments','No edge sequence',)
assert tests.BoundaryNewDialogCheck3('Edge boundary from segments','<selection>','No edge sequence')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-cancel').clicked()
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
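# Undo the element clear and repeat in node mode: select nodes from the
# elements, construct a point boundary from nodes, and cancel the
# edge-boundary dialog, which again reports "No edge sequence".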
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Element:Undo').clicked()
checkpoint OOF.Graphics_1.Toolbox.Select_Element.Undo
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Selection')
checkpoint page installed Skeleton Selection
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:Clear').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.SegmentSelection.Clear
findWidget('OOF3D:Skeleton Selection Page:Mode:Node').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
setComboBox(findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:NodeAction:Chooser'), 'Select from Selected Elements')
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:NodeHistory:OK').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page selection sensitized
checkpoint OOF.NodeSelection.Select_from_Selected_Elements
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Element:Clear').clicked()
checkpoint OOF.Graphics_1.Toolbox.Select_Element.Clear
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Boundaries')
checkpoint page installed Skeleton Boundaries
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Point boundary from nodes')
assert tests.BoundaryNewDialogCheck0('Point boundary from nodes')
assert tests.BoundaryNewDialogCheck1('Point boundary from nodes','<selection>')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.1500000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.1500000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from nodes')
assert tests.BoundaryNewDialogCheck2('Edge boundary from nodes','No edge sequence',)
assert tests.BoundaryNewDialogCheck3('Edge boundary from nodes','<selection>','No edge sequence')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-cancel').clicked()
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Selection')
checkpoint page installed Skeleton Selection
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:Clear').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.NodeSelection.Clear
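# Hand-pick a chain of segments: switch the toolbox to segment selection,
# tumble the view, then extend the selection with shift-clicks (state=17).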
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Select:Segment').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0000000000000e+02,y= 1.4000000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.9500000000000e+02,y= 1.3800000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.9500000000000e+02,y= 1.3800000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1900000000000e+02,y= 5.3200000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1900000000000e+02,y= 5.3200000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint OOF.Graphics_1.Toolbox.Select_Segment.Single_Segment
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.2700000000000e+02,y= 4.0900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.2700000000000e+02,y= 4.0900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint OOF.Graphics_1.Toolbox.Select_Segment.Single_Segment
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8100000000000e+02,y= 3.8700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8100000000000e+02,y= 3.8700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint OOF.Graphics_1.Toolbox.Select_Segment.Single_Segment
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.9400000000000e+02,y= 3.2100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.9400000000000e+02,y= 3.2100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint OOF.Graphics_1.Toolbox.Select_Segment.Single_Segment
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.1800000000000e+02,y= 2.8900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.1800000000000e+02,y= 2.8900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint OOF.Graphics_1.Toolbox.Select_Segment.Single_Segment
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0500000000000e+02,y= 3.3600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.0500000000000e+02,y= 3.3600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint OOF.Graphics_1.Toolbox.Select_Segment.Single_Segment
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5300000000000e+02,y= 3.7400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5300000000000e+02,y= 3.7400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint OOF.Graphics_1.Toolbox.Select_Segment.Single_Segment
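# Build boundaries from the picked segments: a point boundary, then
# "Edge boundary from segments" in all six directions (-X to +X through -Z to +Z).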
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Boundaries')
checkpoint page installed Skeleton Boundaries
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Point boundary from segments')
assert tests.BoundaryNewDialogCheck0('Point boundary from segments')
assert tests.BoundaryNewDialogCheck1('Point boundary from segments','<selection>')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.3900000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from segments')
assert tests.BoundaryNewDialogCheck0('Edge boundary from segments','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z')
assert tests.BoundaryNewDialogCheck1('Edge boundary from segments','<selection>','-X to +X')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.6300000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 5.8000000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 5.8000000000000e+02)
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8500000000000e+02,y= 1.5400000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.8400000000000e+02,y= 1.5400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.5600000000000e+02,y= 1.5400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5500000000000e+02,y= 1.5400000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from segments:direction'), '+X to -X')
assert tests.BoundaryNewDialogCheck0('Edge boundary from segments','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from segments','<selection>','+X to -X')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 5.8000000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.0400000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from segments:direction'), '+Y to -Y')
assert tests.BoundaryNewDialogCheck0('Edge boundary from segments','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from segments','<selection>','+Y to -Y')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.0400000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.2800000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from segments:direction'), '-Y to +Y')
assert tests.BoundaryNewDialogCheck0('Edge boundary from segments','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from segments','<selection>','-Y to +Y')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.2800000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.5200000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from segments:direction'), '+Z to -Z')
assert tests.BoundaryNewDialogCheck0('Edge boundary from segments','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from segments','<selection>','+Z to -Z')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.5200000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.7600000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from segments:direction'), '-Z to +Z')
assert tests.BoundaryNewDialogCheck0('Edge boundary from segments','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from segments','<selection>','-Z to +Z')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 6.7600000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
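# Derive the node selection from the selected segments; a point boundary
# works, but "Edge boundary from nodes" again reports "No edge sequence"
# and is cancelled.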
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Selection')
checkpoint page installed Skeleton Selection
setComboBox(findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:NodeAction:Chooser'), 'Select from Selected Segments')
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:NodeHistory:OK').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.NodeSelection.Select_from_Selected_Segments
checkpoint skeleton selection page updated
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Segment:Clear').clicked()
checkpoint OOF.Graphics_1.Toolbox.Select_Segment.Clear
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.0700000000000e+02,y= 2.6400000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.1400000000000e+02,y= 2.6400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.1600000000000e+02,y= 2.6400000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Boundaries')
checkpoint page installed Skeleton Boundaries
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Point boundary from nodes')
assert tests.BoundaryNewDialogCheck0('Point boundary from nodes')
assert tests.BoundaryNewDialogCheck1('Point boundary from nodes','<selection>')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.3500000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.0700000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from nodes')
assert tests.BoundaryNewDialogCheck2('Edge boundary from nodes','No edge sequence',)
assert tests.BoundaryNewDialogCheck3('Edge boundary from nodes','<selection>','No edge sequence')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-cancel').clicked()
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Segment:Clear').clicked()
checkpoint OOF.Graphics_1.Toolbox.Select_Segment.Clear
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Selection')
checkpoint page installed Skeleton Selection
findWidget('OOF3D:Skeleton Selection Page:Pane:Selection:Clear').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.NodeSelection.Clear
checkpoint skeleton selection page updated
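# Pick individual nodes in the graphics window: switch the toolbox to node
# selection, then extend the selection with shift-clicks (state=17).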
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0100000000000e+02,y= 2.9400000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.2600000000000e+02,y= 3.0000000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.2600000000000e+02,y= 3.0000000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Select:Node').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.6200000000000e+02,y= 5.2200000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.6200000000000e+02,y= 5.2200000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4100000000000e+02,y= 4.0500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4100000000000e+02,y= 4.0500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1800000000000e+02,y= 3.0000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1800000000000e+02,y= 3.0000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.9100000000000e+02,y= 2.9300000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.9100000000000e+02,y= 2.9300000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
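# Construct boundaries from the picked nodes: the first dialog is accepted
# with its current settings, then "Edge boundary from nodes" is built in all
# six directions.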
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton Boundaries')
checkpoint page installed Skeleton Boundaries
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.3100000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from nodes')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-X to +X')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.2400000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.2400000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '+X to -X')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','+X to -X')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.2400000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.4800000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '+Y to -Y')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','+Y to -Y')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.4800000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.7200000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '-Y to +Y')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-Y to +Y')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.7200000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.9600000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '+Z to -Z')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','+Z to -Z')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.9600000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.2000000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '-Z to +Z')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-Z to +Z')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.2000000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.4400000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
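# Tumble again and adjust the node selection (shift-click at state=17,
# control-click at state=20), then build further point and edge boundaries
# from nodes.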
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1000000000000e+02,y= 1.9100000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.1800000000000e+02,y= 1.9300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1800000000000e+02,y= 1.9300000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.7500000000000e+02,y= 2.0300000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.7500000000000e+02,y= 2.0300000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Point boundary from nodes')
assert tests.BoundaryNewDialogCheck0('Point boundary from nodes')
assert tests.BoundaryNewDialogCheck1('Point boundary from nodes','<selection>')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 9.2700000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 9.9900000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from nodes')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-Z to +Z')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-cancel').clicked()
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.0200000000000e+02,y= 3.0000000000000e+02,button=1,state=20,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.0200000000000e+02,y= 3.0000000000000e+02,button=1,state=276,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from nodes')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-Z to +Z')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.6800000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.6800000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '+Z to -Z')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','+Z to -Z')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.6800000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.9200000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '-Y to +Y')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-Y to +Y')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 8.9200000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 9.1600000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '+Y to -Y')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','+Y to -Y')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 9.1600000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 9.4000000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '+X to -X')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','+X to -X')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 9.4000000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 9.6400000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '-X to +X')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-X to +X')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 9.6400000000000e+02)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 9.8800000000000e+02)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1500000000000e+02,y= 2.2600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.8200000000000e+02,y= 2.2400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8200000000000e+02,y= 2.2400000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5100000000000e+02,y= 1.6000000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.4100000000000e+02,y= 1.7100000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.4100000000000e+02,y= 1.7100000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3500000000000e+02,y= 2.4500000000000e+02,button=1,state=20,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.3500000000000e+02,y= 2.4500000000000e+02,button=1,state=276,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.6100000000000e+02,y= 1.5500000000000e+02,button=1,state=20,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.6100000000000e+02,y= 1.5500000000000e+02,button=1,state=276,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Point boundary from nodes')
assert tests.BoundaryNewDialogCheck0('Point boundary from nodes')
assert tests.BoundaryNewDialogCheck1('Point boundary from nodes','<selection>')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0950000000000e+03)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.1670000000000e+03)
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3800000000000e+02,y= 2.3900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.4400000000000e+02,y= 2.1900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.4400000000000e+02,y= 2.1900000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from nodes')
assert tests.BoundaryNewDialogCheck2('Edge boundary from nodes','No edge sequence',)
assert tests.BoundaryNewDialogCheck3('Edge boundary from nodes','<selection>','No edge sequence')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-cancel').clicked()
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.9700000000000e+02,y= 3.6600000000000e+02,button=1,state=20,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.9700000000000e+02,y= 3.6600000000000e+02,button=1,state=276,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.9600000000000e+02,y= 3.6600000000000e+02,button=1,state=20,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.9600000000000e+02,y= 3.6600000000000e+02,button=1,state=276,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.1910000000000e+03)
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3700000000000e+02,y= 2.0800000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.3800000000000e+02,y= 2.0900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.1000000000000e+02,y= 2.2900000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from nodes')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-X to +X')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-cancel').clicked()
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.8600000000000e+02,y= 2.3500000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 1.8700000000000e+02,y= 2.3500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.1200000000000e+02,y= 2.3400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.1200000000000e+02,y= 2.3400000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3300000000000e+02,y= 2.3600000000000e+02,button=1,state=20,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.3300000000000e+02,y= 2.3600000000000e+02,button=1,state=276,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7200000000000e+02,y= 1.7000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7200000000000e+02,y= 1.7000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.7600000000000e+02,y= 1.9300000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.7600000000000e+02,y= 1.9300000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.8400000000000e+02,y= 1.3600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.8400000000000e+02,y= 1.3600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Single_Node
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.2150000000000e+03)
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5200000000000e+02,y= 1.6000000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.5200000000000e+02,y= 1.5900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.4900000000000e+02,y= 1.4500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.4900000000000e+02,y= 1.4500000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2700000000000e+02,y= 1.2600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.2800000000000e+02,y= 1.2700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.4400000000000e+02,y= 1.3600000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5700000000000e+02,y= 1.5800000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.4900000000000e+02,y= 1.5400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.4900000000000e+02,y= 1.5400000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 125)
setComboBox(findWidget('Dialog-New Boundary:constructor:Chooser'), 'Edge boundary from nodes')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-X to +X')
findWidget('Dialog-New Boundary').resize(368, 153)
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 7.0000000000000e+00)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.2390000000000e+03)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0120000000000e+03)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0120000000000e+03)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4400000000000e+02,y= 2.6600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.4400000000000e+02,y= 2.6700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.0800000000000e+02,y= 3.4200000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 671)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.0800000000000e+02,y= 3.4200000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '+X to -X')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','+X to -X')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0120000000000e+03)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0360000000000e+03)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '+Y to -Y')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','+Y to -Y')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0360000000000e+03)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0600000000000e+03)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '-Y to +Y')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-Y to +Y')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0600000000000e+03)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0840000000000e+03)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '+Z to -Z')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','+Z to -Z')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.0840000000000e+03)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.1080000000000e+03)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:New').clicked()
checkpoint toplevel widget mapped Dialog-New Boundary
findWidget('Dialog-New Boundary').resize(368, 153)
setComboBox(findWidget('Dialog-New Boundary:constructor:Edge boundary from nodes:direction'), '-Z to +Z')
assert tests.BoundaryNewDialogCheck0('Edge boundary from nodes','-X to +X','+X to -X','-Y to +Y','+Y to -Y','-Z to +Z','+Z to -Z',)
assert tests.BoundaryNewDialogCheck1('Edge boundary from nodes','<selection>','-Z to +Z')
findWidget('Dialog-New Boundary:gtk-ok').clicked()
checkpoint boundary page updated
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.1080000000000e+03)
checkpoint boundary page updated
checkpoint boundary page updated
checkpoint OOF.Skeleton.Boundary.Construct
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 2.4000000000000e+01)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll').get_vadjustment().set_value( 1.1320000000000e+03)
findWidget('OOF3D:Skeleton Boundaries Page:Pane:Boundaries:BoundaryListScroll:BoundaryList').get_selection().unselect_all()
checkpoint boundary page updated
checkpoint boundary page updated
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Skeleton Selection:Node:Clear').clicked()
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint OOF.Graphics_1.Toolbox.Select_Node.Clear
findMenu(findWidget('OOF3D:MenuBar'), 'File:Save:Python_Log').activate()
checkpoint toplevel widget mapped Dialog-Python_Log
findWidget('Dialog-Python_Log').resize(190, 95)
findWidget('Dialog-Python_Log:filename').set_text('boundary.log')
findWidget('Dialog-Python_Log:gtk-ok').clicked()
checkpoint OOF.File.Save.Python_Log
assert tests.filediff('boundary.log')
widget_0=findWidget('OOF3D')
handled_0=widget_0.event(event(gtk.gdk.DELETE,window=widget_0.window))
checkpoint OOF.Graphics_1.File.Close
| 71.405098
| 160
| 0.814561
|
1a8510ee599a6c29fb020f56cc93099a93989c2e
| 628
|
py
|
Python
|
appr/api/gevent_app.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 31
|
2017-07-05T07:25:31.000Z
|
2021-01-18T22:21:57.000Z
|
appr/api/gevent_app.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 48
|
2017-06-27T15:48:29.000Z
|
2021-01-26T21:02:27.000Z
|
appr/api/gevent_app.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 17
|
2017-07-05T07:25:38.000Z
|
2021-01-20T14:52:29.000Z
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import os
from gevent.pywsgi import WSGIServer  # gevent.wsgi was removed in modern gevent; pywsgi provides WSGIServer
from appr.api.app import create_app
class GeventApp(object):
    def __init__(self, options=None):
        # `options` is expected to be an argparse-style namespace exposing
        # .db_class, .bind and .port; the `or {}` fallback only guards
        # against an explicit None and does not satisfy that interface.
        self.args_options = options or {}
        os.environ['APPR_DB_CLASS'] = self.args_options.db_class
        print("Listening on %s:%s" % (self.args_options.bind, self.args_options.port))
self.http_server = WSGIServer((self.args_options.bind, self.args_options.port),
create_app())
def run(self):
self.http_server.serve_forever()
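# Usage sketch (not part of the original module): GeventApp expects an
# argparse-style namespace; the option names below mirror the attributes
# accessed above, while their default values are purely illustrative.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--bind", default="0.0.0.0")
    parser.add_argument("--port", type=int, default=5000)
    parser.add_argument("--db-class", dest="db_class", default="filesystem")
    GeventApp(parser.parse_args()).run()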
| 29.904762
| 87
| 0.678344
|
c207a5e135bb29ed6e8c8fd7c871ce0be75bcc50
| 6,580
|
py
|
Python
|
paleomix/tools/bam_stats/common.py
|
MikkelSchubert/paleomix
|
5c6414060088ba178ff1c400bdbd45d2f6b1aded
|
[
"MIT"
] | 33
|
2015-04-08T10:44:19.000Z
|
2021-11-01T14:23:40.000Z
|
paleomix/tools/bam_stats/common.py
|
MikkelSchubert/paleomix
|
5c6414060088ba178ff1c400bdbd45d2f6b1aded
|
[
"MIT"
] | 41
|
2015-07-17T12:46:16.000Z
|
2021-10-13T06:47:25.000Z
|
paleomix/tools/bam_stats/common.py
|
MikkelSchubert/paleomix
|
5c6414060088ba178ff1c400bdbd45d2f6b1aded
|
[
"MIT"
] | 19
|
2015-01-23T07:09:39.000Z
|
2021-04-06T09:30:21.000Z
|
#!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import collections
import logging
import os
import paleomix.common.argparse as argparse
import pysam
from paleomix.common.fileutils import swap_ext
from paleomix.common.formats.bed import read_bed_file, sort_bed_by_bamfile
class BAMStatsError(RuntimeError):
pass
def collect_readgroups(args, handle):
    # The None key is the fallback for reads that carry no RG tag.
    readgroups = {None: {"SM": "<NA>", "LB": "<NA>"}}
if args.ignore_readgroups:
return readgroups
for readgroup in handle.header.get("RG", ()):
key_id = readgroup["ID"]
sample = readgroup["SM"]
library = readgroup["LB"]
readgroups[key_id] = {"SM": sample, "LB": library}
return readgroups
def collect_references(args, handle):
if args.regions:
lengths = collections.defaultdict(int)
for region in args.regions:
lengths[region.name] += region.end - region.start
lengths = dict(lengths)
elif handle.nreferences <= args.max_contigs:
lengths = dict(zip(handle.references, handle.lengths))
    else:
        # Too many contigs to tabulate individually; collapse the whole
        # reference into a single '<Genome>' pseudo-contig (see --max-contigs).
        lengths = {"<Genome>": sum(handle.lengths)}
return lengths
def collect_bed_regions(filename):
regions = []
name_cache = {}
for record in read_bed_file(filename):
if not record.name:
record.name = "%s*" % (record.contig,)
        # Intern contig/region names so records sharing a name also share a
        # single string object, which saves memory on large BED files.
        record.contig = name_cache.setdefault(record.contig, record.contig)
        record.name = name_cache.setdefault(record.name, record.name)
regions.append(record)
return regions
def parse_arguments(argv, ext):
prog = "paleomix %s" % (ext.strip("."),)
usage = "%s [options] sorted.bam [out%s]" % (prog, ext)
parser = argparse.ArgumentParser(prog=prog, usage=usage)
parser.add_argument(
"infile",
metavar="BAM",
help="Filename of a sorted BAM file. If set to '-' "
"the file is read from STDIN.",
)
parser.add_argument(
"outfile",
metavar="OUTPUT",
nargs="?",
help="Filename of output table; defaults to name of "
"the input BAM with a '%s' extension. If "
"set to '-' the table is printed to STDOUT." % (ext,),
)
parser.add_argument(
"--target-name",
default=None,
metavar="NAME",
help="Name used for 'Target' column; defaults to the "
"filename of the BAM file.",
)
parser.add_argument(
"--regions-file",
default=None,
dest="regions_fpath",
help="BED file containing regions of interest; %s "
"is calculated only for these grouping by the "
"name used in the BED file, or the contig name "
"if no name has been specified for a record." % (ext.strip("."),),
)
parser.add_argument(
"--max-contigs",
default=100,
type=int,
help="The maximum number of contigs allowed in a BAM "
"file. If this number is exceeded, the entire "
"set of contigs is aggregated into one pseudo-"
"contig named '<Genome>'. This is done to "
"limit table sizes",
)
parser.add_argument(
"--ignore-readgroups",
default=False,
action="store_true",
help="Ignore readgroup information in reads, and only "
"provide aggregated statistics; this is required "
"if readgroup information is missing or partial",
)
parser.add_argument(
"--overwrite-output",
default=False,
action="store_true",
help="Overwrite output file if it it exists; by "
"default, the script will terminate if the file "
"already exists.",
)
args = parser.parse_args(argv)
if not args.outfile:
args.outfile = swap_ext(args.infile, ext)
if args.ignore_readgroups:
args.get_readgroup_func = _get_readgroup_ignored
else:
args.get_readgroup_func = _get_readgroup
if not args.target_name:
if args.infile == "-":
args.target_name = "<STDIN>"
else:
args.target_name = os.path.basename(args.infile)
if os.path.exists(args.outfile) and not args.overwrite_output:
parser.error(
"Destination filename already exists (%r); use option "
"--overwrite-output to allow overwriting of this file." % (args.outfile,)
)
return args
def main_wrapper(process_func, argv, ext):
log = logging.getLogger(__name__)
args = parse_arguments(argv, ext)
args.regions = []
if args.regions_fpath:
try:
args.regions = collect_bed_regions(args.regions_fpath)
except ValueError as error:
log.error("Failed to parse BED file %r: %s", args.regions_fpath, error)
return 1
log.info("Opening %r", args.infile)
with pysam.AlignmentFile(args.infile) as handle:
sort_order = handle.header.get("HD", {}).get("SO")
if sort_order is None:
log.warning("BAM file %r is not marked as sorted!", args.infile)
elif sort_order != "coordinate":
log.error(
"BAM file %r is %s-sorted, but coordinate-sorting is required",
args.infile,
sort_order,
)
return 1
sort_bed_by_bamfile(handle, args.regions)
return process_func(handle, args)
def _get_readgroup(record):
try:
return record.get_tag("RG")
except KeyError:
return None
def _get_readgroup_ignored(_):
return None
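# Usage sketch (assumption, not part of the original module): a minimal
# process function wired through main_wrapper; the '.cov' extension is an
# arbitrary choice for illustration.
if __name__ == "__main__":
    import sys

    def _count_records(handle, args):
        print(args.target_name, sum(1 for _ in handle))
        return 0

    sys.exit(main_wrapper(_count_records, sys.argv[1:], ".cov"))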
| 32.254902
| 85
| 0.641185
|
98df0636e57380866d9e79323e37425519c7e325
| 1,936
|
py
|
Python
|
sector/types/blockchain_format/reward_chain_block.py
|
bithadder/sector-blockchain
|
ce63d162cd8c0c7c85ae64d6d6e8bede0a8675e6
|
[
"Apache-2.0"
] | 13
|
2021-07-06T12:45:25.000Z
|
2021-09-10T22:24:52.000Z
|
sector/types/blockchain_format/reward_chain_block.py
|
bithadder/sector-blockchain
|
ce63d162cd8c0c7c85ae64d6d6e8bede0a8675e6
|
[
"Apache-2.0"
] | null | null | null |
sector/types/blockchain_format/reward_chain_block.py
|
bithadder/sector-blockchain
|
ce63d162cd8c0c7c85ae64d6d6e8bede0a8675e6
|
[
"Apache-2.0"
] | 6
|
2021-07-06T01:14:53.000Z
|
2021-07-18T05:33:02.000Z
|
from dataclasses import dataclass
from typing import Optional
from blspy import G2Element
from sector.types.blockchain_format.proof_of_space import ProofOfSpace
from sector.types.blockchain_format.sized_bytes import bytes32
from sector.types.blockchain_format.vdf import VDFInfo
from sector.util.ints import uint8, uint32, uint128
from sector.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class RewardChainBlockUnfinished(Streamable):
total_iters: uint128
signage_point_index: uint8
pos_ss_cc_challenge_hash: bytes32
proof_of_space: ProofOfSpace
challenge_chain_sp_vdf: Optional[VDFInfo] # Not present for first sp in slot
challenge_chain_sp_signature: G2Element
reward_chain_sp_vdf: Optional[VDFInfo] # Not present for first sp in slot
reward_chain_sp_signature: G2Element
@dataclass(frozen=True)
@streamable
class RewardChainBlock(Streamable):
weight: uint128
height: uint32
total_iters: uint128
signage_point_index: uint8
pos_ss_cc_challenge_hash: bytes32
proof_of_space: ProofOfSpace
challenge_chain_sp_vdf: Optional[VDFInfo] # Not present for first sp in slot
challenge_chain_sp_signature: G2Element
challenge_chain_ip_vdf: VDFInfo
reward_chain_sp_vdf: Optional[VDFInfo] # Not present for first sp in slot
reward_chain_sp_signature: G2Element
reward_chain_ip_vdf: VDFInfo
infused_challenge_chain_ip_vdf: Optional[VDFInfo] # Iff deficit < 16
is_transaction_block: bool
    def get_unfinished(self) -> RewardChainBlockUnfinished:
        # Drop the infusion-point (ip) fields, keeping only the data that is
        # known at signage-point time.
        return RewardChainBlockUnfinished(
self.total_iters,
self.signage_point_index,
self.pos_ss_cc_challenge_hash,
self.proof_of_space,
self.challenge_chain_sp_vdf,
self.challenge_chain_sp_signature,
self.reward_chain_sp_vdf,
self.reward_chain_sp_signature,
)
| 35.2
| 81
| 0.768595
|
f06c9aba61c6e5a860c4ec69c42f50809f3800c6
| 3,369
|
py
|
Python
|
cathay/settings.py
|
calvin620707/CathayCsvConverter
|
1a4275383564b81b9fb5c6d045389e50c51ecce1
|
[
"MIT"
] | null | null | null |
cathay/settings.py
|
calvin620707/CathayCsvConverter
|
1a4275383564b81b9fb5c6d045389e50c51ecce1
|
[
"MIT"
] | null | null | null |
cathay/settings.py
|
calvin620707/CathayCsvConverter
|
1a4275383564b81b9fb5c6d045389e50c51ecce1
|
[
"MIT"
] | null | null | null |
"""
Django settings for cathay project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import logging
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm%g5mmemf17k@n_e+o5-ur&#s88da+9roay2c#@!v2d^wys%1^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'converter',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cathay.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cathay.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
logging.basicConfig(
format='[%(asctime)-15s] [%(levelname)s] [%(filename)s:%(lineno)s:%(funcName)s] %(message)s',
level=logging.INFO
)
HOUSE_RENT = 18000
# Configure Django App for Heroku.
django_heroku.settings(locals())
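# Sketch (assumption, not part of the original project): before deploying,
# the hard-coded values above would normally be overridden from the
# environment, e.g.:
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'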
| 26.320313
| 97
| 0.697833
|
fb2fd8de619ebbc1cb45499a90f4c580d28658ec
| 2,007
|
py
|
Python
|
sdk/python/v1beta1/setup.py
|
ChenjunZou/katib
|
6a07daae796c29d24f63375cce71b75c4eee8d9c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/v1beta1/setup.py
|
ChenjunZou/katib
|
6a07daae796c29d24f63375cce71b75c4eee8d9c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/v1beta1/setup.py
|
ChenjunZou/katib
|
6a07daae796c29d24f63375cce71b75c4eee8d9c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
with open('requirements.txt') as f:
REQUIRES = f.readlines()
setuptools.setup(
name='kubeflow-katib',
version='0.0.6',
author="Kubeflow Authors",
author_email='premnath.vel@gmail.com',
license="Apache License Version 2.0",
url="https://github.com/kubeflow/katib/tree/master/sdk/python/v1beta1",
description="Katib Python SDK for APIVersion v1beta1",
long_description="Katib Python SDK for APIVersion v1beta1",
    packages=setuptools.find_packages(
        include=("kubeflow*",)),  # trailing comma makes this a tuple, not a bare string
package_data={},
include_package_data=False,
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=REQUIRES
)
| 37.867925
| 75
| 0.679621
|

hexsha: 575234fa05babc650cbca1802839304982af7745 | size: 525 | ext: py | lang: Python
path: tests/conftest.py | repo: timgates42/guv | head_hexsha: d7bac2ca6a73cc2059969af08223b82f3e187922 | licenses: ["MIT"]
max_stars_count: 120 (2015-01-05T15:15:26.000Z to 2020-07-28T11:25:10.000Z) | max_issues_count: 22 (2015-01-12T21:52:32.000Z to 2017-01-22T18:18:20.000Z) | max_forks_count: 13 (2015-01-18T11:42:34.000Z to 2021-07-15T10:59:22.000Z)

import pytest
from guv.greenio import socket
from guv import listen
@pytest.fixture(scope='session')
def pub_addr():
"""A working public address that is considered always available
"""
return 'gnu.org', 80
@pytest.fixture(scope='session')
def fail_addr():
"""An address that nothing is listening on
"""
return '192.0.0.0', 1000
@pytest.fixture(scope='function')
def gsock():
return socket()
@pytest.fixture(scope='function')
def server_sock():
sock = listen(('', 0))
return sock
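A usage sketch of the fixtures above: pytest injects them into any test in the suite that names them as parameters (the test bodies below are hypothetical):

# Hypothetical tests consuming the fixtures defined in this conftest.
def test_connect_public(gsock, pub_addr):
    gsock.connect(pub_addr)
    gsock.close()

def test_server_sock_has_port(server_sock):
    # listen(('', 0)) binds an ephemeral port; getsockname() reveals it
    host, port = server_sock.getsockname()
    assert port != 0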

avg_line_length: 16.935484 | max_line_length: 67 | alphanum_fraction: 0.670476

hexsha: c492c9bd203df7564d1fe3bd1ebbd2371ccfd1cd | size: 14876 | ext: py | lang: Python
path: export_pmd.py | repo: ousttrue/blpymeshio | head_hexsha: 6f79c0b1c902f297ebb562cee633fbbab8790cd6 | licenses: ["MIT"]
max_stars_count: 4 (2019-10-02T20:46:18.000Z to 2021-03-16T23:35:48.000Z) | max_issues_count: 3 (2018-08-27T16:15:33.000Z to 2020-05-19T17:29:50.000Z) | max_forks_count: 5 (2019-05-13T18:23:39.000Z to 2021-07-05T20:56:10.000Z)

#!BPY
# coding: utf-8
"""
Name: 'MikuMikuDance model (.pmd)...'
Blender: 248
Group: 'Export'
Tooltip: 'Export PMD file for MikuMikuDance.'
"""
__author__= ["ousttrue"]
__version__= "2.5"
__url__=()
__bpydoc__="""
pmd Exporter
This script exports a pmd model.
20100318: first implementation.
20100519: refactoring. use C extension.
20100530: implement basic features.
20100612: integrate 2.4 and 2.5.
20100616: implement rigid body.
20100619: fix rigid body, bone weight.
20100626: refactoring.
20100629: sphere map.
20100710: toon texture & bone group.
20100711: separate vertex with normal or uv.
20100724: update for Blender2.53.
20100731: add full python module.
20101005: update for Blender2.54.
20101228: update for Blender2.55.
20110429: update for Blender2.57b.
20110522: implement RigidBody and Constraint.
20111002: update for pymeshio-2.1.0
20120117: fix empty morph bug(ciritical).
"""
import io
from . import bl
from . import exporter
from pymeshio import common
from pymeshio import pmd
from pymeshio import englishmap
from pymeshio.pmd import writer
import bpy
import bpy_extras.io_utils # pylint: disable=E0401
def near(x, y, EPSILON=1e-5):
d=x-y
return d>=-EPSILON and d<=EPSILON
def toCP932(s):
return s.encode('cp932')
def write(ex, path):
model=pmd.Model()
o=ex.root.o
englishName=o.name
name=o[bl.MMD_MB_NAME] if bl.MMD_MB_NAME in o else 'Blenderエクスポート'
comment=o[bl.MMD_MB_COMMENT] if bl.MMD_MB_COMMENT in o else 'Blnderエクスポート\n'
englishComment=o[bl.MMD_COMMENT] if bl.MMD_COMMENT in o else 'blender export\n'
model.name=name.encode('cp932')
model.english_name=englishName.encode('cp932')
model.comment=comment.encode('cp932')
model.english_comment=englishComment.encode('cp932')
    # vertices
model.vertices=[pmd.Vertex(
# convert right-handed z-up to left-handed y-up
common.Vector3(pos[0], pos[2], pos[1]),
# convert right-handed z-up to left-handed y-up
common.Vector3(attribute.nx, attribute.nz, attribute.ny),
# reverse vertical
common.Vector2(attribute.u, 1.0-attribute.v),
ex.skeleton.indexByName(b0),
ex.skeleton.indexByName(b1),
int(100*weight),
# edge flag, 0: enable edge, 1: not edge
0
)
for pos, attribute, b0, b1, weight in ex.oneSkinMesh.vertexArray.zip()]
    # faces and materials
vertexCount=ex.oneSkinMesh.getVertexCount()
for material_name, indices in ex.oneSkinMesh.vertexArray.each():
#print('material:', material_name)
try:
m=bl.material.get(material_name)
except KeyError as e:
m=exporter.oneskinmesh.DefaultMaterial()
def get_texture_name(texture):
pos=texture.replace("\\", "/").rfind("/")
if pos==-1:
return texture
else:
return texture[pos+1:]
textures=[get_texture_name(path)
for path in bl.material.eachEnalbeTexturePath(m)]
#print(textures)
        # material
model.materials.append(pmd.Material(
# diffuse_color
common.RGB(m.diffuse_color[0], m.diffuse_color[1], m.diffuse_color[2]),
m.use_transparency and m.alpha or 1.0,
# specular_factor
0 if m.specular_toon_size<1e-5 else m.specular_toon_size * 10,
# specular_color
common.RGB(m.specular_color[0], m.specular_color[1], m.specular_color[2]),
# ambient_color
common.RGB(m.mirror_color[0], m.mirror_color[1], m.mirror_color[2]),
# toon
0,
# flag
1 if m.subsurface_scattering.use else 0,
# vertex_count
len(indices),
# texture
('*'.join(textures) if len(textures)>0 else "").encode('cp932')
))
        # faces
for i in indices:
assert(i<vertexCount)
for i in range(0, len(indices), 3):
# reverse triangle
model.indices.append(indices[i+2])
model.indices.append(indices[i+1])
model.indices.append(indices[i])
boneMap=dict([(b.name, i) for i, b in enumerate(ex.skeleton.bones)])
# bones
for i, b in enumerate(ex.skeleton.bones):
# name
v=englishmap.getUnicodeBoneName(b.name)
if not v:
v=[b.name, b.name]
assert(v)
bone=pmd.Bone(v[1].encode('cp932'))
# english name
bone_english_name=toCP932(b.name)
if len(bone_english_name)>=20:
print('bone_english_name', bone_english_name)
#assert(len(bone_english_name)<20)
bone.english_name=bone_english_name
bone.parent_index=b.parent_index
bone.tail_index=b.tail_index
bone.ik_index=0
if b.constraint==exporter.bonebuilder.CONSTRAINT_NONE:
bone.type=pmd.Bone.ROTATE_MOVE
else:
bone.type=pmd.Bone.ROTATE
if not b.isVisible:
bone.type=pmd.Bone.UNVISIBLE
bone.tail_index=0
if b.constraint==exporter.bonebuilder.CONSTRAINT_LIMIT_ROTATION:
bone.type=pmd.Bone.ROLLING
if b.constraint==exporter.bonebuilder.CONSTRAINT_COPY_ROTATION:
if b.constraintInfluence==1.0:
bone.type=pmd.Bone.ROTATE_INFL
bone.ik_index=boneMap[b.constraintTarget]
else:
bone.type=pmd.Bone.TWEAK
bone.tail_index=boneMap[b.constraintTarget]
bone.ik_index=int(b.constraintInfluence * 100)
# convert right-handed z-up to left-handed y-up
bone.pos.x=b.pos[0] if not near(b.pos[0], 0) else 0
bone.pos.y=b.pos[2] if not near(b.pos[2], 0) else 0
bone.pos.z=b.pos[1] if not near(b.pos[1], 0) else 0
model.bones.append(bone)
# IK
for ik in ex.skeleton.ik_list:
solver=pmd.IK()
solver.index=ik.target_index
model.bones[ik.target_index].type=pmd.Bone.IK
model.bones[ik.target_index].ik_index=0
solver.target=ik.effector_index
model.bones[ik.effector_index].type=pmd.Bone.IK_TARGET
solver.length=len(ik.chain)
for i, chain in enumerate(ik.chain):
solver.children.append(chain.index)
model.bones[chain.index].type=pmd.Bone.IK_ROTATE_INFL
model.bones[chain.index].ik_index=ik.target_index
solver.iterations=ik.iterations
solver.weight=ik.weight
model.ik_list.append(solver)
for i, b in enumerate(model.bones):
if b.type==pmd.Bone.IK_TARGET:
b.tail_index=0
if b.type==pmd.Bone.IK_ROTATE_INFL or b.type==pmd.Bone.IK_TARGET:
if model.bones[b.parent_index].type==pmd.Bone.IK_ROTATE_INFL:
b.ik_index=model.bones[b.parent_index].ik_index
print(i, b.name, b.type)
    # morphs (facial expressions)
for i, m in enumerate(ex.oneSkinMesh.morphList):
v=englishmap.getUnicodeSkinName(m.name)
if not v:
v=[m.name, m.name, 4]
assert(v)
# morph
morph=pmd.Morph(v[1].encode("cp932"))
morph.english_name=m.name.encode("cp932")
m.type=v[2]
morph.type=v[2]
for index, offset in m.offsets:
# convert right-handed z-up to left-handed y-up
morph.append(index, offset[0], offset[2], offset[1])
morph.vertex_count=len(m.offsets)
model.morphs.append(morph)
    # morph display frame
    # type==0 is base
for i, m in enumerate(ex.oneSkinMesh.morphList):
if m.type==3:
model.morph_indices.append(i)
for i, m in enumerate(ex.oneSkinMesh.morphList):
if m.type==2:
model.morph_indices.append(i)
for i, m in enumerate(ex.oneSkinMesh.morphList):
if m.type==1:
model.morph_indices.append(i)
for i, m in enumerate(ex.oneSkinMesh.morphList):
if m.type==4:
model.morph_indices.append(i)
    # bone groups
for g in ex.skeleton.bone_groups:
name=englishmap.getUnicodeBoneGroupName(g[0])
if not name:
name=g[0]
englishName=g[0]
model.bone_group_list.append(pmd.BoneGroup(
(name+'\n').encode('cp932'),
(englishName).encode('cp932')
))
    # bone group members
for i, b in enumerate(ex.skeleton.bones):
if i==0:
continue
#if b.type in [6, 7]:
# continue
g=ex.skeleton.getBoneGroup(b)
if g:
model.bone_display_list.append((i, g))
# toon
toonMeshObject=None
for o in bl.object.each():
try:
if o.name.startswith(bl.TOON_TEXTURE_OBJECT):
toonMeshObject=o
break
except:
            print(o.name)
if toonMeshObject:
toonMesh=bl.object.getData(toonMeshObject)
toonMaterial=bl.mesh.getMaterial(toonMesh, 0)
material_names=[ name for name, dummy in ex.oneSkinMesh.vertexArray.each() ]
for i in range(10):
t=bl.material.getTexture(toonMaterial, i)
if t:
model.toon_textures[i]=("%s" % t.name).encode('cp932')
# update toon_index
for material_name, material in zip(material_names, model.materials):
try:
m=bl.material.get(material_name)
if any(t == slot.texture for slot in m.texture_slots if slot is not None):
material.toon_index=i
except KeyError as e:
pass
else:
model.toon_textures[i]=("toon%02d.bmp" % (i+1)).encode('cp932')
else:
for i in range(10):
model.toon_textures[i]=("toon%02d.bmp" % (i+1)).encode('cp932')
# rigid body
rigidNameMap={}
for i, obj in enumerate(ex.oneSkinMesh.rigidbodies):
name=obj[bl.RIGID_NAME] if bl.RIGID_NAME in obj else obj.name
#print('rigidbody', name)
rigidNameMap[name]=i
boneIndex=boneMap[obj[bl.RIGID_BONE_NAME]]
if boneIndex==0:
boneIndex=-1
bone=ex.skeleton.bones[0]
else:
bone=ex.skeleton.bones[boneIndex]
# x, z, y -> x, y, z
if obj[bl.RIGID_SHAPE_TYPE]==0:
shape_type=pmd.SHAPE_SPHERE
shape_size=common.Vector3(obj.scale[0], 0, 0)
elif obj[bl.RIGID_SHAPE_TYPE]==1:
shape_type=pmd.SHAPE_BOX
shape_size=common.Vector3(obj.scale[0], obj.scale[2], obj.scale[1])
elif obj[bl.RIGID_SHAPE_TYPE]==2:
shape_type=pmd.SHAPE_CAPSULE
shape_size=common.Vector3(obj.scale[0], obj.scale[2], 0)
rigidBody=pmd.RigidBody(
name.encode('cp932'),
collision_group=obj[bl.RIGID_GROUP],
no_collision_group=obj[bl.RIGID_INTERSECTION_GROUP],
bone_index=boneIndex,
shape_position=common.Vector3(
obj.location.x-bone.pos[0],
obj.location.z-bone.pos[2],
obj.location.y-bone.pos[1]),
shape_rotation=common.Vector3(
-obj.rotation_euler[0],
-obj.rotation_euler[2],
-obj.rotation_euler[1]),
shape_type=shape_type,
shape_size=shape_size,
mass=obj[bl.RIGID_WEIGHT],
linear_damping=obj[bl.RIGID_LINEAR_DAMPING],
angular_damping=obj[bl.RIGID_ANGULAR_DAMPING],
restitution=obj[bl.RIGID_RESTITUTION],
friction=obj[bl.RIGID_FRICTION],
mode=obj[bl.RIGID_PROCESS_TYPE]
)
model.rigidbodies.append(rigidBody)
# constraint
model.joints=[pmd.Joint(
name=obj[bl.CONSTRAINT_NAME].encode('cp932'),
rigidbody_index_a=rigidNameMap[obj[bl.CONSTRAINT_A]],
rigidbody_index_b=rigidNameMap[obj[bl.CONSTRAINT_B]],
position=common.Vector3(
obj.location[0],
obj.location[2],
obj.location[1]),
rotation=common.Vector3(
-obj.rotation_euler[0],
-obj.rotation_euler[2],
-obj.rotation_euler[1]),
translation_limit_min=common.Vector3(
obj[bl.CONSTRAINT_POS_MIN][0],
obj[bl.CONSTRAINT_POS_MIN][1],
obj[bl.CONSTRAINT_POS_MIN][2]
),
translation_limit_max=common.Vector3(
obj[bl.CONSTRAINT_POS_MAX][0],
obj[bl.CONSTRAINT_POS_MAX][1],
obj[bl.CONSTRAINT_POS_MAX][2]
),
rotation_limit_min=common.Vector3(
obj[bl.CONSTRAINT_ROT_MIN][0],
obj[bl.CONSTRAINT_ROT_MIN][1],
obj[bl.CONSTRAINT_ROT_MIN][2]),
rotation_limit_max=common.Vector3(
obj[bl.CONSTRAINT_ROT_MAX][0],
obj[bl.CONSTRAINT_ROT_MAX][1],
obj[bl.CONSTRAINT_ROT_MAX][2]),
spring_constant_translation=common.Vector3(
obj[bl.CONSTRAINT_SPRING_POS][0],
obj[bl.CONSTRAINT_SPRING_POS][1],
obj[bl.CONSTRAINT_SPRING_POS][2]),
spring_constant_rotation=common.Vector3(
obj[bl.CONSTRAINT_SPRING_ROT][0],
obj[bl.CONSTRAINT_SPRING_ROT][1],
obj[bl.CONSTRAINT_SPRING_ROT][2])
)
for obj in ex.oneSkinMesh.constraints]
bl.message('write: %s' % path)
with io.open(path, 'wb') as f:
return writer.write(f, model)
def _execute(filepath='', **kwargs):
active=bl.object.getActive()
if not active:
print("abort. no active object.")
return
ex=exporter.Exporter()
ex.setup()
write(ex, filepath)
bl.object.activate(active)
return {'FINISHED'}
class ExportPmd(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
'''Export to PMD file format (.pmd)'''
bl_idname = 'export_scene.mmd_pmd'
bl_label = 'Export PMD'
filename_ext = '.pmd'
filter_glob = bpy.props.StringProperty(
default='*.pmd', options={'HIDDEN'})
use_selection = bpy.props.BoolProperty(
name='Selection Only',
description='Export selected objects only',
default=False)
def execute(self, context):
bl.initialize('pmd_export', context.scene)
_execute(**self.as_keywords(
ignore=('check_existing', 'filter_glob', 'use_selection')))
bl.finalize()
return {'FINISHED'}
@classmethod
def menu_func(klass, self, context):
default_path=bpy.data.filepath.replace('.blend', '.pmd')
self.layout.operator(klass.bl_idname,
text='Miku Miku Dance Model(.pmd)',
icon='PLUGIN'
).filepath=default_path
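For the operator above to show up in Blender's File > Export menu, something must register the class and append menu_func; a hedged sketch in the Blender 2.5/2.6-era API this script targets (the function names below are assumptions, not part of the original file):

# Hypothetical registration sketch for the exporter above.
def register():
    bpy.utils.register_class(ExportPmd)
    bpy.types.INFO_MT_file_export.append(ExportPmd.menu_func)

def unregister():
    bpy.types.INFO_MT_file_export.remove(ExportPmd.menu_func)
    bpy.utils.unregister_class(ExportPmd)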

avg_line_length: 34.276498 | max_line_length: 98 | alphanum_fraction: 0.59391

hexsha: 3264670301198a064e03858f5e315775f1ed2bef | size: 20987 | ext: py | lang: Python
path: test/python/quantum_info/operators/test_operator.py | repo: ma5x/qiskit-terra | head_hexsha: 7e1969297b1f9a40371dfa38cdc09487efbb1084 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-02-27T14:05:24.000Z to 2020-02-27T14:05:24.000Z)

# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for Operator matrix linear operator class."""
import unittest
import logging
import numpy as np
from numpy.testing import assert_allclose
import scipy.linalg as la
from qiskit import QiskitError
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.extensions.standard import HGate, CHGate, CXGate
from qiskit.test import QiskitTestCase
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.predicates import matrix_equal
logger = logging.getLogger(__name__)
class OperatorTestCase(QiskitTestCase):
"""Test utils for Operator"""
# Pauli-matrix unitaries
UI = np.eye(2)
UX = np.array([[0, 1], [1, 0]])
UY = np.array([[0, -1j], [1j, 0]])
UZ = np.diag([1, -1])
UH = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
@classmethod
def rand_rho(cls, n):
"""Return random density matrix"""
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger.debug("rand_rho RandomState seeded with seed=%s", seed)
rng = np.random.RandomState(seed)
psi = rng.rand(n) + 1j * rng.rand(n)
rho = np.outer(psi, psi.conj())
rho /= np.trace(rho)
return rho
@classmethod
def rand_matrix(cls, rows, cols=None, real=False):
"""Return a random matrix."""
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger.debug("rand_matrix RandomState seeded with seed=%s", seed)
rng = np.random.RandomState(seed)
if cols is None:
cols = rows
if real:
return rng.rand(rows, cols)
return rng.rand(rows, cols) + 1j * rng.rand(rows, cols)
def simple_circuit_no_measure(self):
"""Return a unitary circuit and the corresponding unitary array."""
qr = QuantumRegister(3)
circ = QuantumCircuit(qr)
circ.h(qr[0])
circ.x(qr[1])
circ.ry(np.pi / 2, qr[2])
y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])
target = Operator(np.kron(y90, np.kron(self.UX, self.UH)))
return circ, target
def simple_circuit_with_measure(self):
"""Return a unitary circuit with measurement."""
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circ = QuantumCircuit(qr, cr)
circ.h(qr[0])
circ.x(qr[1])
circ.measure(qr, cr)
return circ
class TestOperator(OperatorTestCase):
"""Tests for Operator linear operator class."""
def test_init_array_qubit(self):
"""Test subsystem initialization from N-qubit array."""
# Test automatic inference of qubit subsystems
mat = self.rand_matrix(8, 8)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
op = Operator(mat, input_dims=8, output_dims=8)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
def test_init_array(self):
"""Test initialization from array."""
mat = np.eye(3)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (3, 3))
self.assertEqual(op.input_dims(), (3,))
self.assertEqual(op.output_dims(), (3,))
mat = self.rand_matrix(2 * 3 * 4, 4 * 5)
op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4])
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (4 * 5, 2 * 3 * 4))
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.output_dims(), (2, 3, 4))
def test_init_array_except(self):
"""Test initialization exception from array."""
mat = self.rand_matrix(4, 4)
self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2])
self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4])
self.assertRaises(QiskitError, Operator, mat, input_dims=5)
def test_init_operator(self):
"""Test initialization from Operator."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = Operator(op1)
self.assertEqual(op1, op2)
def test_circuit_init(self):
"""Test initialization from a circuit."""
# Test tensor product of 1-qubit gates
circuit = QuantumCircuit(3)
circuit.h(0)
circuit.x(1)
circuit.ry(np.pi / 2, 2)
op = Operator(circuit)
y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])
target = np.kron(y90, np.kron(self.UX, self.UH))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of Controlled-u1 gate
lam = np.pi / 4
circuit = QuantumCircuit(2)
circuit.cu1(lam, 0, 1)
op = Operator(circuit)
target = np.diag([1, 1, 1, np.exp(1j * lam)])
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of controlled-H gate
circuit = QuantumCircuit(2)
circuit.ch(0, 1)
op = Operator(circuit)
target = np.kron(self.UI, np.diag([1, 0])) + np.kron(
self.UH, np.diag([0, 1]))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_instruction_init(self):
"""Test initialization from a circuit."""
gate = CXGate()
op = Operator(gate).data
target = gate.to_matrix()
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
gate = CHGate()
op = Operator(gate).data
had = HGate().to_matrix()
target = np.kron(had, np.diag([0, 1])) + np.kron(
np.eye(2), np.diag([1, 0]))
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_circuit_init_except(self):
"""Test initialization from circuit with measure raises exception."""
circuit = self.simple_circuit_with_measure()
self.assertRaises(QiskitError, Operator, circuit)
def test_equal(self):
"""Test __eq__ method"""
mat = self.rand_matrix(2, 2, real=True)
self.assertEqual(Operator(np.array(mat, dtype=complex)),
Operator(mat))
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat.tolist()),
Operator(mat))
def test_data(self):
"""Test Operator representation string property."""
mat = self.rand_matrix(2, 2)
op = Operator(mat)
assert_allclose(mat, op.data)
def test_dim(self):
"""Test Operator dim property."""
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4))
def test_input_dims(self):
"""Test Operator input_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5))
self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4))
self.assertEqual(op.input_dims(qargs=[0]), (4,))
self.assertEqual(op.input_dims(qargs=[1]), (5,))
def test_output_dims(self):
"""Test Operator output_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.output_dims(), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2))
self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3))
self.assertEqual(op.output_dims(qargs=[0]), (2,))
self.assertEqual(op.output_dims(qargs=[1]), (3,))
self.assertEqual(op.output_dims(qargs=[2]), (4,))
self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4))
self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2))
def test_reshape(self):
"""Test Operator reshape method."""
op = Operator(self.rand_matrix(8, 8))
reshaped1 = op.reshape(input_dims=[8], output_dims=[8])
reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4])
self.assertEqual(op.output_dims(), (2, 2, 2))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(reshaped1.output_dims(), (8,))
self.assertEqual(reshaped1.input_dims(), (8,))
self.assertEqual(reshaped2.output_dims(), (2, 4))
self.assertEqual(reshaped2.input_dims(), (4, 2))
def test_copy(self):
"""Test Operator copy method"""
mat = np.eye(2)
orig = Operator(mat)
cpy = orig.copy()
cpy._data[0, 0] = 0.0
self.assertFalse(cpy == orig)
def test_is_unitary(self):
"""Test is_unitary method."""
# X-90 rotation
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
self.assertTrue(Operator(X90).is_unitary())
# Non-unitary should return false
self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary())
def test_to_operator(self):
"""Test to_operator method."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = op1.to_operator()
self.assertEqual(op1, op2)
def test_conjugate(self):
"""Test conjugate method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_conj = op.conjugate()
self.assertEqual(uni_conj, Operator(matr - 1j * mati))
def test_transpose(self):
"""Test transpose method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_t = op.transpose()
self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T))
def test_adjoint(self):
"""Test adjoint method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_adj = op.adjoint()
self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T))
def test_compose_except(self):
"""Test compose different dimension exception"""
self.assertRaises(QiskitError,
Operator(np.eye(2)).compose,
Operator(np.eye(3)))
self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2)
def test_compose(self):
"""Test compose method."""
op1 = Operator(self.UX)
op2 = Operator(self.UY)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.compose(op2), targ)
self.assertEqual(op1 @ op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.compose(op1), targ)
self.assertEqual(op2 @ op1, targ)
def test_dot(self):
"""Test dot method."""
op1 = Operator(self.UY)
op2 = Operator(self.UX)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.dot(op2), targ)
self.assertEqual(op1 * op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.dot(op1), targ)
self.assertEqual(op2 * op1, targ)
def test_compose_front(self):
"""Test front compose method."""
opYX = Operator(self.UY).compose(Operator(self.UX), front=True)
matYX = np.dot(self.UY, self.UX)
self.assertEqual(opYX, Operator(matYX))
opXY = Operator(self.UX).compose(Operator(self.UY), front=True)
matXY = np.dot(self.UX, self.UY)
self.assertEqual(opXY, Operator(matXY))
def test_compose_subsystem(self):
"""Test subsystem compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat)
self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat)
self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(np.kron(np.eye(4), mat_a), mat)
self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat)
self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(np.kron(mat_a, np.eye(4)), mat)
self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ))
def test_dot_subsystem(self):
"""Test subsystem dot method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ))
def test_compose_front_subsystem(self):
"""Test subsystem front compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ))
def test_power(self):
"""Test power method."""
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
op = Operator(X90)
self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]]))
self.assertEqual(op.power(4), Operator(-1 * np.eye(2)))
self.assertEqual(op.power(8), Operator(np.eye(2)))
def test_expand(self):
"""Test expand method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat1).expand(Operator(mat2))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat2).expand(Operator(mat1))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_tensor(self):
"""Test tensor method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat2).tensor(Operator(mat1))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat1).tensor(Operator(mat2))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_power_except(self):
"""Test power method raises exceptions."""
op = Operator(self.rand_matrix(3, 3))
# Non-integer power raises error
self.assertRaises(QiskitError, op.power, 0.5)
def test_add(self):
"""Test add method."""
mat1 = self.rand_matrix(4, 4)
mat2 = self.rand_matrix(4, 4)
op1 = Operator(mat1)
op2 = Operator(mat2)
self.assertEqual(op1._add(op2), Operator(mat1 + mat2))
self.assertEqual(op1 + op2, Operator(mat1 + mat2))
self.assertEqual(op1 - op2, Operator(mat1 - mat2))
def test_add_except(self):
"""Test add method raises exceptions."""
op1 = Operator(self.rand_matrix(2, 2))
op2 = Operator(self.rand_matrix(3, 3))
self.assertRaises(QiskitError, op1._add, op2)
def test_multiply(self):
"""Test multiply method."""
mat = self.rand_matrix(4, 4)
val = np.exp(5j)
op = Operator(mat)
self.assertEqual(op._multiply(val), Operator(val * mat))
self.assertEqual(val * op, Operator(val * mat))
def test_multiply_except(self):
"""Test multiply method raises exceptions."""
op = Operator(self.rand_matrix(2, 2))
self.assertRaises(QiskitError, op._multiply, 's')
self.assertRaises(QiskitError, op.__rmul__, 's')
self.assertRaises(QiskitError, op._multiply, op)
self.assertRaises(QiskitError, op.__rmul__, op)
def test_negate(self):
"""Test negate method"""
mat = self.rand_matrix(4, 4)
op = Operator(mat)
self.assertEqual(-op, Operator(-1 * mat))
def test_equiv(self):
"""Test negate method"""
mat = np.diag([1, np.exp(1j * np.pi / 2)])
phase = np.exp(-1j * np.pi / 4)
op = Operator(mat)
self.assertTrue(op.equiv(phase * mat))
self.assertTrue(op.equiv(Operator(phase * mat)))
self.assertFalse(op.equiv(2 * mat))
if __name__ == '__main__':
unittest.main()
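The ordering convention exercised by test_compose and test_dot above compresses to a few lines; a standalone sketch:

# Sketch: compose applies its argument AFTER self, dot applies it BEFORE.
import numpy as np
from qiskit.quantum_info import Operator
UX = Operator(np.array([[0, 1], [1, 0]]))
UY = Operator(np.array([[0, -1j], [1j, 0]]))
assert UX.compose(UY) == Operator(np.dot(UY.data, UX.data))
assert UX.dot(UY) == Operator(np.dot(UX.data, UY.data))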

avg_line_length: 38.019928 | max_line_length: 90 | alphanum_fraction: 0.593844

hexsha: 16d35d92117917030bf6fc3827316956afa3200e | size: 3383 | ext: py | lang: Python
max_stars: contrib/runners/orquesta_runner/tests/integration/test_expr_func_st2kv.py | shusugmt/st2 | 31da26badfb4ca3fb3e8cae07cfeec4791191afd | ["Apache-2.0"] | count: 1 (2020-11-09T21:05:33.000Z to 2020-11-09T21:05:33.000Z)
max_issues: contrib/runners/orquesta_runner/tests/integration/test_wiring_functions_st2kv.py | ellerbrock/st2 | b3a0d9f82053c1fd5adb616dc8331bad427cd11f | ["Apache-2.0"] | count: 3 (2021-03-25T23:57:10.000Z to 2021-03-26T00:01:05.000Z)
max_forks: contrib/runners/orquesta_runner/tests/integration/test_wiring_functions_st2kv.py | ellerbrock/st2 | b3a0d9f82053c1fd5adb616dc8331bad427cd11f | ["Apache-2.0"] | count: null

# -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from integration.orquesta import base
from st2client import models
from st2common.constants import action as ac_const
class DatastoreFunctionTest(base.TestWorkflowExecution):
@classmethod
def set_kvp(cls, name, value, scope='system', secret=False):
kvp = models.KeyValuePair(
id=name,
name=name,
value=value,
scope=scope,
secret=secret
)
cls.st2client.keys.update(kvp)
@classmethod
def del_kvp(cls, name, scope='system'):
kvp = models.KeyValuePair(
id=name,
name=name,
scope=scope
)
cls.st2client.keys.delete(kvp)
def test_st2kv_system_scope(self):
key = 'lakshmi'
value = 'kanahansnasnasdlsajks'
self.set_kvp(key, value)
wf_name = 'examples.orquesta-st2kv'
wf_input = {'key_name': 'system.%s' % key}
execution = self._execute_workflow(wf_name, wf_input)
output = self._wait_for_completion(execution)
self.assertEqual(output.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('output', output.result)
self.assertIn('value', output.result['output'])
self.assertEqual(value, output.result['output']['value'])
self.del_kvp(key)
def test_st2kv_user_scope(self):
key = 'winson'
value = 'SoDiamondEng'
self.set_kvp(key, value, 'user')
wf_name = 'examples.orquesta-st2kv'
wf_input = {'key_name': key}
execution = self._execute_workflow(wf_name, wf_input)
output = self._wait_for_completion(execution)
self.assertEqual(output.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('output', output.result)
self.assertIn('value', output.result['output'])
self.assertEqual(value, output.result['output']['value'])
# self.del_kvp(key)
def test_st2kv_decrypt(self):
key = 'kami'
value = 'eggplant'
self.set_kvp(key, value, secret=True)
wf_name = 'examples.orquesta-st2kv'
wf_input = {
'key_name': 'system.%s' % key,
'decrypt': True
}
execution = self._execute_workflow(wf_name, wf_input)
output = self._wait_for_completion(execution)
self.assertEqual(output.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('output', output.result)
self.assertIn('value', output.result['output'])
self.assertEqual(value, output.result['output']['value'])
self.del_kvp(key)
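The set_kvp/del_kvp helpers above are a thin wrapper over st2client; a hedged standalone sketch (the API endpoint is an assumption):

# Hypothetical sketch: the same key-value operations via a plain st2client Client.
from st2client.client import Client
from st2client import models
client = Client(base_url='http://127.0.0.1')  # assumed local StackStorm API
kvp = models.KeyValuePair(id='demo', name='demo', value='value', scope='system')
client.keys.update(kvp)
client.keys.delete(kvp)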

avg_line_length: 34.171717 | max_line_length: 77 | alphanum_fraction: 0.658587

hexsha: f75f9e1a84fb1952a0ef182a35374132cbae3978 | size: 4078 | ext: py | lang: Python
path: osm-adaptor/swagger_server/models/inline_response2001.py | repo: kant/osslm-OpenSourceMANO-adaptor | head_hexsha: 191673aa1935be4f986b1c5edc642622a38fcc65 | licenses: ["Apache-2.0"]
max_stars_count: 3 (2017-11-17T08:12:14.000Z to 2021-04-08T20:12:07.000Z) | max_issues_count: 1 (2017-11-16T14:42:08.000Z to 2017-11-16T14:42:08.000Z) | max_forks_count: 4 (2018-01-18T02:36:09.000Z to 2020-06-29T13:54:43.000Z)

# coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class InlineResponse2001(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name: str=None, state: str=None, created_at: date=None, last_modified_at: date=None):
"""
InlineResponse2001 - a model defined in Swagger
:param name: The name of this InlineResponse2001.
:type name: str
:param state: The state of this InlineResponse2001.
:type state: str
:param created_at: The created_at of this InlineResponse2001.
:type created_at: date
:param last_modified_at: The last_modified_at of this InlineResponse2001.
:type last_modified_at: date
"""
self.swagger_types = {
'name': str,
'state': str,
'created_at': date,
'last_modified_at': date
}
self.attribute_map = {
'name': 'name',
'state': 'state',
'created_at': 'createdAt',
'last_modified_at': 'lastModifiedAt'
}
self._name = name
self._state = state
self._created_at = created_at
self._last_modified_at = last_modified_at
@classmethod
def from_dict(cls, dikt) -> 'InlineResponse2001':
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The inline_response_200_1 of this InlineResponse2001.
:rtype: InlineResponse2001
"""
return deserialize_model(dikt, cls)
@property
def name(self) -> str:
"""
Gets the name of this InlineResponse2001.
:return: The name of this InlineResponse2001.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""
Sets the name of this InlineResponse2001.
:param name: The name of this InlineResponse2001.
:type name: str
"""
self._name = name
@property
def state(self) -> str:
"""
Gets the state of this InlineResponse2001.
:return: The state of this InlineResponse2001.
:rtype: str
"""
return self._state
@state.setter
def state(self, state: str):
"""
Sets the state of this InlineResponse2001.
:param state: The state of this InlineResponse2001.
:type state: str
"""
allowed_values = ["PUBLISHED", "UNPUBLISHED", "DELETED"]
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
@property
def created_at(self) -> date:
"""
Gets the created_at of this InlineResponse2001.
:return: The created_at of this InlineResponse2001.
:rtype: date
"""
return self._created_at
@created_at.setter
def created_at(self, created_at: date):
"""
Sets the created_at of this InlineResponse2001.
:param created_at: The created_at of this InlineResponse2001.
:type created_at: date
"""
self._created_at = created_at
@property
def last_modified_at(self) -> date:
"""
Gets the last_modified_at of this InlineResponse2001.
:return: The last_modified_at of this InlineResponse2001.
:rtype: date
"""
return self._last_modified_at
@last_modified_at.setter
def last_modified_at(self, last_modified_at: date):
"""
Sets the last_modified_at of this InlineResponse2001.
:param last_modified_at: The last_modified_at of this InlineResponse2001.
:type last_modified_at: date
"""
self._last_modified_at = last_modified_at
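Note that __init__ assigns self._state directly, so the allowed_values check in the state setter fires only on assignment after construction. A short sketch of that behavior (runnable inside the generated swagger_server package, where Model and deserialize_model resolve):

# Sketch: the state setter validates, the constructor does not.
m = InlineResponse2001(name='descriptor-1')
m.state = 'PUBLISHED'    # accepted
try:
    m.state = 'BOGUS'    # rejected by allowed_values
except ValueError as err:
    print(err)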

avg_line_length: 27.369128 | max_line_length: 108 | alphanum_fraction: 0.603727

hexsha: b01bd0b7a8f2511733197637a2633b678db5412f | size: 3286 | ext: py | lang: Python
path: tests/dicom/test_dose.py | repo: pymedphys/pymedphys-archive-2019 | head_hexsha: 6bb7c8d0da2e93ff56469bb47e65b15ece2ea25e | licenses: ["Apache-2.0"]
max_stars_count: 1 (2020-12-20T14:13:56.000Z to 2020-12-20T14:13:56.000Z) | max_issues_count: null | max_forks_count: 1 (2020-12-20T14:14:00.000Z to 2020-12-20T14:14:00.000Z)

# Copyright (C) 2018 Matthew Jennings
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test suite for the DICOM RT Dose toolbox."""
import json
from os.path import abspath, dirname
from os.path import join as pjoin
from zipfile import ZipFile
import pytest
import numpy as np
import pydicom
import pymedphys
from pymedphys._data import download
from pymedphys._dicom.collection import DicomDose
from pymedphys._dicom.dose import require_patient_orientation_be_HFS
from test_coords import get_data_file
HERE = dirname(abspath(__file__))
DATA_DIRECTORY = pjoin(HERE, "data", "dose")
ORIENTATIONS_SUPPORTED = ["FFDL", "FFDR", "FFP", "FFS", "HFDL", "HFDR", "HFP", "HFS"]
def test_dicom_dose_constancy():
    wedge_baseline_filename = "wedge_dose_baseline.json"
baseline_dicom_dose_dict_zippath = download.get_file_within_data_zip(
"dicom_dose_test_data.zip", "lfs-wedge_dose_baseline.zip"
)
test_dicom_dose_filepath = download.get_file_within_data_zip(
"dicom_dose_test_data.zip", "RD.wedge.dcm"
)
test_dicom_dose = DicomDose.from_file(test_dicom_dose_filepath)
with ZipFile(baseline_dicom_dose_dict_zippath, "r") as zip_ref:
        with zip_ref.open(wedge_baseline_filename) as a_file:
expected_dicom_dose_dict = json.load(a_file)
assert np.allclose(
test_dicom_dose.values, np.array(expected_dicom_dose_dict["values"])
)
assert test_dicom_dose.units == expected_dicom_dose_dict["units"]
assert np.allclose(test_dicom_dose.x, np.array(expected_dicom_dose_dict["x"]))
assert np.allclose(test_dicom_dose.y, np.array(expected_dicom_dose_dict["y"]))
assert np.allclose(test_dicom_dose.z, np.array(expected_dicom_dose_dict["z"]))
assert np.allclose(
test_dicom_dose.coords, np.array(expected_dicom_dose_dict["coords"])
)
def test_require_patient_orientation_be_HFS():
test_ds_dict = {
key: pydicom.dcmread(get_data_file(key)) for key in ORIENTATIONS_SUPPORTED
}
ds_no_orient = pydicom.dcmread(
str(pymedphys.data_path("example_structures.dcm")), force=True
)
test_ds_dict["no orient"] = ds_no_orient
for orient, ds in test_ds_dict.items():
if orient == "HFS":
require_patient_orientation_be_HFS(ds)
elif orient == "no orient":
with pytest.raises(AttributeError) as ea:
require_patient_orientation_be_HFS(ds)
assert "object has no attribute 'ImageOrientationPatient'" in str(ea.value)
else:
with pytest.raises(ValueError) as ev:
require_patient_orientation_be_HFS(ds)
assert (
"The supplied dataset has a patient orientation "
"other than head-first supine" in str(ev.value)
)
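A hedged sketch of the DicomDose API the constancy test exercises (the file path below is hypothetical):

# Hypothetical sketch: loading a dose grid with the same API as the test.
from pymedphys._dicom.collection import DicomDose
dose = DicomDose.from_file('RD.example.dcm')   # hypothetical path
print(dose.units)                              # dose units string
print(dose.values.shape)                       # the dose grid
print(dose.x[0], dose.y[0], dose.z[0])         # coordinate axes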

avg_line_length: 34.957447 | max_line_length: 87 | alphanum_fraction: 0.721546

hexsha: aa7511e9b3db4eaa762aa5f44d6d88fb6e583289 | size: 12998 | ext: py | lang: Python
path: mplt/__init__.py | repo: mizzbrumblebee/mplt | head_hexsha: 355f3010d7a4cd0d29c23552830d7146f1ad5e25 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

"""Set of niceties wrapping matplotlib
"""
__author__ = 'Craig Stringham'
__version__ = 2.0
import matplotlib
# in order to pass through and un-overloaded functions to pyplot
from matplotlib.pyplot import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.path import Path as mpPath
import numpy as np
import os      # used by mkdir_p/saveall below
import errno   # used by mkdir_p below
linecolors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
rcParams['axes.prop_cycle'] = (
"cycler('linestyle', ['-', '--', ':']) * cycler('color', {})".format(linecolors))
rcParams['image.aspect'] = 'auto'
def myfig(x=None, showtitle=True, clearfig=True, **kwargs):
if x is None:
x = np.random.randint(10000)
if clearfig:
plt.close(x)
fig = plt.figure(x, **kwargs)
if showtitle:
plt.suptitle(x)
return fig
def myshow():
import sys
if plt.get_backend().find('inline') > 0:
#print('myshow() inline')
return
try:
sys.ps1
#print('myshow() ps1')
show(False)
except:
#print('myshow() except')
show(True)
def plot3(*args, **kwargs):
ax = gca(projection='3d')
ax.plot(*args, **kwargs)
return ax
def format_coord(x, y, X, extent=None):
"""Set the format of the coordinates that are read when hovering
over on a plot.
"""
numrows, numcols = X.shape
if extent is not None:
col = int((x - extent[0]) / (extent[1] - extent[0]) * numcols + 0.5)
row = int((y - extent[3]) / (extent[2] - extent[3]) * numrows + 0.5)
else:
col = int(x + 0.5)
row = int(y + 0.5)
if col >= 0 and col < numcols and row >= 0 and row < numrows:
z = X[row, col]
return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x, y, z)
else:
return 'x=%1.4f, y=%1.4f, z=E!' % (x, y)
def imshow(*args, **kwargs):
ax = plt.imshow(*args, **kwargs)
ax.format_coord = lambda x, y: format_coord(x, y, args[0])
return ax
def pcolor(*args, **kwargs):
plt.pcolor(*args, **kwargs)
ax = gca()
ax.format_coord = lambda x, y: format_coord(x, y, args[0])
def pcolormesh(*args, **kwargs):
ax = plt.pcolormesh(*args, **kwargs)
ax.format_coord = lambda x, y: format_coord(x, y, args[0])
return ax
def imenhance(x, percentiles=[5, 95]):
isf = np.isfinite(x)
(vmin, vmax) = np.percentile(x[isf], percentiles)
y = x
y[x < vmin] = vmin
y[x > vmax] = vmax
return y
def imshowe(x, percentiles=[5, 95], **kwargs):
isf = np.isfinite(x)
(vmin, vmax) = np.percentile(x[isf], percentiles)
if 'percentiles' in kwargs:
del(kwargs['percentiles'])
return imshow(x, vmin=vmin, vmax=vmax, **kwargs)
def im2(X, Y, xtitle, ytitle, **kwargs):
ax1 = plt.subplot(121)
title(xtitle)
imshow(X, **kwargs)
colorbar()
#ax1.format_coord= lambda x,y:format_coord(x,y,X)
ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
title(ytitle)
imshow(Y, **kwargs)
colorbar()
#ax2.format_coord= lambda x,y:format_coord(x,y,Y)
plt.subplot(121)
return ax1, ax2
def pcolor2(x, y, xtitle, ytitle, **kwargs):
ax = plt.subplot(121)
title(xtitle)
pcolor(x, **kwargs)
colorbar()
ax2 = plt.subplot(122, sharex=ax, sharey=ax)
title(ytitle)
pcolor(y, **kwargs)
colorbar()
return ax, ax2
def imimshow(x, **kwargs):
if not np.any(np.iscomplex(x)): # and x.dtype != np.complex64:
imshow(x, **kwargs)
else:
im2(x.real, x.imag, 'real', 'imag', **kwargs)
def mpimshow(x, **kwargs):
if x.size < 1:
print('Warning empty array supplied')
return
if not np.any(np.iscomplex(x)):
return imshow(x, **kwargs)
else:
return im2(np.abs(x), angle_offset(x), 'Magnitude', 'Phase', **kwargs)
def dbpimshow(x, **kwargs):
if np.all(np.isreal(x)) and x.dtype != np.complex64:
imshow(x, **kwargs)
else:
im2(20 * np.log10(np.abs(x)), angle_offset(x),
'Magnitude(dB)', 'Phase', **kwargs)
def impcolor(x, **kwargs):
if np.all(np.isreal(x)) and x.dtype != np.complex64:
pcolor(x, **kwargs)
else:
pcolor2(x.real, x.imag, 'real', 'imag', **kwargs)
def mppcolor(x, **kwargs):
if np.all(np.isreal(x)) and x.dtype != np.complex64:
pcolor(x, **kwargs)
else:
pcolor2(np.abs(x), angle_offset(x), 'Magnitude', 'Phase', **kwargs)
def implot(x, y=None, **kwargs):
if y is None:
y = x
x = np.arange(x.shape[0])
ax = subplot(211)
plot(x, y.real, **kwargs)
title('real')
subplot(212, sharex=ax)
plot(x, y.imag, **kwargs)
title('imag')
def mpplot(x, y=None, **kwargs):
if y is None:
y = x
x = np.arange(x.shape[0])
ax = subplot(211)
plot(x, np.abs(y), **kwargs)
title('magnitude')
ax2 = subplot(212, sharex=ax)
plot(x, np.angle(y), **kwargs)
title('phase')
return ax, ax2
def dbpplot(x, y=None, **kwargs):
if y is None:
y = x
x = np.arange(x.shape[0])
ax = subplot(211)
plot(x, 20 * np.log10(np.abs(y)), **kwargs)
title('magnitude (dB)')
subplot(212, sharex=ax)
plot(x, np.angle(y), **kwargs)
title('phase')
def plotyy(*args, **kwargs):
"""modified from http://matplotlib.org/examples/api/two_scales.html"""
ax1 = gca()
if len(args) == 4:
t1, s1, t2, s2 = args
    elif len(args) == 3:
t, s1, s2 = args
t1 = t
t2 = t
elif len(args) == 2:
s1, s2 = args
t1 = np.arange(len(s1))
t2 = np.arange(len(s2))
else:
        raise Exception(
            "I don't know how to handle {} arguments".format(len(args)))
color0 = kwargs.pop('color0', 'b')
color1 = kwargs.pop('color1', 'r')
ax1.plot(t1, s1, color=color0, **kwargs) # , 'b-')
#ax1.set_xlabel('time (s)')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('f(x)', color=color0)
ax1.set_ylim(np.percentile(s1, [1, 99]))
for tl in ax1.get_yticklabels():
tl.set_color(color0)
ax2 = ax1.twinx()
ax2.plot(t2, s2, linestyle=':', color=color1)
ax2.set_ylabel('g(x)', color=color1)
ax2.set_ylim(np.percentile(s2, [1, 99]))
ax2.ticklabel_format(useOffset=False)
for tl in ax2.get_yticklabels():
tl.set_color(color1)
return ax1, ax2
def imshow_overlay(z1, z2, **kwargs):
ax = gca()
alpha = 0.5
if 'alpha' in kwargs:
alpha = kwargs['alpha']
del(kwargs['alpha'])
ax.imshow(z1, cmap=cm.gray, **kwargs)
ax.imshow(z2, alpha=alpha, cmap=cm.jet, **kwargs)
def multiimage(*args, **kwargs):
"""multiimage plots multiple images in one figure.
"""
colorbar = kwargs.pop('colorbar', False)
altpref = kwargs.pop('altpref', False)
layout = kwargs.pop('layout', None)
subtitles = kwargs.pop('subtitles', None)
labels = kwargs.pop('labels', None)
subtitles = subtitles or labels
pscale = kwargs.pop('pscale', None)
noticks = kwargs.pop('noticks', None)
numplot = len(args)
prefsizes = [(1, 1), (1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (2, 3),
(3, 3), (3, 3), (3, 3), (3, 4), (3, 4), (3, 4),
(4, 4), (4, 4), (4, 4), (4, 4)]
if args[0].shape[0] < 1.5 * args[0].shape[1]:
prefsizes = [(y, x) for x, y in prefsizes]
# if numplot > len(prefsizes):
# raise(Exception('unexpectedNumber', 'multiimage is not prepared to plot more than {} figures at once'.format(len(prefsizes))))
if numplot > len(prefsizes):
w = np.ceil(np.sqrt(numplot))
h = (w - 1)
if 2 * h < numplot:
h += 1
elif layout is not None:
(w, h) = layout
else:
(w, h) = prefsizes[numplot]
if altpref:
h, w = (w, h)
for count, img in enumerate(args):
if count == 0:
ax1 = subplot(w, h, 1)
else:
subplot(w, h, count + 1, sharex=ax1, sharey=ax1)
if pscale is not None:
kwargs['vmin'], kwargs['vmax'] = np.percentile(
img.flatten(), pscale)
imshow(img, **kwargs)
if subtitles is not None:
title(subtitles[count])
if colorbar:
plt.colorbar()
if noticks:
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
ax1.xaxis.set_ticklabels([])
ax1.yaxis.set_ticklabels([])
def cubeimage(cube, **kwargs):
"""given a 3d array display each plane """
arglist = [cube[0, :, :]]
# print(cube.shape[0])
for k in range(cube.shape[0] - 1):
# print(cube[k+1,:,:].shape)
arglist.append(cube[k + 1, :, :])
#print([x.shape for x in arglist])
multiimage(*arglist, **kwargs)
def fancy_hist(data):
""" from http://stackoverflow.com/a/6353051/1840190 """
from matplotlib.ticker import FormatStrFormatter
ax = gca()
counts, bins, patches = ax.hist(
data, 40, facecolor='yellow', edgecolor='gray')
# Set the ticks to be at the edges of the bins.
ax.set_xticks(bins)
# Set the xaxis's tick labels to be formatted with 1 decimal place...
ax.xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
# Change the colors of bars at the edges...
twentyfifth, seventyfifth = np.percentile(data, [25, 75])
for patch, rightside, leftside in zip(patches, bins[1:], bins[:-1]):
if rightside < twentyfifth:
patch.set_facecolor('green')
elif leftside > seventyfifth:
patch.set_facecolor('red')
# Label the raw counts and the percentages below the x-axis...
bin_centers = 0.5 * np.diff(bins) + bins[:-1]
for count, x in zip(counts, bin_centers):
# Label the raw counts
ax.annotate(str(count), xy=(x, 0), xycoords=('data', 'axes fraction'),
xytext=(0, -18), textcoords='offset points', va='top', ha='center')
# Label the percentages
percent = '%0.0f%%' % (100 * float(count) / counts.sum())
ax.annotate(percent, xy=(x, 0), xycoords=('data', 'axes fraction'),
xytext=(0, -32), textcoords='offset points', va='top', ha='center')
# Give ourselves some more room at the bottom of the plot
subplots_adjust(bottom=0.15)
def add_color_bar(fig, ax, im, frac=.08):
axp = ax.get_position()
# left bottom width height
cax = fig.add_axes(
[axp.x1, axp.y0, frac * (axp.x1 - axp.x0), axp.y1 - axp.y0])
fig.colorbar(im, cax=cax)
def saveall(outputdir='.', extension='.png'):
od = addtslash(outputdir)
mkdir_p(od)
pickleFig = False
if extension.find('.pickle') == 0:
pickleFig = True
if pickleFig:
figscript = ''
figscript += 'import matplotlib.pyplot as plt\nimport pickle\n'
for f in plt.get_fignums():
fig = plt.figure(f)
figname = fig.get_label().replace(' ', '_')
figname = 'FIG_' + figname.replace('/', '_-')
if pickleFig:
import pickle
picklename = '{}.pickle'.format(figname)
fout = open(od + picklename, 'wb')
pickle.dump(fig, fout)
fout.close()
figscript += 'ax = pickle.load(open(r''{}'',''rb''))\n'.format(
picklename)
else:
plt.savefig(od + '{}{}'.format(figname, extension))
if pickleFig:
figscript += 'plt.show()\n'
figscriptfile = open(od + 'plotfigs.py', 'w')
figscriptfile.write(figscript)
figscriptfile.close()
def polyContains(polyBounds, points, plot=False):
"""find the points that are contained in the bounding polygon"""
boxcodes = [mpPath.MOVETO]
polyBounds = np.append(polyBounds, polyBounds[0]).reshape((-1, 2))
for k in range(len(polyBounds) - 1):
boxcodes.append(mpPath.LINETO)
boxcodes[-1] = mpPath.CLOSEPOLY
bbPoly = mpPath(polyBounds, boxcodes)
if plot: # debugging patch
import matplotlib as mpl
fig = myfig('debug patch')
ax = fig.add_subplot(111)
patch = mpl.patches.PathPatch(
bbPoly, facecolor='orange', lw=2, alpha=0.5)
ax.add_patch(patch)
fpa = np.asarray(points)
scatter(fpa[:, 0], fpa[:, 1])
myshow()
withinbox = bbPoly.contains_points(points)
return withinbox
def mbin(bin_edges):
return bin_edges[:-1] + .5 * np.diff(bin_edges)
## various utilities
def angle_offset(img):
"""Calculate the angle, but wrap around the mean angle"""
m = np.nanmean(img)
m /= abs(m)
out = np.angle(img * m.conj()) + np.angle(m)
return out
def addtslash(d):
if d[-1] == '/':
return d
else:
return d + '/'
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise exc
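A short usage sketch of the complex-image helpers defined above:

# Sketch: visualising a complex array with mplt's helpers.
import numpy as np
import mplt
img = np.random.rand(64, 64) + 1j * np.random.rand(64, 64)
mplt.myfig('demo')
mplt.mpimshow(img)   # complex input: magnitude and phase side by side
mplt.myshow()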

avg_line_length: 28.948775 | max_line_length: 135 | alphanum_fraction: 0.571703

hexsha: 56a2baa6697fce72ecee974d7e880b45d8f9bf0d | size: 1603 | ext: py | lang: Python
path: odp/api/routers/tag.py | repo: SAEON/Open-Data-Platform | head_hexsha: 8509c39c6f65ba18518e825e2359213ec4c67af5 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

from fastapi import APIRouter, Depends, HTTPException
from jschon import URI
from sqlalchemy import select
from starlette.status import HTTP_404_NOT_FOUND
from odp import ODPScope
from odp.api.lib.auth import Authorize
from odp.api.lib.paging import Page, Paginator
from odp.api.models import TagModel
from odp.db import Session
from odp.db.models import Tag
from odp.lib.schema import schema_catalog
router = APIRouter()
@router.get(
'/',
response_model=Page[TagModel],
dependencies=[Depends(Authorize(ODPScope.TAG_READ))],
)
async def list_tags(
paginator: Paginator = Depends(),
):
return paginator.paginate(
select(Tag),
lambda row: TagModel(
id=row.Tag.id,
flag=row.Tag.flag,
public=row.Tag.public,
scope_id=row.Tag.scope_id,
schema_id=row.Tag.schema_id,
schema_uri=row.Tag.schema.uri,
schema_=schema_catalog.get_schema(URI(row.Tag.schema.uri)).value,
)
)
@router.get(
'/{tag_id}',
response_model=TagModel,
dependencies=[Depends(Authorize(ODPScope.TAG_READ))],
)
async def get_tag(
tag_id: str,
):
tag = Session.execute(
select(Tag).
where(Tag.id == tag_id)
).scalar_one_or_none()
if not tag:
raise HTTPException(HTTP_404_NOT_FOUND)
return TagModel(
id=tag.id,
flag=tag.flag,
public=tag.public,
scope_id=tag.scope_id,
schema_id=tag.schema_id,
schema_uri=tag.schema.uri,
schema_=schema_catalog.get_schema(URI(tag.schema.uri)).value,
)
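A minimal sketch of mounting this router in an application (the prefix is an assumption, not taken from this file):

# Hypothetical wiring sketch for the router above.
from fastapi import FastAPI
from odp.api.routers.tag import router
app = FastAPI()
app.include_router(router, prefix='/tag', tags=['tag'])
# GET /tag/ lists tags; GET /tag/{tag_id} fetches one or returns 404.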

avg_line_length: 25.046875 | max_line_length: 77 | alphanum_fraction: 0.660012

hexsha: 140385f8cd0ca81ae687531b63687ab1aafed08b | size: 2317 | ext: py | lang: Python
path: generate_tracks.py | repo: DurbinLiu/deepfake-detection-challenge | head_hexsha: c42f335ec861c7f2462cb0703ca53abf7b6a22a5 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

import os
import yaml
import tqdm
import glob
import pickle
from tracker.iou_tracker import track_iou
from detect_faces_on_videos import DETECTIONS_FILE_NAME, DETECTIONS_ROOT
SIGMA_L = 0.3
SIGMA_H = 0.9
SIGMA_IOU = 0.3
T_MIN = 1
TRACKS_FILE_NAME = 'tracks.pkl'
def get_tracks(detections):
if len(detections) == 0:
return []
converted_detections = []
for i, detections_per_frame in enumerate(detections):
converted_detections_per_frame = []
for j, (bbox, score) in enumerate(zip(detections_per_frame['boxes'], detections_per_frame['scores'])):
bbox = tuple(bbox.tolist())
converted_detections_per_frame.append({'bbox': bbox, 'score': score})
converted_detections.append(converted_detections_per_frame)
tracks = track_iou(converted_detections, SIGMA_L, SIGMA_H, SIGMA_IOU, T_MIN)
tracks_converted = []
for track in tracks:
track_converted = []
start_frame = track['start_frame'] - 1
for i, bbox in enumerate(track['bboxes']):
track_converted.append((start_frame + i, bbox))
tracks_converted.append(track_converted)
return tracks_converted
def main():
with open('config.yaml', 'r') as f:
        config = yaml.safe_load(f)
root_dir = os.path.join(config['ARTIFACTS_PATH'], DETECTIONS_ROOT)
detections_content = []
for path in glob.iglob(os.path.join(root_dir, '**', DETECTIONS_FILE_NAME), recursive=True):
rel_path = path[len(root_dir) + 1:]
detections_content.append(rel_path)
detections_content = sorted(detections_content)
print('Total number of videos: {}'.format(len(detections_content)))
video_to_tracks = {}
for rel_path in tqdm.tqdm(detections_content):
video = os.path.dirname(rel_path)
with open(os.path.join(root_dir, rel_path), 'rb') as f:
detections = pickle.load(f)
video_to_tracks[video] = get_tracks(detections)
track_count = sum([len(tracks) for tracks in video_to_tracks.values()])
print('Total number of tracks: {}'.format(track_count))
with open(os.path.join(config['ARTIFACTS_PATH'], TRACKS_FILE_NAME), 'wb') as f:
pickle.dump(video_to_tracks, f)
if __name__ == '__main__':
main()
| 32.633803
| 111
| 0.665084
|
3e4ee7ffc8d84f1ca049a2eee95f245339beeb64
| 301
|
py
|
Python
|
Mundo 1 Fundamentos/ex012.py
|
costa53/curso_em_video_python3
|
4f859641324f8b35be56d807f40457d7dddc451f
|
[
"MIT"
] | 1
|
2022-02-17T16:23:52.000Z
|
2022-02-17T16:23:52.000Z
|
Mundo 1 Fundamentos/ex012.py
|
costa53/curso_em_video_python3
|
4f859641324f8b35be56d807f40457d7dddc451f
|
[
"MIT"
] | null | null | null |
Mundo 1 Fundamentos/ex012.py
|
costa53/curso_em_video_python3
|
4f859641324f8b35be56d807f40457d7dddc451f
|
[
"MIT"
] | null | null | null |
# CHALLENGE 012
# Write an algorithm that reads the price of a product and shows its new price with a 5% discount.
pr = float(input('What is the price of the product? R$'))
desc = 5
pf = pr - (pr * desc / 100)
print(f'The product that cost R${pr:.2f} will cost R${pf:.2f} in the promotion with a {desc}% discount')
| 37.625
| 101
| 0.674419
|
c7f9ce9967a048b500e61ff70ff76da438293b78
| 5,022
|
py
|
Python
|
ECore_Copier_MM/transformation-Large/HepackageOUTeFactoryInstanceSolveRefEPackageEFactoryEPackageEFactory.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
ECore_Copier_MM/transformation-Large/HepackageOUTeFactoryInstanceSolveRefEPackageEFactoryEPackageEFactory.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
ECore_Copier_MM/transformation-Large/HepackageOUTeFactoryInstanceSolveRefEPackageEFactoryEPackageEFactory.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis
class HepackageOUTeFactoryInstanceSolveRefEPackageEFactoryEPackageEFactory(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HepackageOUTeFactoryInstanceSolveRefEPackageEFactoryEPackageEFactory.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HepackageOUTeFactoryInstanceSolveRefEPackageEFactoryEPackageEFactory, self).__init__(name='HepackageOUTeFactoryInstanceSolveRefEPackageEFactoryEPackageEFactory', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """epackageOUTeFactoryInstanceSolveRefEPackageEFactoryEPackageEFactory"""
self["GUID__"] = 4709174051022166187
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 4231580133452891083
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 8631377411555995428
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 4237852361844278249
self.vs[3]["associationType"] = """eFactoryInstance"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 5218313243529445137
self.vs[4]["associationType"] = """eFactoryInstance"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 6836889693106884853
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EPackage"""
self.vs[5]["mm__"] = """EPackage"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 2514770223916284325
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 5349404699201334775
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EFactory"""
self.vs[7]["mm__"] = """EFactory"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 5048402173467301935
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 536283503097499105
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EPackage"""
self.vs[9]["mm__"] = """EPackage"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 8470835144066371415
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 6875894762205235978
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EFactory"""
self.vs[11]["mm__"] = """EFactory"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 8069813301381615213
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 5797562316180377653
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 521590616679632308
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 743327577253593610
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 8475936060619617328
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 2541931610233514074
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 6708348967188956303
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 1675741724473110817
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 186368678036501396
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 3341488270157212101
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 5317975929562910499
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 9130525873714163321
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 3570720800914918025
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 8166534211753768185
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 686081286119546133
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 4665868472507240272
| 48.288462
| 298
| 0.544604
|
9bf3a0d83d4975996e74d32119e3cea15412e6cf
| 4,699
|
py
|
Python
|
bdscan/bdio.py
|
matthewb66/blackduck-scan-directguidance
|
00d7dc76fad67bc7df8765048f2b1b115a1f7d3c
|
[
"Apache-2.0"
] | null | null | null |
bdscan/bdio.py
|
matthewb66/blackduck-scan-directguidance
|
00d7dc76fad67bc7df8765048f2b1b115a1f7d3c
|
[
"Apache-2.0"
] | null | null | null |
bdscan/bdio.py
|
matthewb66/blackduck-scan-directguidance
|
00d7dc76fad67bc7df8765048f2b1b115a1f7d3c
|
[
"Apache-2.0"
] | null | null | null |
# import argparse
import glob
# import hashlib
import json
import os
# import random
# import re
# import shutil
import sys
import zipfile
from bdscan import globals
import networkx as nx
# from BlackDuckUtils import Utils
# from BlackDuckUtils import NpmUtils
# from BlackDuckUtils import MavenUtils
# from blackduck import Client
def read_json_object(filepath):
with open(filepath) as jsonfile:
data = json.load(jsonfile)
return data
def zip_extract_files(zip_file, dir_name):
# print("Extracting content of {} into {}".format(zip_file, dir_name))
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
zip_ref.extractall(dir_name)
def bdio_read(bdio_in, inputdir):
zip_extract_files(bdio_in, inputdir)
filelist = os.listdir(inputdir)
for filename in filelist:
# print ("processing {}".format(filename))
if filename.startswith("bdio-entry"):
filepath_in = os.path.join(inputdir, filename)
data = read_json_object(filepath_in)
return data
def get_bdio_dependency_graph(output_dir):
# Parse BDIO file into network graph
    # max() raises on an empty sequence, so collect candidates before taking the latest
    bdio_dirs = glob.glob(output_dir + "/runs/*/bdio")
    if len(bdio_dirs) == 0:
        print("BD-Scan-Action: ERROR: Unable to find latest scan folder: " + output_dir + "/runs/*/bdio")
        sys.exit(1)
    bd_output_latest_dir = max(bdio_dirs, key=os.path.getmtime)
bd_rapid_output_bdio = glob.glob(bd_output_latest_dir + "/*.bdio")
if len(bd_rapid_output_bdio) == 0:
print("BD-Scan-Action: ERROR: Unable to find output scan files in: " + output_dir + "/runs/*/bdio/*.bdio")
sys.exit(1)
# TODO is there a case where there would be more than one BDIO file?
bdio_list = []
for bdfile in bd_rapid_output_bdio:
# globals.printdebug(f"DEBUG: BDIO File: " + bdfile)
bdio_list.append(bdio_read(bdfile, bd_output_latest_dir))
# if (globals.debug):
# print(f"DEBUG: BDIO Dump: " + json.dumps(bdio_data, indent=4))
# Construct dependency graph
G = nx.DiGraph()
globals.printdebug("DEBUG: Create dependency graph...")
# Save project for later so we can find the direct dependencies
projects = []
for bdio in bdio_list:
globals.printdebug("DEBUG: Processing bdio ..")
for node in bdio['@graph']:
parent = node['@id']
globals.printdebug(f"DEBUG: Parent {parent}")
nx_node = None
if "https://blackducksoftware.github.io/bdio#hasDependency" in node:
if isinstance(node['https://blackducksoftware.github.io/bdio#hasDependency'], list):
for dependency in node['https://blackducksoftware.github.io/bdio#hasDependency']:
child = dependency['https://blackducksoftware.github.io/bdio#dependsOn']['@id']
globals.printdebug(f"DEBUG: Dependency on {child}")
nx_node = G.add_edge(parent, child)
else:
child = node['https://blackducksoftware.github.io/bdio#hasDependency'][
'https://blackducksoftware.github.io/bdio#dependsOn']['@id']
globals.printdebug(f"DEBUG: (2) Dependency on {child}")
nx_node = G.add_edge(parent, child)
if node['@type'] == "https://blackducksoftware.github.io/bdio#Project":
projects.append(parent)
globals.printdebug(f"DEBUG: Project name is {parent}")
G.add_node(parent, project=1)
else:
nx_node = G.add_node(parent)
return G, projects
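# Illustrative helper (an assumption, not part of the scan action): with the
# graph built above, the direct dependencies of a project are its successors.
def _direct_dependencies(graph, project_id):
    return list(graph.successors(project_id))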
# def get_dependency_type(bdio_graph, bdio_projects, componentIdentifier):
# comp_ns, comp_name, comp_version = Utils.parse_component_id(componentIdentifier)
# Matching in the BDIO requires an http: prefix
#
# dependency_type = "Direct"
#
# if (comp_ns == "npmjs"):
# comp_http_name = NpmUtils.convert_dep_to_bdio(componentIdentifier)
# elif (comp_ns == "maven"):
# comp_http_name = MavenUtils.convert_to_bdio(componentIdentifier)
# else:
# print(f"BD-Scan-Action: ERROR: Domain '{comp_ns}' not supported yet")
# sys.exit(1)
#
# globals.printdebug(f"DEBUG: Looking for {comp_http_name}")
# ans = nx.ancestors(bdio_graph, comp_http_name)
# ans_list = list(ans)
# globals.printdebug(f"DEBUG: Ancestors are: {ans_list}")
# pred = nx.DiGraph.predecessors(bdio_graph, comp_http_name)
# pred_list = list(pred)
# globals.printdebug(f"DEBUG: Predecessors are: {ans_list}")
# if (len(ans_list) != 1):
# dependency_type = "Transitive"
#
# return dependency_type
| 37.592
| 114
| 0.642903
|
8c5f2cf9011ebe7f3ea8cf6f53b5c06aa49434ea
| 6,859
|
py
|
Python
|
scripts/4_multihop_ps.py
|
jobine/HGN
|
6d8dd5ebf2a5077652b09a00610ade89bfcd6b4b
|
[
"MIT"
] | null | null | null |
scripts/4_multihop_ps.py
|
jobine/HGN
|
6d8dd5ebf2a5077652b09a00610ade89bfcd6b4b
|
[
"MIT"
] | null | null | null |
scripts/4_multihop_ps.py
|
jobine/HGN
|
6d8dd5ebf2a5077652b09a00610ade89bfcd6b4b
|
[
"MIT"
] | 2
|
2021-04-19T06:18:30.000Z
|
2021-05-31T07:26:29.000Z
|
import spacy
import json
import os
import re
import torch
import numpy as np
import sys
from tqdm import tqdm
from collections import Counter
assert len(sys.argv) == 6
raw_data = json.load(open(sys.argv[1], 'r'))
doc_link_data = json.load(open(sys.argv[2], 'r'))
ent_data = json.load(open(sys.argv[3], 'r'))
para_data = json.load(open(sys.argv[4], 'r'))
output_file = sys.argv[5]
def select_titles(question_text, question_entities):
def custom_key(x):
# x[1]: start position
# x[2]: end position
return x[1], -x[2]
# get TITLE entities
candidates = []
title_set = set()
for ent in question_entities:
# only keep the entities from title matching
        if ent[3] != 'TITLE':
continue
stripped_ent = re.sub(r' \(.*?\)$', '', ent[0])
if stripped_ent in title_set:
continue
title_set.add(stripped_ent)
candidates.append(ent)
    # If multiple titles match at the same start position, take the longest one
sorted_candidiates = sorted(candidates, key=custom_key)
non_overlap_titles, overlapped_titles = [], []
question_mask = [0] * len(question_text)
for i in range(len(sorted_candidiates)):
start_pos, end_pos = sorted_candidiates[i][1], sorted_candidiates[i][2]
is_masked = False
for p in range(start_pos, end_pos):
if question_mask[p] == 1:
is_masked = True
if not is_masked:
non_overlap_titles.append(sorted_candidiates[i][0])
else:
overlapped_titles.append(sorted_candidiates[i][0])
for p in range(start_pos, end_pos):
question_mask[p] = 1
return non_overlap_titles, overlapped_titles
def build_dict(title_list):
title_to_id, id_to_title = {}, {}
for idx, title in enumerate(title_list):
id_to_title[idx] = title
title_to_id[title] = idx
return title_to_id, id_to_title
def build_title_to_entities(context, filter_ent_type=[]):
title_to_ent = {}
for title, sent_ent_list in context:
title_to_ent[title] = set()
for sent_ent in sent_ent_list:
for ent in sent_ent:
if ent[3] not in filter_ent_type:
title_to_ent[title].add(ent[0].lower())
return title_to_ent
def build_PG(titles):
# build hyperlink graph
N = len(titles)
para_adj = np.zeros((N, N), dtype=np.float32)
title_to_id, id_to_title = build_dict(titles)
for title in titles:
sent_links = doc_link_data[title]['hyperlink_titles']
for next_title in [next_title for sent_link in sent_links for next_title in sent_link]:
if next_title in titles:
pi, pj = title_to_id[title], title_to_id[next_title]
para_adj[pi, pj] = 1
return para_adj
def bfs_step(start_vec, graph):
"""
:param start_vec: [E]
:param graph: [E x E]
:return: next_vec: [E]
"""
next_vec = torch.matmul(start_vec.float().unsqueeze(0), graph)
next_vec = (next_vec > 0).long().squeeze(0)
return next_vec
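# Minimal illustration (not used by the script): one BFS step on a two-node
# graph with a single edge 0 -> 1.
def _bfs_step_demo():
    adj = torch.tensor([[0., 1.], [0., 0.]])  # adjacency matrix, edge 0 -> 1
    start = torch.tensor([1, 0])              # start from node 0
    return bfs_step(start, adj)               # -> tensor([0, 1])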
para_num = []
selected_para_dict = {}
for case in tqdm(raw_data):
guid = case['_id']
context = dict(case['context'])
para_scores = para_data[guid]
selected_para_dict[guid] = []
if len(para_scores) == 0:
print(guid)
continue
title_to_id, id_to_title = build_dict(context.keys())
sel_para_idx = [0] * len(context)
# question entity matching
question_entities = ent_data[guid]['question']
    # hop 1.1: the title appears in the question and is top-ranked
sel_titles, _ = select_titles(case['question'], question_entities)
for idx, (para, score) in enumerate(para_scores):
if para in sel_titles and idx < 2:
sel_para_idx[title_to_id[para]] = 1
    # hop 1.2: if no title matches, fall back to entity matching
title_to_ent = build_title_to_entities(ent_data[guid]['context'], filter_ent_type=['CONTEXT'])
if sum(sel_para_idx) == 0:
linked_title = None
for idx, (title, score) in enumerate(para_scores):
if title not in title_to_id:
continue
ent_set = title_to_ent[title] # all entities from this document
for ent in question_entities:
if ent[0].lower() in ent_set:
linked_title = title
if linked_title is not None: # stop finding if match with question entities
break
if linked_title is None: # use default one
assert len(para_scores) > 0
linked_title = para_scores[0][0]
sel_para_idx[title_to_id[linked_title]] = 1
selected_para_dict[guid].append([id_to_title[i] for i in range(len(context)) if sel_para_idx[i] == 1])
# second hop: use hyperlink
second_hop_titles = []
para_adj = build_PG(context.keys())
if sum(sel_para_idx) == 1:
next_titles = []
next_vec = bfs_step(torch.tensor(sel_para_idx), torch.from_numpy(para_adj))
next_vec_list = next_vec.nonzero().squeeze(1).numpy().tolist()
for sent_id in next_vec_list:
next_titles.append(id_to_title[sent_id])
        # hop 2.1: select the highest-scoring next title
        # 1. define the next and the default title for the second hop
        # 2. enumerate all docs; stop once a linked title is found
linked_title, default_title = None, None
for para, score in para_scores:
if linked_title is not None:
break
if para not in title_to_id: # skip documents that are not in the supporting docs
continue
if sel_para_idx[title_to_id[para]] == 0: # only deal with the ones have not been selected
if default_title is None:
default_title = para
if para in next_titles:
linked_title = para
linked_title = default_title if linked_title is None else linked_title
if linked_title is not None:
sel_para_idx[title_to_id[linked_title]] = 1
second_hop_titles = [linked_title]
selected_para_dict[guid].append(second_hop_titles)
    # remaining slots: fill with top-scored paragraphs to keep recall high
other_titles = []
for para, score in para_scores:
if para not in title_to_id:
continue
if sum(sel_para_idx) == 4:
break
ind = title_to_id[para]
if sel_para_idx[ind] == 0:
sel_para_idx[ind] = 1
other_titles.append(para)
selected_para_dict[guid].append(other_titles)
para_num.append(sum(sel_para_idx))
json.dump(selected_para_dict, open(output_file, 'w'))
| 33.135266
| 107
| 0.609856
|
25dad46d06176f0c906759c2346750058a3cd097
| 602
|
py
|
Python
|
mapss/static/packages/arches/arches/app/models/migrations/3808_card_component_command.py
|
MPI-MAPSS/MAPSS
|
3a5c0109758801717aaa8de1125ca5e98f83d3b4
|
[
"CC0-1.0"
] | null | null | null |
mapss/static/packages/arches/arches/app/models/migrations/3808_card_component_command.py
|
MPI-MAPSS/MAPSS
|
3a5c0109758801717aaa8de1125ca5e98f83d3b4
|
[
"CC0-1.0"
] | null | null | null |
mapss/static/packages/arches/arches/app/models/migrations/3808_card_component_command.py
|
MPI-MAPSS/MAPSS
|
3a5c0109758801717aaa8de1125ca5e98f83d3b4
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-08-23 15:43
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('models', '3789_card_config'),
]
operations = [
migrations.AlterField(
model_name='cardmodel',
name='component',
field=models.ForeignKey(db_column='componentid', default=uuid.UUID('f05e4d3a-53c1-11e8-b0ea-784f435179ea'), on_delete=django.db.models.deletion.SET_DEFAULT, to='models.CardComponent'),
),
]
| 26.173913
| 196
| 0.66113
|
0f81e5a201240760692fe221faea84e61d0dc9d0
| 4,286
|
py
|
Python
|
bokeh/sphinxext/example_handler.py
|
timgates42/bokeh
|
fb8b07b838f4d07d520cfe899779a11bc89f3c77
|
[
"BSD-3-Clause"
] | 1
|
2015-01-31T14:42:39.000Z
|
2015-01-31T14:42:39.000Z
|
bokeh/sphinxext/example_handler.py
|
timgates42/bokeh
|
fb8b07b838f4d07d520cfe899779a11bc89f3c77
|
[
"BSD-3-Clause"
] | 1
|
2021-05-08T06:24:26.000Z
|
2021-05-08T06:24:26.000Z
|
bokeh/sphinxext/example_handler.py
|
timgates42/bokeh
|
fb8b07b838f4d07d520cfe899779a11bc89f3c77
|
[
"BSD-3-Clause"
] | 1
|
2021-03-04T05:23:36.000Z
|
2021-03-04T05:23:36.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import sys
# Bokeh imports
from ..application.handlers.code_runner import CodeRunner
from ..application.handlers.handler import Handler
from ..io.doc import curdoc, set_curdoc
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ExampleHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class ExampleHandler(Handler):
""" A stripped-down handler similar to CodeHandler but that does
some appropriate monkeypatching.
"""
_output_funcs = ['output_notebook', 'output_file', 'reset_output']
_io_funcs = ['show', 'save']
def __init__(self, source, filename):
super().__init__(self)
self._runner = CodeRunner(source, filename, [])
def modify_document(self, doc):
if self.failed:
return
module = self._runner.new_module()
sys.modules[module.__name__] = module
doc._modules.append(module)
        old_curdoc = curdoc()  # save separately: _monkeypatch() reuses the name old_doc
        set_curdoc(doc)
        old_io, old_doc = self._monkeypatch()
        try:
            self._runner.run(module, lambda: None)
        finally:
            self._unmonkeypatch(old_io, old_doc)
            set_curdoc(old_curdoc)  # restore the document that was current before
def _monkeypatch(self):
def _pass(*args, **kw): pass
def _add_root(obj, *args, **kw):
from bokeh.io import curdoc
curdoc().add_root(obj)
def _curdoc(*args, **kw):
return curdoc()
# these functions are transitively imported from io into plotting,
# so we have to patch them all. Assumption is that no other patching
# has occurred, i.e. we can just save the funcs being patched once,
# from io, and use those as the originals to replace everywhere
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
old_io = {}
for f in self._output_funcs + self._io_funcs:
old_io[f] = getattr(io, f)
for mod in mods:
for f in self._output_funcs:
setattr(mod, f, _pass)
for f in self._io_funcs:
setattr(mod, f, _add_root)
import bokeh.document as d
old_doc = d.Document
d.Document = _curdoc
return old_io, old_doc
def _unmonkeypatch(self, old_io, old_doc):
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
for mod in mods:
for f in old_io:
setattr(mod, f, old_io[f])
import bokeh.document as d
d.Document = old_doc
@property
def failed(self):
return self._runner.failed
@property
def error(self):
return self._runner.error
@property
def error_detail(self):
return self._runner.error_detail
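# Illustrative usage sketch (an assumption, not part of Bokeh's public API):
# apply an example script's source to a fresh Document.
def _run_example_source(source, filename="example.py"):
    from bokeh.document import Document
    handler = ExampleHandler(source, filename)
    doc = Document()
    handler.modify_document(doc)
    if handler.failed:
        raise RuntimeError(handler.error)
    return doc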
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 30.183099
| 78
| 0.433271
|
bdab9e33397041cb127e2e74da182c3a18dd9770
| 2,173
|
py
|
Python
|
submarine-sdk/pysubmarine/submarine/client/models/__init__.py
|
10088/submarine
|
9a9c7100dfa95c5126c15aec82092bb899565dac
|
[
"Apache-2.0"
] | null | null | null |
submarine-sdk/pysubmarine/submarine/client/models/__init__.py
|
10088/submarine
|
9a9c7100dfa95c5126c15aec82092bb899565dac
|
[
"Apache-2.0"
] | null | null | null |
submarine-sdk/pysubmarine/submarine/client/models/__init__.py
|
10088/submarine
|
9a9c7100dfa95c5126c15aec82092bb899565dac
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# flake8: noqa
"""
Submarine API
The Submarine REST API allows you to access Submarine resources such as, experiments, environments and notebooks. The API is hosted under the /v1 path on the Submarine server. For example, to list experiments on a server hosted at http://localhost:8080, access http://localhost:8080/api/v1/experiment/ # noqa: E501
The version of the OpenAPI document: 0.7.0
Contact: dev@submarine.apache.org
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from submarine.client.models.code_spec import CodeSpec
from submarine.client.models.environment_spec import EnvironmentSpec
from submarine.client.models.experiment_meta import ExperimentMeta
from submarine.client.models.experiment_spec import ExperimentSpec
from submarine.client.models.experiment_task_spec import ExperimentTaskSpec
from submarine.client.models.experiment_template_submit import ExperimentTemplateSubmit
from submarine.client.models.json_response import JsonResponse
from submarine.client.models.kernel_spec import KernelSpec
from submarine.client.models.notebook_meta import NotebookMeta
from submarine.client.models.notebook_pod_spec import NotebookPodSpec
from submarine.client.models.notebook_spec import NotebookSpec
from submarine.client.models.serve_spec import ServeSpec
| 48.288889
| 322
| 0.810861
|
e3d70125b8d8d1d21f5d6d737150a4597d3325c3
| 7,395
|
py
|
Python
|
bindings/python/cntk/tests/distributed_test.py
|
catycaldwell/CNTK
|
86a20080b19255b96ada85b6a7ab6b8e7be7465b
|
[
"RSA-MD"
] | null | null | null |
bindings/python/cntk/tests/distributed_test.py
|
catycaldwell/CNTK
|
86a20080b19255b96ada85b6a7ab6b8e7be7465b
|
[
"RSA-MD"
] | null | null | null |
bindings/python/cntk/tests/distributed_test.py
|
catycaldwell/CNTK
|
86a20080b19255b96ada85b6a7ab6b8e7be7465b
|
[
"RSA-MD"
] | 1
|
2021-01-21T05:58:03.000Z
|
2021-01-21T05:58:03.000Z
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import math
import numpy as np
import pytest
from .. import Function
from ..trainer import *
from ..learner import *
from .. import distributed
from .. import cross_entropy_with_softmax, classification_error, parameter, \
input_variable, times, plus, reduce_sum
def create_data_parallel_distributed_learner(learner, quantized, distributed_after):
return distributed.data_parallel_distributed_learner(
learner=learner,
distributed_after=distributed_after,
use_async_buffered_parameter_update=False,
num_quantization_bits=(1 if quantized else 32))
def create_block_momentum_distributed_learner(learner, distributed_after):
return distributed.block_momentum_distributed_learner(
learner=learner,
block_size=1024,
distributed_after=distributed_after)
def create_block_momentum_distributed_learner_with_time_constant(learner, distributed_after):
return distributed.block_momentum_distributed_learner(
learner=learner,
block_size=1024,
block_momentum_as_time_constant=4096,
distributed_after=distributed_after)
def run_distributed_training(tmpdir, create_func):
in1 = input_variable(shape=1)
labels = input_variable(shape=1)
p = parameter(shape=2, init=10)
z = plus(in1, reduce_sum(p), name='z')
ce = cross_entropy_with_softmax(z, labels)
errs = classification_error(z, labels)
momentum_time_constant = momentum_as_time_constant_schedule(1100)
lr_per_sample = learning_rate_schedule(0.007, UnitType.sample)
dist_learner = create_func(momentum_sgd(z.parameters, lr_per_sample, momentum_time_constant, True))
communicator = dist_learner.communicator()
workers = communicator.workers()
current_worker = communicator.current_worker()
found_rank = False
for wk in workers:
if current_worker.global_rank == wk.global_rank:
found_rank = True
assert found_rank
trainer = Trainer(z, ce, errs, [ dist_learner ])
in1_value = [[1],[2]]
label_value = [[0], [1]]
arguments = {in1: in1_value, labels: label_value}
z_output = z.output
updated, var_map = trainer.train_minibatch(arguments, [z_output])
p = str(tmpdir / 'checkpoint.dat')
trainer.save_checkpoint(p)
trainer.restore_from_checkpoint(p)
communicator.barrier()
assert trainer.model.name == 'z'
# Ensure that Swig is not leaking raw types
assert isinstance(trainer.model, Function)
assert trainer.model.__doc__
def test_distributed_mb_source(tmpdir):
input_dim = 69
ctf_data = '''\
0 |S0 3:1 |# <s> |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
2 |S0 60:1 |# <s> |S1 3:1 |# <s>
2 |S0 61:1 |# A |S1 32:1 |# ~AH
2 |S0 61:1 |# A |S1 32:1 |# ~AH
3 |S0 60:1 |# <s> |S1 3:1 |# <s>
3 |S0 61:1 |# A |S1 32:1 |# ~AH
3 |S0 61:1 |# A |S1 32:1 |# ~AH
3 |S0 61:1 |# A |S1 32:1 |# ~AH
4 |S0 60:1 |# <s> |S1 3:1 |# <s>
5 |S0 60:1 |# <s> |S1 3:1 |# <s>
5 |S0 61:1 |# A |S1 32:1 |# ~AH
6 |S0 60:1 |# <s> |S1 3:1 |# <s>
6 |S0 61:1 |# A |S1 32:1 |# ~AH
7 |S0 60:1 |# <s> |S1 3:1 |# <s>
8 |S0 60:1 |# <s> |S1 3:1 |# <s>
8 |S0 61:1 |# A |S1 32:1 |# ~AH
9 |S0 60:1 |# <s> |S1 3:1 |# <s>
9 |S0 61:1 |# A |S1 32:1 |# ~AH
10 |S0 61:1 |# A |S1 32:1 |# ~AH
'''
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, FULL_DATA_SWEEP
ctf_file = str(tmpdir/'2seqtest.txt')
with open(ctf_file, 'w') as f:
f.write(ctf_data)
# No randomization
mb0 = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
features = StreamDef(field='S0', shape=input_dim, is_sparse=True),
labels = StreamDef(field='S1', shape=input_dim, is_sparse=True)
)),
randomize=False, epoch_size=FULL_DATA_SWEEP)
mb1 = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
features = StreamDef(field='S0', shape=input_dim, is_sparse=True),
labels = StreamDef(field='S1', shape=input_dim, is_sparse=True)
)),
randomize=False, epoch_size=FULL_DATA_SWEEP)
input = input_variable(shape=(input_dim,))
label = input_variable(shape=(input_dim,))
input_map = {
input : mb0.streams.features,
label : mb0.streams.labels
}
data = mb0.next_minibatch(minibatch_size_in_samples=10, input_map=input_map, num_data_partitions=2, partition_index=0)
assert(data[input].num_samples == 7)
data = mb0.next_minibatch(minibatch_size_in_samples=10, input_map=input_map, num_data_partitions=2, partition_index=0)
assert(data[input].num_samples == 4)
data = mb0.next_minibatch(minibatch_size_in_samples=10, input_map=input_map, num_data_partitions=2, partition_index=0)
assert(data[input].num_samples == 5)
data = mb1.next_minibatch(minibatch_size_in_samples=10, input_map=input_map, num_data_partitions=2, partition_index=1)
assert(data[input].num_samples == 3)
data = mb1.next_minibatch(minibatch_size_in_samples=10, input_map=input_map, num_data_partitions=2, partition_index=1)
assert(data[input].num_samples == 5)
    # Randomization
mb3 = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
features = StreamDef(field='S0', shape=input_dim, is_sparse=True),
labels = StreamDef(field='S1', shape=input_dim, is_sparse=True)
)),
randomize=True, epoch_size=FULL_DATA_SWEEP)
mb4 = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
features = StreamDef(field='S0', shape=input_dim, is_sparse=True),
labels = StreamDef(field='S1', shape=input_dim, is_sparse=True)
)),
randomize=True, epoch_size=FULL_DATA_SWEEP)
data = mb3.next_minibatch(minibatch_size_in_samples=10, input_map=input_map, num_data_partitions=2, partition_index=0)
assert(data[input].num_samples == 5)
data = mb3.next_minibatch(minibatch_size_in_samples=10, input_map=input_map, num_data_partitions=2, partition_index=0)
assert(data[input].num_samples == 4)
data = mb4.next_minibatch(minibatch_size_in_samples=10, input_map=input_map, num_data_partitions=2, partition_index=1)
assert(len(data) == 0)
def test_distributed(tmpdir, is_1bit_sgd):
    quantized = (is_1bit_sgd == 1)
simple_aggregation=lambda learner: create_data_parallel_distributed_learner(learner, False, 0)
run_distributed_training(tmpdir, create_func=simple_aggregation)
if is_1bit_sgd == 1:
quantized_aggregation=lambda learner: create_data_parallel_distributed_learner(learner, True, 100)
run_distributed_training(tmpdir, create_func=quantized_aggregation)
block_momentum=lambda learner: create_block_momentum_distributed_learner(learner, 100)
run_distributed_training(tmpdir, create_func=block_momentum)
block_momentum_with_time=lambda learner: create_block_momentum_distributed_learner_with_time_constant(learner, 100)
run_distributed_training(tmpdir, create_func=block_momentum_with_time)
distributed.Communicator.finalize()
| 38.921053
| 123
| 0.694253
|
e522a3c1fe15bd81b4c35e22cef3fba4dcc3ca22
| 78
|
py
|
Python
|
Classwork/openFile.py
|
rhiggins2308/G00364712-problemSet
|
90ab72c15e3093104102224a96c728e1cf196157
|
[
"Apache-2.0"
] | null | null | null |
Classwork/openFile.py
|
rhiggins2308/G00364712-problemSet
|
90ab72c15e3093104102224a96c728e1cf196157
|
[
"Apache-2.0"
] | null | null | null |
Classwork/openFile.py
|
rhiggins2308/G00364712-problemSet
|
90ab72c15e3093104102224a96c728e1cf196157
|
[
"Apache-2.0"
] | null | null | null |
f = open("data/iris.csv")
print(f)
print(f.readline())
print(f.read())
f.close()
| 11.142857
| 25
| 0.666667
|
ee9c1a30279938de30535f32824240b505b0c118
| 1,631
|
py
|
Python
|
ocd/abc.py
|
neurobin/python-ocd
|
178bc7923e4702a1d90b2b38bc28515921f8553d
|
[
"BSD-3-Clause"
] | null | null | null |
ocd/abc.py
|
neurobin/python-ocd
|
178bc7923e4702a1d90b2b38bc28515921f8553d
|
[
"BSD-3-Clause"
] | 1
|
2020-04-26T16:00:32.000Z
|
2020-04-26T16:00:32.000Z
|
ocd/abc.py
|
neurobin/python-easyvar
|
178bc7923e4702a1d90b2b38bc28515921f8553d
|
[
"BSD-3-Clause"
] | null | null | null |
"""Abstract base classes.
"""
__author__ = 'Md Jahidul Hamid <jahidulhamid@yahoo.com>'
__copyright__ = 'Copyright © Md Jahidul Hamid <https://github.com/neurobin/>'
__license__ = '[BSD](http://www.opensource.org/licenses/bsd-license.php)'
__version__ = '0.0.4'
class VarConf():
"""A base class that must be inherited by `VarConf` classes in
subclasses of `PropMixin`.
The method `get_conf` must be implemented in your `VarConf` class.
If you want automatic property configuration, create a class named
`VarConf` in your `PropMixin` subclass and make your `VarConf`
class inherit from `ocd.abc.VarConf` or a default `VarConf`
class from `ocd.defaults` and implement the `get_conf` method
    to return either a `Prop` object (in which case the corresponding
    attribute is converted to a property) or `None` if no conversion
    is desired.
"""
def get_conf(self, name, value):
"""This method will be called on each property to get the
property configuration.
It must return a `Prop` object or `None` for the particular
property name.
Args:
name (str): name of the property
value (any): Value of the property
Returns:
Either `None` (if not to be converted) or `Prop` object if
needs to be converted to property.
"""
raise NotImplementedError("`VarConf` class must define a "
"method `get_conf` that returns "
"`Prop` object or `None`. See "
"`ocd.abc.VarConf`")
| 35.456522
| 77
| 0.627223
|
b6caef7be0e6b941f11345ef8b42ed50ae01686e
| 966
|
py
|
Python
|
configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_OptExpTMO_subset.py
|
ismailkocdemir/mmdetection
|
4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823
|
[
"Apache-2.0"
] | null | null | null |
configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_OptExpTMO_subset.py
|
ismailkocdemir/mmdetection
|
4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823
|
[
"Apache-2.0"
] | null | null | null |
configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_OptExpTMO_subset.py
|
ismailkocdemir/mmdetection
|
4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn_scratch.py',
'../_base_/datasets/cityscapes_detection_OptExp_90.py',
'../_base_/default_runtime.py'
]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
evaluation = dict(interval=1, classwise=True)
optimizer_config = dict(grad_clip=None)
optimizer = dict(
type='SGD',
lr=0.02,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.1,
step=[11])
runner = dict(
type='EpochBasedRunner', max_epochs=13)
log_config = dict(
interval=100,
hooks=[
dict(type='TensorboardLoggerHook'),
dict(type='TextLoggerHook'),
]
)
work_dir = "/home/ihakki/h3dr/experiments/faster_rcnn_optexp/run_scratch_8"
gpu_ids = range(0, 1)
| 25.421053
| 75
| 0.643892
|
8a5e73283de698d7dd03754638052cdcd75dcb59
| 2,485
|
py
|
Python
|
codigo/update_deceased.py
|
kant/Mexico-datos
|
12a49b6bc71126a460c3c167906e8648c5e9eafe
|
[
"MIT"
] | null | null | null |
codigo/update_deceased.py
|
kant/Mexico-datos
|
12a49b6bc71126a460c3c167906e8648c5e9eafe
|
[
"MIT"
] | null | null | null |
codigo/update_deceased.py
|
kant/Mexico-datos
|
12a49b6bc71126a460c3c167906e8648c5e9eafe
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
rolling_window = 7  # average over 7 days
repo = os.pardir
dir_series = os.path.join(repo, 'datos', 'series_de_tiempo', '')
dir_datos_abiertos = os.path.join(repo, 'datos_abiertos', '')
dir_series_abiertos = os.path.join(dir_datos_abiertos, 'series_de_tiempo', 'nuevos', '')
dir_save = os.path.join(dir_datos_abiertos, 'formato_especial', '')
# Time series ('JH interpretation')
muertes_df = (pd.read_csv(dir_series + 'covid19_mex_muertes_nuevas.csv')
.set_index('Fecha')['Nacional']
.rename('Nuevas_JH'))
muertes_df.index = pd.to_datetime(muertes_df.index)
casos_df = (pd.read_csv(dir_series + 'covid19_mex_casos_nuevos.csv')
.set_index('Fecha')['Nacional']
.rename('Nuevos_JH'))
casos_df.index = pd.to_datetime(casos_df.index)
# Time series ('open data')
muertes_abiertos_df = (pd.read_csv(dir_series_abiertos + 'covid19_mex_muertes.csv')
.set_index('Fecha')['Nacional']
.rename('Nuevas_abiertos'))
muertes_abiertos_df.index = pd.to_datetime(muertes_abiertos_df.index)
casos_abiertos_df = (pd.read_csv(dir_series_abiertos + 'covid19_mex_confirmados.csv')
.set_index('Fecha')['Nacional']
.rename('Nuevos_abiertos'))
casos_abiertos_df.index = pd.to_datetime(casos_abiertos_df.index)
# Build the dataframes
muertes_nuevas = pd.concat((muertes_df, muertes_abiertos_df), axis=1).fillna(0).astype(int)
muertes_acumuladas = muertes_nuevas.cumsum()
# add the rolling average
muertes_promedio = (muertes_nuevas.rolling(window=rolling_window, center=False)
                    .mean()
                    .round(2))  # 2 decimal places
muertes_nuevas = muertes_nuevas.join(muertes_promedio, rsuffix='_promedio')
casos_nuevos = pd.concat((casos_df, casos_abiertos_df),axis=1).fillna(0).astype(int)
casos_acumulados = casos_nuevos.cumsum()
# the rolling average
casos_promedio = (casos_nuevos.rolling(window=rolling_window, center=False)
                  .mean()
                  .round(2))  # 2 decimal places
casos_nuevos = casos_nuevos.join(casos_promedio, rsuffix='_promedio')
# cutoff = '2020-02-28'
# Write output files
# .loc[cutoff:, :]
muertes_nuevas.to_csv(dir_save + 'comparativo_muertes_nuevas.csv')
muertes_acumuladas.to_csv(dir_save + 'comparativo_muertes_acumuladas.csv')
casos_nuevos.to_csv(dir_save + 'comparativo_casos_nuevos.csv')
casos_acumulados.to_csv(dir_save + 'comparativo_casos_acumulados.csv')
| 37.651515
| 91
| 0.713883
|
7414be303280cc8b31a5233e8d08bff454e25ecd
| 2,548
|
py
|
Python
|
tests/test_models/test_dense_heads/test_anchor_head.py
|
Brym-Gyimah/mmdetection
|
d5d749afe57c77e2ec4500395faed3566fdfedae
|
[
"Apache-2.0"
] | 20,190
|
2018-09-10T01:11:53.000Z
|
2022-03-31T22:31:33.000Z
|
tests/test_models/test_dense_heads/test_anchor_head.py
|
Joker-co/mmdet_pro
|
96abfd90cf0e38c5ce398795f949e9328eb85c1b
|
[
"Apache-2.0"
] | 6,736
|
2018-09-17T09:45:51.000Z
|
2022-03-31T22:54:10.000Z
|
tests/test_models/test_dense_heads/test_anchor_head.py
|
Joker-co/mmdet_pro
|
96abfd90cf0e38c5ce398795f949e9328eb85c1b
|
[
"Apache-2.0"
] | 7,837
|
2018-09-11T02:58:23.000Z
|
2022-03-31T22:31:38.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import AnchorHead
def test_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
self = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
    # Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
| 35.887324
| 79
| 0.602433
|
de6296d26ecda4c8d517c16b2c7aafe35abbd4ef
| 6,078
|
py
|
Python
|
sem2/lab1/parabolic.py
|
NetherQuartz/NumericalMethodsLabs
|
731ba11bc068018371d5e1a2f9b521ec7c4619ad
|
[
"MIT"
] | 1
|
2021-04-10T18:10:48.000Z
|
2021-04-10T18:10:48.000Z
|
sem2/lab1/parabolic.py
|
NetherQuartz/NumericalMethodsLabs
|
731ba11bc068018371d5e1a2f9b521ec7c4619ad
|
[
"MIT"
] | null | null | null |
sem2/lab1/parabolic.py
|
NetherQuartz/NumericalMethodsLabs
|
731ba11bc068018371d5e1a2f9b521ec7c4619ad
|
[
"MIT"
] | null | null | null |
import fire
import numpy as np
import matplotlib.pyplot as plt
from utilities import str2fun
def tma(a, b, c, d):
size = len(a)
p, q = [], []
p.append(-c[0] / b[0])
q.append(d[0] / b[0])
for i in range(1, size):
p_tmp = -c[i] / (b[i] + a[i] * p[i - 1])
q_tmp = (d[i] - a[i] * q[i - 1]) / (b[i] + a[i] * p[i - 1])
p.append(p_tmp)
q.append(q_tmp)
x = [0 for _ in range(size)]
x[size - 1] = q[size - 1]
for i in range(size - 2, -1, -1):
x[i] = p[i] * x[i + 1] + q[i]
return x
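# Quick sanity check (not used by the lab itself): solve the 2x2 system
# [[2, 1], [1, 2]] x = [3, 3], whose exact solution is [1, 1].
def _tma_demo():
    return tma(a=[0, 1], b=[2, 2], c=[1, 0], d=[3, 3])  # -> [1.0, 1.0]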
def solve_analytic(l, N, K, T, solution):
h = l / N
tau = T / K
u = np.zeros((K, N))
for k in range(K):
for j in range(N):
u[k][j] = solution(j * h, k * tau)
return u
def implicit_solver(l, psi, f, phi0, phil, h, tau, sigma, N, K):
a = np.zeros(N)
b = np.zeros(N)
c = np.zeros(N)
d = np.zeros(N)
u = np.zeros((K, N))
for i in range(1, N - 1):
u[0][i] = psi(i * h)
u[0][-1] = 0
for k in range(1, K):
for j in range(1, N - 1):
a[j] = sigma
b[j] = -(1 + 2 * sigma)
c[j] = sigma
d[j] = -u[k - 1][j] - tau * f(j * h, k * tau)
a[0] = 0
b[0] = 1
c[0] = 0
d[0] = phi0(k * tau)
a[-1] = -1
b[-1] = 1
c[-1] = 0
d[-1] = h * phil(k * tau)
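        # the a[-1], b[-1], d[-1] settings above encode the Neumann boundary
        # u_x(l, t) = phil(t) via the first-order difference (u[N-1] - u[N-2]) / h = phil(t)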
u[k] = tma(a, b, c, d)
return u
def explicit_solver(l, psi, f, phi0, phil, h, tau, sigma, N, K):
u = np.zeros((K, N))
for j in range(1, N - 1):
u[0][j] = psi(j * h)
for k in range(1, K):
u[k][0] = phi0(k * tau)
for j in range(1, N - 1):
u[k][j] = sigma * u[k - 1][j + 1] + (1 - 2 * sigma) * u[k - 1][j] + sigma * u[k - 1][j - 1] \
+ tau * f(j * h, k * tau)
u[k][-1] = u[k][-2] + phil(k * tau) * h
return u
def crank_nicholson_solver(l, psi, f, phi0, phil, h, tau, sigma, N, K):
theta = 0.5
a = np.zeros(N)
b = np.zeros(N)
c = np.zeros(N)
d = np.zeros(N)
u = np.zeros((K, N))
for j in range(1, N - 1):
u[0][j] = psi(j * h)
for k in range(1, K):
for j in range(1, N - 1):
a[j] = sigma
b[j] = -(1 + 2 * sigma)
c[j] = sigma
d[j] = -u[k - 1][j] - tau * f(j * h, k * tau)
a[0] = 0
b[0] = 1
c[0] = 0
d[0] = phi0(k * tau)
a[-1] = -1
b[-1] = 1
c[-1] = 0
d[-1] = h * phil(k * tau)
tmp_imp = tma(a, b, c, d)
tmp_exp = np.zeros(N)
tmp_exp[0] = phi0(k * tau)
for j in range(1, N - 1):
tmp_exp[j] = sigma * u[k - 1][j + 1] + (1 - 2 * sigma) * u[k - 1][j] \
+ sigma * u[k - 1][j - 1] + tau * f(j * h, k * tau)
tmp_exp[-1] = tmp_exp[-2] + phil(k * tau) * h
for j in range(N):
u[k][j] = theta * tmp_imp[j] + (1 - theta) * tmp_exp[j]
return u
def solve(solver, data, N, K, T):
l = data["l"]
psi = str2fun(data["psi"], variables="x")
f = str2fun(data["f"], variables="x,t")
phi0 = str2fun(data["phi0"])
phil = str2fun(data["phil"])
solution = str2fun(data["solution"], variables="x,t")
h = l / N
tau = T / K
sigma = tau / (h ** 2)
print(sigma)
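    # note: the explicit scheme is only stable for sigma = tau / h**2 <= 0.5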
if solver is solve_analytic:
return solve_analytic(l, N, K, T, solution)
return solver(l, psi, f, phi0, phil, h, tau, sigma, N, K)
def main():
data = {
"l": np.pi / 2,
"psi": "0",
"f": "cos(x) * (cos(t) + sin(t))",
"phi0": "sin(t)",
"phil": "-sin(t)",
"solution": "sin(t) * cos(x)",
}
    # l — upper bound of x
    N = 10  # number of x intervals
    K = 1000  # number of t intervals
    T = 10  # upper bound of time
analytic = solve(solve_analytic, data, N, K, T)
explicit = solve(explicit_solver, data, N, K, T)
implicit = solve(implicit_solver, data, N, K, T)
crank_nicholson = solve(crank_nicholson_solver, data, N, K, T)
xaxis = np.linspace(0, data["l"], N)
analytic_f = str2fun(data["solution"], variables="t,x")
t = 0.5
plt.plot(xaxis, analytic_f(0, xaxis), label="analytical, t=0")
plt.plot(xaxis, explicit[0, :], label="explicit, t=0")
plt.plot(xaxis, implicit[0, :], label="implicit, t=0")
plt.plot(xaxis, crank_nicholson[0, :], label="crank_nicholson, t=0")
plt.plot(xaxis, analytic_f(t, xaxis), label=f"analytical, t={t}")
plt.plot(xaxis, explicit[int(K / T * t), :], label=f"explicit, t={t}")
plt.plot(xaxis, implicit[int(K / T * t), :], label=f"implicit, t={t}")
plt.plot(xaxis, crank_nicholson[int(K / T * t), :], label=f"crank_nicholson, t={t}")
plt.xlabel("x")
plt.ylabel("u")
plt.legend()
plt.grid(True)
plt.show()
eps = {
"explicit": [],
"implicit": [],
"crank_nicholson": []
}
l = data["l"]
for N, K, T in [(5, 800, 10),
(10, 1000, 10),
(20, 4000, 10),
(40, 15000, 10)]:
xaxis = np.linspace(0, data["l"], N)
explicit = solve(explicit_solver, data, N, K, T)
implicit = solve(implicit_solver, data, N, K, T)
crank_nicholson = solve(crank_nicholson_solver, data, N, K, T)
analytic_sol = analytic_f(t, xaxis)
explicit_sol = explicit[int(K / T * t), :]
implicit_sol = implicit[int(K / T * t), :]
crank_nicholson_sol = crank_nicholson[int(K / T * t), :]
for method, sol in zip(["explicit", "implicit", "crank_nicholson"],
[explicit_sol, implicit_sol, crank_nicholson_sol]):
eps[method].append((np.mean(np.abs(analytic_sol - sol)), l / N))
for method, value in eps.items():
print(method, value)
mean, step = list(zip(*value))
plt.plot(step, mean, label=method)
plt.xlabel("Шаг")
plt.ylabel("Погрешность")
plt.grid(True)
plt.legend()
plt.show()
if __name__ == '__main__':
fire.Fire(main)
| 26.198276
| 105
| 0.471866
|
f66cc0dad7c14ba0366be5a44d0217ee567c55ff
| 3,281
|
py
|
Python
|
tests/test_cur.py
|
blester125/CUR_Decomposition
|
a74994b47fb3345a01821f9f102cab59c405be71
|
[
"MIT"
] | 2
|
2019-07-08T09:30:19.000Z
|
2020-02-14T10:58:20.000Z
|
tests/test_cur.py
|
blester125/CUR_Decomposition
|
a74994b47fb3345a01821f9f102cab59c405be71
|
[
"MIT"
] | null | null | null |
tests/test_cur.py
|
blester125/CUR_Decomposition
|
a74994b47fb3345a01821f9f102cab59c405be71
|
[
"MIT"
] | null | null | null |
from unittest.mock import MagicMock, patch
import numpy as np
from cur.cur import (
probabilities,
select_part,
select_C,
select_R,
select_W,
psuedo_inverse,
make_U,
cur_decomposition,
)
"""Demo data from Mining of Massive Datasets p. 408"""
input_ = np.array([
[1, 1, 1, 0, 0],
[3, 3, 3, 0, 0],
[4, 4, 4, 0, 0],
[5, 5, 5, 0, 0],
[0, 0, 0, 4, 4],
[0, 0, 0, 5, 5],
[0, 0, 0, 2, 2]
])
row_probs = np.array([.012, .111, .198, .309, .132, .206, .033])
col_probs = np.array([.210, .210, .210, .185, .185])
def test_probs():
gold_rows = row_probs
gold_cols = col_probs
r, c = probabilities(input_)
np.testing.assert_allclose(r, gold_rows, atol=1e-3)
np.testing.assert_allclose(c, gold_cols, atol=1e-3)
def test_probabilities_shape():
shape = np.random.randint(5, 25, size=2)
input_ = np.random.rand(*shape)
r, c = probabilities(input_)
assert len(r) == shape[0]
assert len(c) == shape[1]
def test_probabilities_valid():
shape = np.random.randint(5, 25, size=2)
input_ = np.random.rand(*shape)
r, c = probabilities(input_)
np.testing.assert_allclose(np.sum(r), 1.)
np.testing.assert_allclose(np.sum(c), 1.)
def test_select_0():
gold_select = np.array([
[0., 0., 0., 7.78971191, 7.78971191],
[6.36027314, 6.36027314, 6.36027314, 0., 0.]
])
with patch("cur.cur.np.random.choice") as choice_patch:
choice_patch.return_value = [5, 3]
m, idx = select_part(input_, 2, row_probs, 0)
np.testing.assert_allclose(m, gold_select)
def test_select_1():
gold_select = ([
[1.5430335, 0.],
[4.6291005, 0.],
[6.172134, 0.],
[7.7151675, 0.],
[0., 6.57595949],
[0., 8.21994937],
[0., 3.28797975],
])
with patch("cur.cur.np.random.choice") as choice_patch:
choice_patch.return_value = [2, 4]
m, idx = select_part(input_, 2, col_probs, 1)
np.testing.assert_allclose(m, gold_select)
def test_select_C_shape():
m, n = np.random.randint(5, 25, size=2)
in_ = np.random.rand(m, n)
r = np.random.randint(1, n)
probs = np.random.uniform(0, 1, size=n)
probs = probs / np.sum(probs)
C, idx = select_C(in_, r, probs)
assert C.shape[0] == m
assert C.shape[1] == r
assert len(idx) == r
def test_select_R_shape():
m, n = np.random.randint(5, 25, size=2)
in_ = np.random.rand(m, n)
r = np.random.randint(1, m)
probs = np.random.uniform(0, 1, size=m)
probs = probs / np.sum(probs)
C, idx = select_R(in_, r, probs)
assert C.shape[0] == r
assert C.shape[1] == n
assert len(idx) == r
def test_select_W():
gold_w = [[0, 5], [5, 0]]
w = select_W(input_, [2, 4], [5, 3])
np.testing.assert_allclose(w, gold_w)
def test_inverse():
gold = [0.2, 0.2]
example = np.array([5, 5])
inv = psuedo_inverse(example)
np.testing.assert_allclose(inv, gold)
def test_inverse_with_zero():
gold = [0.2, 0., 0.2]
example = np.array([5, 0, 5])
inv = psuedo_inverse(example)
np.testing.assert_allclose(inv, gold)
def test_make_U():
gold_U = np.array([[0, 1/25], [1/25, 0]])
U = make_U(input_, [2, 4], [5, 3])
np.testing.assert_allclose(U, gold_U)
| 28.284483
| 64
| 0.591893
|
848bf9135ccf2b9dc456cd4b91c6303a86558a6d
| 984
|
py
|
Python
|
test/gst-va/vpp/transpose.py
|
tong1wu/vaapi-fits
|
4d5d01668905ed84d4077e4a7a019d7ced0864e5
|
[
"BSD-3-Clause"
] | 19
|
2019-03-05T01:59:05.000Z
|
2022-01-11T15:31:49.000Z
|
test/gst-va/vpp/transpose.py
|
tong1wu/vaapi-fits
|
4d5d01668905ed84d4077e4a7a019d7ced0864e5
|
[
"BSD-3-Clause"
] | 213
|
2019-01-29T18:44:05.000Z
|
2022-03-30T05:57:04.000Z
|
test/gst-va/vpp/transpose.py
|
tong1wu/vaapi-fits
|
4d5d01668905ed84d4077e4a7a019d7ced0864e5
|
[
"BSD-3-Clause"
] | 26
|
2019-01-29T05:21:22.000Z
|
2022-02-09T00:57:35.000Z
|
###
### Copyright (C) 2021 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ....lib.gstreamer.va.util import *
from ....lib.gstreamer.va.vpp import VppTest
spec = load_test_spec("vpp", "transpose")
@slash.requires(*platform.have_caps("vpp", "transpose"))
class default(VppTest):
def before(self):
vars(self).update(
caps = platform.get_caps("vpp", "transpose"),
metric = dict(type = "md5"),
vpp_op = "transpose",
)
super(default, self).before()
@slash.parametrize(*gen_vpp_transpose_parameters(spec))
def test(self, case, degrees, method):
vars(self).update(spec[case].copy())
vars(self).update(
case = case,
degrees = degrees,
direction = map_transpose_direction(degrees, method),
method = method,
)
if self.direction is None:
slash.skip_test(
"{degrees} {method} direction not supported".format(**vars(self)))
self.vpp()
| 25.894737
| 74
| 0.635163
|
f5a8272a1dc192cd1650c94442748183730870b5
| 1,046
|
py
|
Python
|
examples/option/model.py
|
vishalbelsare/neworder
|
38635fca64f239a9e8eb1a671872c174e1814678
|
[
"MIT"
] | 17
|
2017-12-08T10:21:18.000Z
|
2022-01-13T09:29:43.000Z
|
examples/option/model.py
|
vishalbelsare/neworder
|
38635fca64f239a9e8eb1a671872c174e1814678
|
[
"MIT"
] | 61
|
2018-07-21T21:37:12.000Z
|
2021-07-10T12:49:15.000Z
|
examples/option/model.py
|
vishalbelsare/neworder
|
38635fca64f239a9e8eb1a671872c174e1814678
|
[
"MIT"
] | 6
|
2019-06-06T18:29:31.000Z
|
2021-08-20T13:32:17.000Z
|
"""
Example - pricing a simple option
The main point of this example is to illustrate how different processes
can interact within the model, and how to synchronise the random streams in each process
"""
import neworder
from black_scholes import BlackScholes
# neworder.verbose() # uncomment for verbose logging
# neworder.checked(False) # uncomment to disable checks
# requires 4 identical sims with perturbations to compute market sensitivities
# (a.k.a. Greeks)
assert neworder.mpi.size() == 4, "This example requires 4 processes"
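# run with e.g.: mpiexec -n 4 python model.py (launcher name varies by MPI implementation)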
# initialisation
# market data
market = {
"spot": 100.0, # underlying spot price
"rate": 0.02, # risk-free interest rate
"divy": 0.01, # (continuous) dividend yield
"vol": 0.2 # stock volatility
}
# (European) option instrument data
option = {
"callput": "CALL",
"strike": 100.0,
"expiry": 0.75 # years
}
# model parameters
nsims = 1000000 # number of underlyings to simulate
# instantiate model
bs_mc = BlackScholes(option, market, nsims)
# run model
neworder.run(bs_mc)
| 24.904762
| 88
| 0.729446
|
41cb077fd298ef85de3b2afd3c0230be19f5c4fd
| 2,589
|
py
|
Python
|
event/onvoicestateupdate.py
|
ZigAnon/zigbot
|
a8e54a9e30b15abac6d4defea20f3208cee87e30
|
[
"MIT"
] | null | null | null |
event/onvoicestateupdate.py
|
ZigAnon/zigbot
|
a8e54a9e30b15abac6d4defea20f3208cee87e30
|
[
"MIT"
] | null | null | null |
event/onvoicestateupdate.py
|
ZigAnon/zigbot
|
a8e54a9e30b15abac6d4defea20f3208cee87e30
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import stackprinter as sp
from bin import zb
class onvoicestateupdateCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
    # Events on voice state update
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
try:
# Ignore bots
if member.bot:
return
if before.afk:
return
if after.afk:
await member.edit(voice_channel=None)
embed=discord.Embed(description=f'**{member.mention} ' \
f'was kicked from voice for being AFK for ' \
f'{int(member.guild.afk_timeout/60)} minutes**',
color=0x23d160)
embed.set_author(name=member, icon_url=member.avatar_url)
await zb.print_log(self,member,embed)
return
if not before.mute and after.mute:
#TODO: log event
return
elif before.mute and not after.mute:
#TODO: log event
return
if before.channel is after.channel:
return
            elif before.channel is None and after.channel is not None:
embed=discord.Embed(description=f'**{member.mention} ' \
f'joined voice channel #{after.channel.name}**',
color=0x23d160)
embed.set_author(name=member, icon_url=member.avatar_url)
await zb.print_log(self,member,embed)
            elif before.channel is not None and after.channel is not None:
embed=discord.Embed(description=f'**{member.mention} ' \
f'switched voice channel `#{before.channel.name}` ' \
f'-> `#{after.channel.name}`**',
color=0x23d160)
embed.set_author(name=member, icon_url=member.avatar_url)
await zb.print_log(self,member,embed)
            elif before.channel is not None and after.channel is None:
embed=discord.Embed(description=f'**{member.mention} ' \
f'left voice channel #{before.channel.name}**',
color=0x23d160)
embed.set_author(name=member, icon_url=member.avatar_url)
await zb.print_log(self,member,embed)
except Exception as e:
await zb.bot_errors(self,sp.format(e))
def setup(bot):
bot.add_cog(onvoicestateupdateCog(bot))
| 38.073529
| 77
| 0.548861
|
5166b3bab53a677b55fb04a0f93a165919544d33
| 1,426
|
py
|
Python
|
comments/management/commands/pop_comments.py
|
joshuanazareth97/cmod
|
bf3232f7cd062cfdac9b86eef6687db0c45fc2ef
|
[
"MIT"
] | null | null | null |
comments/management/commands/pop_comments.py
|
joshuanazareth97/cmod
|
bf3232f7cd062cfdac9b86eef6687db0c45fc2ef
|
[
"MIT"
] | null | null | null |
comments/management/commands/pop_comments.py
|
joshuanazareth97/cmod
|
bf3232f7cd062cfdac9b86eef6687db0c45fc2ef
|
[
"MIT"
] | null | null | null |
import random
from django.core.management.base import BaseCommand, CommandError
from comments.models import Candidate, Comment
from faker import Faker
from faker.providers import lorem
class Command(BaseCommand):
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self.fake = Faker()
self.fake.add_provider(lorem)
def add_arguments(self, parser):
        parser.add_argument('--n', type=lambda x: map(int, x.split(",")), help="Specify the (min, max) number of comments to create for each candidate. Usage: --n <min>,<max>")
def handle(self, *args, **options):
        n = options.get("n")
        min_max = list(n) if n is not None else [10, 20]
for candidate in Candidate.objects.all():
print("="*30)
no_tries = random.randint(*min_max)
print(f"Generating {no_tries} entry(s) for {candidate.name}")
for _ in range(no_tries):
comment = Comment.objects.create(author=candidate.creator, candidate=candidate, **self._generate_comment_data())
print(comment)
print()
def _generate_comment_data(self):
return {
"title": self.fake.sentence(nb_words=6, variable_nb_words=True, ext_word_list=None),
"type": random.choice(["NT","BR","EV"]) ,
"text": self.fake.paragraph(nb_sentences=5, variable_nb_sentences=True, ext_word_list=None),
}
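# Hypothetical invocation from a Django project (argument format follows the
# --n parser above; the range values are illustrative):
#   python manage.py pop_comments --n 5,15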
| 39.611111
| 173
| 0.642356
|
433c71d15b9ed4357ed860d316d25365e922be87
| 4,331
|
py
|
Python
|
benchmark/startQiskit1431.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1431.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1431.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=55
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
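# Note (illustrative sketch of the standard Grover construction): the returned
# circuit acts as a phase oracle, Zf|x> = (-1)^f(x) |x>. For n = 2 and f
# marking the key "11", only the marked basis state flips sign:
#   Zf|11> = -|11>,   Zf|00> = |00>, ...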
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=31
prog.cz(input_qubit[1],input_qubit[0]) # number=32
prog.h(input_qubit[0]) # number=33
prog.h(input_qubit[1]) # number=44
prog.cz(input_qubit[0],input_qubit[1]) # number=45
prog.h(input_qubit[1]) # number=46
prog.x(input_qubit[1]) # number=41
prog.h(input_qubit[1]) # number=48
prog.cz(input_qubit[0],input_qubit[1]) # number=49
prog.h(input_qubit[1]) # number=50
prog.x(input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=52
prog.cz(input_qubit[1],input_qubit[0]) # number=53
prog.h(input_qubit[0]) # number=54
prog.h(input_qubit[1]) # number=37
prog.cz(input_qubit[0],input_qubit[1]) # number=38
prog.h(input_qubit[1]) # number=39
prog.x(input_qubit[1]) # number=35
prog.cx(input_qubit[0],input_qubit[1]) # number=36
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
prog.cx(input_qubit[3],input_qubit[2]) # number=43
prog.cx(input_qubit[3],input_qubit[2]) # number=47
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.cx(input_qubit[0],input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
prog.cx(input_qubit[0],input_qubit[1]) # number=24
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[1]) # number=29
prog.y(input_qubit[4]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[3]) # number=51
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1431.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 32.081481
| 82
| 0.605403
|
1e61ea93c48673b55ce4b8934e06db941b53434f
| 156
|
py
|
Python
|
app.py
|
Shubham654/Image_Caption_In_Flask
|
0c641602f73464815ff55e49f0035359ee0e18e5
|
[
"MIT"
] | null | null | null |
app.py
|
Shubham654/Image_Caption_In_Flask
|
0c641602f73464815ff55e49f0035359ee0e18e5
|
[
"MIT"
] | null | null | null |
app.py
|
Shubham654/Image_Caption_In_Flask
|
0c641602f73464815ff55e49f0035359ee0e18e5
|
[
"MIT"
] | null | null | null |
from flask import Flask
app = Flask(__name__)
@app.route('/')
def home():
return "HELLO Shubham"
if __name__ == '__main__':
app.run(debug=True)
| 13
| 26
| 0.653846
|
a8b0260995482da49d8adf4cf28f57de040c3349
| 1,759
|
py
|
Python
|
final_gpt2_test_wikitext103-fp32.py
|
minhhn2910/conga2022
|
81ad2fb9c0055c332f8f305b2ea409b6577003f4
|
[
"MIT"
] | null | null | null |
final_gpt2_test_wikitext103-fp32.py
|
minhhn2910/conga2022
|
81ad2fb9c0055c332f8f305b2ea409b6577003f4
|
[
"MIT"
] | null | null | null |
final_gpt2_test_wikitext103-fp32.py
|
minhhn2910/conga2022
|
81ad2fb9c0055c332f8f305b2ea409b6577003f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Transformers installation
import numpy as np
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
from qtorch.quant import posit_quantize, float_quantize, configurable_table_quantize
device = 'cuda'
model_id = 'gpt2-large'
tokenizer = GPT2TokenizerFast.from_pretrained(model_id)
from datasets import load_dataset
#test = load_dataset("lambada", split='test')
test = load_dataset('wikitext', 'wikitext-103-v1', split='test')
#test = load_dataset("ptb_text_only", split='test')
encodings = tokenizer('\n\n'.join(test['text']), return_tensors='pt')
def run(weight_table, act_table):
    import torch
model = GPT2LMHeadModel.from_pretrained(model_id)
model = model.to(device)
layer_count = 0
linear_layer_count = 0
op_count = 0
#print ("MAC operation count ", op_count)
print ("Layer count ", layer_count)
#model = model.to(device)
from tqdm import tqdm
max_length = model.config.n_positions
stride = 1024
#stride = 32
lls = []
for i in tqdm(range(0, encodings.input_ids.size(1), stride)):
begin_loc = max(i + stride - max_length, 0)
end_loc = min(i + stride, encodings.input_ids.size(1))
trg_len = end_loc - i # may be different from stride on last loop
input_ids = encodings.input_ids[:,begin_loc:end_loc].to(device)
target_ids = input_ids.clone()
target_ids[:,:-trg_len] = -100
with torch.no_grad():
outputs = model(input_ids, labels=target_ids)
log_likelihood = outputs[0] * trg_len
lls.append(log_likelihood)
ppl = torch.exp(torch.stack(lls).sum() / end_loc)
return ppl.item()
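# The loop above implements strided (sliding-window) perplexity: each window
# contributes loss_i * trg_len_i, so ppl = exp(sum_i loss_i * trg_len_i / end_loc),
# i.e. exp of the token-weighted mean negative log-likelihood.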
print(run([], []))
| 25.128571
| 84
| 0.670836
|
8b111459077c0e4e28cfaed9c70a1662e4c822e3
| 3,144
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/models/csr.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | 1
|
2018-11-09T06:16:34.000Z
|
2018-11-09T06:16:34.000Z
|
azure-mgmt-web/azure/mgmt/web/models/csr.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/models/csr.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | 1
|
2018-11-09T06:17:41.000Z
|
2018-11-09T06:17:41.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class Csr(Resource):
"""Certificate signing request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:param name: Resource Name.
:type name: str
:param kind: Kind of resource.
:type kind: str
:param location: Resource Location.
:type location: str
:param type: Resource type.
:type type: str
:param tags: Resource tags.
:type tags: dict
:param csr_name: Name used to locate CSR object.
:type csr_name: str
:param distinguished_name: Distinguished name of certificate to be
created.
:type distinguished_name: str
:param csr_string: Actual CSR string created.
:type csr_string: str
    :param pfx_blob: PFX certificate of the created certificate.
:type pfx_blob: str
:param password: PFX password.
:type password: str
:param public_key_hash: Hash of the certificate's public key.
:type public_key_hash: str
:param hosting_environment: App Service Environment.
:type hosting_environment: str
"""
_validation = {
'id': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'csr_name': {'key': 'properties.name', 'type': 'str'},
'distinguished_name': {'key': 'properties.distinguishedName', 'type': 'str'},
'csr_string': {'key': 'properties.csrString', 'type': 'str'},
'pfx_blob': {'key': 'properties.pfxBlob', 'type': 'str'},
'password': {'key': 'properties.password', 'type': 'str'},
'public_key_hash': {'key': 'properties.publicKeyHash', 'type': 'str'},
'hosting_environment': {'key': 'properties.hostingEnvironment', 'type': 'str'},
}
def __init__(self, location, name=None, kind=None, type=None, tags=None, csr_name=None, distinguished_name=None, csr_string=None, pfx_blob=None, password=None, public_key_hash=None, hosting_environment=None):
super(Csr, self).__init__(name=name, kind=kind, location=location, type=type, tags=tags)
self.csr_name = csr_name
self.distinguished_name = distinguished_name
self.csr_string = csr_string
self.pfx_blob = pfx_blob
self.password = password
self.public_key_hash = public_key_hash
self.hosting_environment = hosting_environment
| 39.3
| 212
| 0.615776
|
4dabdec2cdccbc27f37a3ea26cc01e7605482fc2
| 1,794
|
py
|
Python
|
cgi-bin/request/hourlyprecip.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | 1
|
2019-10-07T17:01:24.000Z
|
2019-10-07T17:01:24.000Z
|
cgi-bin/request/hourlyprecip.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
cgi-bin/request/hourlyprecip.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Hourly precip download"""
import cgi
import datetime
import pytz
from pyiem.util import get_dbconn, ssw
def get_data(network, sts, ets, tzinfo, stations):
"""Go fetch data please"""
pgconn = get_dbconn('iem', user='nobody')
cursor = pgconn.cursor()
res = ("station,network,valid,precip_in\n")
    if len(stations) == 1:
        # pad the list so psycopg2 renders a valid SQL tuple for ``in %s``
        stations.append('ZZZZZ')
cursor.execute("""SELECT station, network, valid, phour from
hourly WHERE
valid >= %s and valid < %s and network = %s and station in %s
ORDER by valid ASC
""", (sts, ets, network, tuple(stations)))
for row in cursor:
res += ("%s,%s,%s,%s\n"
) % (row[0], row[1],
(row[2].astimezone(tzinfo)).strftime("%Y-%m-%d %H:%M"),
row[3])
return res
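# Illustrative return value (station ids and amounts are hypothetical):
#   station,network,valid,precip_in
#   AMW,IA_ASOS,2018-06-01 00:53,0.12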
def main():
""" run rabbit run """
ssw('Content-type: text/plain\n\n')
form = cgi.FieldStorage()
tzinfo = pytz.timezone(form.getfirst("tz", "America/Chicago"))
try:
sts = datetime.date(int(form.getfirst('year1')),
int(form.getfirst('month1')),
int(form.getfirst('day1')))
ets = datetime.date(int(form.getfirst('year2')),
int(form.getfirst('month2')),
int(form.getfirst('day2')))
except Exception as _exp:
ssw(("ERROR: Invalid date provided, please check selected dates."))
return
stations = form.getlist('station')
if not stations:
ssw(("ERROR: No stations specified for request."))
return
network = form.getfirst('network')[:12]
ssw(get_data(network, sts, ets, tzinfo, stations=stations))
if __name__ == '__main__':
# Go Main Go
main()
| 31.473684
| 76
| 0.561315
|
c40ac566f37b1de23756d0e061990c094053142f
| 35,444
|
py
|
Python
|
get_map.py
|
ChrisLiuxp/efficientdet
|
5d52ac491e1dd2a29ee6650bb746f1e840c24fcc
|
[
"MIT"
] | null | null | null |
get_map.py
|
ChrisLiuxp/efficientdet
|
5d52ac491e1dd2a29ee6650bb746f1e840c24fcc
|
[
"MIT"
] | null | null | null |
get_map.py
|
ChrisLiuxp/efficientdet
|
5d52ac491e1dd2a29ee6650bb746f1e840c24fcc
|
[
"MIT"
] | null | null | null |
import glob
import json
import os
import shutil
import operator
import sys
import argparse
import math
import numpy as np
#----------------------------------------------------#
#   Computes mAP (mean Average Precision)
#   Code cloned from https://github.com/Cartucho/mAP
#----------------------------------------------------#
MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge)
parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
# argparse receiving list of classes to be ignored
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
# argparse receiving list of classes with specific IoU (e.g., python main.py --set-class-iou person 0.7)
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
args = parser.parse_args()
'''
0,0 ------> x (width)
|
| (Left,Top)
| *_________
| | |
| |
y |_________|
(height) *
(Right,Bottom)
'''
# if there are no classes to ignore then replace None by empty list
if args.ignore is None:
args.ignore = []
specific_iou_flagged = False
if args.set_class_iou is not None:
specific_iou_flagged = True
# make sure that the cwd() is the location of the python script (so that every path makes sense)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
GT_PATH = os.path.join(os.getcwd(), 'input', 'ground-truth')
DR_PATH = os.path.join(os.getcwd(), 'input', 'detection-results')
# if there are no images then no animation can be shown
IMG_PATH = os.path.join(os.getcwd(), 'input', 'images-optional')
if os.path.exists(IMG_PATH):
for dirpath, dirnames, files in os.walk(IMG_PATH):
if not files:
# no image files found
args.no_animation = True
else:
args.no_animation = True
# try to import OpenCV if the user didn't choose the option --no-animation
show_animation = False
if not args.no_animation:
try:
import cv2
show_animation = True
except ImportError:
print("\"opencv-python\" not found, please install to visualize the results.")
args.no_animation = True
# try to import Matplotlib if the user didn't choose the option --no-plot
draw_plot = False
if not args.no_plot:
try:
import matplotlib.pyplot as plt
draw_plot = True
except ImportError:
print("\"matplotlib\" not found, please install it to get the resulting plots.")
args.no_plot = True
def log_average_miss_rate(precision, fp_cumsum, num_images):
"""
log-average miss rate:
Calculated by averaging miss rates at 9 evenly spaced FPPI points
between 10e-2 and 10e0, in log-space.
output:
lamr | log-average miss rate
mr | miss rate
fppi | false positives per image
references:
[1] Dollar, Piotr, et al. "Pedestrian Detection: An Evaluation of the
State of the Art." Pattern Analysis and Machine Intelligence, IEEE
Transactions on 34.4 (2012): 743 - 761.
"""
# if there were no detections of that class
if precision.size == 0:
lamr = 0
mr = 1
fppi = 0
return lamr, mr, fppi
fppi = fp_cumsum / float(num_images)
mr = (1 - precision)
fppi_tmp = np.insert(fppi, 0, -1.0)
mr_tmp = np.insert(mr, 0, 1.0)
# Use 9 evenly spaced reference points in log-space
ref = np.logspace(-2.0, 0.0, num = 9)
for i, ref_i in enumerate(ref):
# np.where() will always find at least 1 index, since min(ref) = 0.01 and min(fppi_tmp) = -1.0
j = np.where(fppi_tmp <= ref_i)[-1][-1]
ref[i] = mr_tmp[j]
# log(0) is undefined, so we use the np.maximum(1e-10, ref)
lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))
return lamr, mr, fppi
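# Minimal sanity-check sketch (inputs assumed): for a single perfect detection,
#   log_average_miss_rate(np.array([1.0]), np.array([0.0]), 1)
# gives mr = fppi = [0.], and lamr = 1e-10 (the np.maximum clamp), i.e. an
# effectively zero log-average miss rate.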
"""
throw error and exit
"""
def error(msg):
print(msg)
sys.exit(0)
"""
check if the number is a float between 0.0 and 1.0
"""
def is_float_between_0_and_1(value):
try:
val = float(value)
if val > 0.0 and val < 1.0:
return True
else:
return False
except ValueError:
return False
"""
Calculate the AP given the recall and precision array
1st) We compute a version of the measured precision/recall curve with
precision monotonically decreasing
2nd) We compute the AP as the area under this curve by numerical integration.
"""
def voc_ap(rec, prec):
"""
--- Official matlab code VOC2012---
mrec=[0 ; rec ; 1];
mpre=[0 ; prec ; 0];
for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
end
i=find(mrec(2:end)~=mrec(1:end-1))+1;
ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
    rec.insert(0, 0.0) # insert 0.0 at beginning of list
    rec.append(1.0) # insert 1.0 at end of list
    mrec = rec[:]
    prec.insert(0, 0.0) # insert 0.0 at beginning of list
    prec.append(0.0) # insert 0.0 at end of list
mpre = prec[:]
"""
This part makes the precision monotonically decreasing
(goes from the end to the beginning)
matlab: for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
"""
# matlab indexes start in 1 but python in 0, so I have to do:
# range(start=(len(mpre) - 2), end=0, step=-1)
# also the python function range excludes the end, resulting in:
# range(start=(len(mpre) - 2), end=-1, step=-1)
for i in range(len(mpre)-2, -1, -1):
mpre[i] = max(mpre[i], mpre[i+1])
"""
This part creates a list of indexes where the recall changes
matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
"""
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i-1]:
i_list.append(i) # if it was matlab would be i + 1
"""
The Average Precision (AP) is the area under the curve
(numerical integration)
matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
ap = 0.0
for i in i_list:
ap += ((mrec[i]-mrec[i-1])*mpre[i])
return ap, mrec, mpre
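# Worked example (sketch): voc_ap([0.5, 1.0], [1.0, 0.5]) pads the inputs to
# mrec = [0, 0.5, 1.0, 1.0], makes precision monotonic, mpre = [1.0, 1.0, 0.5, 0.0],
# and integrates ap = (0.5 - 0)*1.0 + (1.0 - 0.5)*0.5 = 0.75.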
"""
Convert the lines of a file to a list
"""
def file_lines_to_list(path):
# open txt file lines to a list
with open(path) as f:
content = f.readlines()
# remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
return content
"""
Draws text in image
"""
def draw_text_in_image(img, text, pos, color, line_width):
font = cv2.FONT_HERSHEY_PLAIN
fontScale = 1
lineType = 1
bottomLeftCornerOfText = pos
cv2.putText(img, text,
bottomLeftCornerOfText,
font,
fontScale,
color,
lineType)
text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
return img, (line_width + text_width)
"""
Plot - adjust axes
"""
def adjust_axes(r, t, fig, axes):
# get text width for re-scaling
bb = t.get_window_extent(renderer=r)
text_width_inches = bb.width / fig.dpi
# get axis width in inches
current_fig_width = fig.get_figwidth()
new_fig_width = current_fig_width + text_width_inches
    proportion = new_fig_width / current_fig_width
    # get axis limit
    x_lim = axes.get_xlim()
    axes.set_xlim([x_lim[0], x_lim[1]*proportion])
"""
Draw plot using Matplotlib
"""
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
# sort the dictionary by decreasing value, into a list of tuples
sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
# unpacking the list of tuples into two lists
sorted_keys, sorted_values = zip(*sorted_dic_by_value)
#
if true_p_bar != "":
"""
Special case to draw in:
- green -> TP: True Positives (object detected and matches ground-truth)
- red -> FP: False Positives (object detected but does not match ground-truth)
- orange -> FN: False Negatives (object not detected but present in the ground-truth)
"""
fp_sorted = []
tp_sorted = []
for key in sorted_keys:
fp_sorted.append(dictionary[key] - true_p_bar[key])
tp_sorted.append(true_p_bar[key])
plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted)
# add legend
plt.legend(loc='lower right')
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
fp_val = fp_sorted[i]
tp_val = tp_sorted[i]
fp_str_val = " " + str(fp_val)
tp_str_val = fp_str_val + " " + str(tp_val)
# trick to paint multicolor with offset:
# first paint everything and then repaint the first number
t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
else:
plt.barh(range(n_classes), sorted_values, color=plot_color)
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
str_val = " " + str(val) # add a space before
if val < 1.0:
str_val = " {0:.2f}".format(val)
t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
# re-set axes to show number inside the figure
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
# set window title
fig.canvas.set_window_title(window_title)
# write classes in y axis
tick_font_size = 12
plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
"""
Re-scale height accordingly
"""
init_height = fig.get_figheight()
    # compute the matrix height in points and inches
dpi = fig.dpi
height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)
height_in = height_pt / dpi
# compute the required figure height
top_margin = 0.15 # in percentage of the figure height
bottom_margin = 0.05 # in percentage of the figure height
figure_height = height_in / (1 - top_margin - bottom_margin)
# set new height
if figure_height > init_height:
fig.set_figheight(figure_height)
# set plot title
plt.title(plot_title, fontsize=14)
# set axis titles
# plt.xlabel('classes')
plt.xlabel(x_label, fontsize='large')
# adjust size of window
fig.tight_layout()
# save the plot
fig.savefig(output_path)
# show image
if to_show:
plt.show()
# close the plot
plt.close()
"""
Create a ".temp_files/" and "results/" directory
"""
TEMP_FILES_PATH = ".temp_files"
if not os.path.exists(TEMP_FILES_PATH): # if it doesn't exist already
os.makedirs(TEMP_FILES_PATH)
results_files_path = "results"
if os.path.exists(results_files_path): # if it exist already
# reset the results directory
shutil.rmtree(results_files_path)
os.makedirs(results_files_path)
if draw_plot:
os.makedirs(os.path.join(results_files_path, "classes"))
if show_animation:
os.makedirs(os.path.join(results_files_path, "images", "detections_one_by_one"))
"""
ground-truth
Load each of the ground-truth files into a temporary ".json" file.
Create a list of all the class names present in the ground-truth (gt_classes).
"""
# get a list with the ground-truth files
ground_truth_files_list = glob.glob(GT_PATH + '/*.txt')
if len(ground_truth_files_list) == 0:
error("Error: No ground-truth files found!")
ground_truth_files_list.sort()
# dictionary with counter per class
gt_counter_per_class = {}
counter_images_per_class = {}
for txt_file in ground_truth_files_list:
#print(txt_file)
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
# check if there is a correspondent detection-results file
temp_path = os.path.join(DR_PATH, (file_id + ".txt"))
if not os.path.exists(temp_path):
error_msg = "Error. File not found: {}\n".format(temp_path)
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
error(error_msg)
lines_list = file_lines_to_list(txt_file)
# create ground-truth dictionary
bounding_boxes = []
is_difficult = False
already_seen_classes = []
for line in lines_list:
try:
if "difficult" in line:
haha = line.split()
                haha_size = len(haha)
if haha_size == 6:
class_name, left, top, right, bottom, _difficult = line.split()
if haha_size == 7:
_difficult = haha[6]
bottom = haha[5]
right = haha[4]
top = haha[3]
left = haha[2]
class_name = haha[0]+" "+haha[1]
is_difficult = True
else:
# class_name, left, top, right, bottom = line.split()
haha = line.split()
                haha_size = len(haha)
if haha_size == 5:
class_name, left, top, right, bottom = line.split()
if haha_size == 6:
bottom = haha[5]
right = haha[4]
top = haha[3]
left = haha[2]
class_name = haha[0]+" "+haha[1]
except ValueError:
error_msg = "Error: File " + txt_file + " in the wrong format.\n"
error_msg += " Expected: <class_name> <left> <top> <right> <bottom> ['difficult']\n"
error_msg += " Received: " + line
error_msg += "\n\nIf you have a <class_name> with spaces between words you should remove them\n"
error_msg += "by running the script \"remove_space.py\" or \"rename_class.py\" in the \"extra/\" folder."
error(error_msg)
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
        bbox = left + " " + top + " " + right + " " + bottom
if is_difficult:
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False, "difficult":True})
is_difficult = False
else:
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False})
# count that object
if class_name in gt_counter_per_class:
gt_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
gt_counter_per_class[class_name] = 1
if class_name not in already_seen_classes:
if class_name in counter_images_per_class:
counter_images_per_class[class_name] += 1
else:
# if class didn't exist yet
counter_images_per_class[class_name] = 1
already_seen_classes.append(class_name)
# dump bounding_boxes into a ".json" file
with open(TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json", 'w') as outfile:
json.dump(bounding_boxes, outfile)
gt_classes = list(gt_counter_per_class.keys())
# let's sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
#print(gt_classes)
#print(gt_counter_per_class)
"""
Check format of the flag --set-class-iou (if used)
e.g. check if class exists
"""
if specific_iou_flagged:
n_args = len(args.set_class_iou)
error_msg = \
'\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
if n_args % 2 != 0:
error('Error, missing arguments. Flag usage:' + error_msg)
# [class_1] [IoU_1] [class_2] [IoU_2]
# specific_iou_classes = ['class_1', 'class_2']
specific_iou_classes = args.set_class_iou[::2] # even
# iou_list = ['IoU_1', 'IoU_2']
iou_list = args.set_class_iou[1::2] # odd
if len(specific_iou_classes) != len(iou_list):
error('Error, missing arguments. Flag usage:' + error_msg)
for tmp_class in specific_iou_classes:
if tmp_class not in gt_classes:
error('Error, unknown class \"' + tmp_class + '\". Flag usage:' + error_msg)
for num in iou_list:
if not is_float_between_0_and_1(num):
error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)
"""
detection-results
Load each of the detection-results files into a temporary ".json" file.
"""
# get a list with the detection-results files
dr_files_list = glob.glob(DR_PATH + '/*.txt')
dr_files_list.sort()
for class_index, class_name in enumerate(gt_classes):
bounding_boxes = []
for txt_file in dr_files_list:
#print(txt_file)
# the first time it checks if all the corresponding ground-truth files exist
file_id = txt_file.split(".txt",1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
temp_path = os.path.join(GT_PATH, (file_id + ".txt"))
if class_index == 0:
if not os.path.exists(temp_path):
error_msg = "Error. File not found: {}\n".format(temp_path)
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
error(error_msg)
lines = file_lines_to_list(txt_file)
for line in lines:
try:
# tmp_class_name, confidence, left, top, right, bottom = line.split()
haha = line.split()
                haha_size = len(haha)
if haha_size == 6:
tmp_class_name, confidence, left, top, right, bottom = line.split()
if haha_size == 7:
bottom = haha[6]
right = haha[5]
top = haha[4]
left = haha[3]
confidence = haha[2]
tmp_class_name = haha[0]+" "+haha[1]
except ValueError:
error_msg = "Error: File " + txt_file + " in the wrong format.\n"
error_msg += " Expected: <class_name> <confidence> <left> <top> <right> <bottom>\n"
error_msg += " Received: " + line
error(error_msg)
if tmp_class_name == class_name:
#print("match")
                bbox = left + " " + top + " " + right + " " + bottom
bounding_boxes.append({"confidence":confidence, "file_id":file_id, "bbox":bbox})
#print(bounding_boxes)
# sort detection-results by decreasing confidence
bounding_boxes.sort(key=lambda x:float(x['confidence']), reverse=True)
with open(TEMP_FILES_PATH + "/" + class_name + "_dr.json", 'w') as outfile:
json.dump(bounding_boxes, outfile)
"""
Calculate the AP for each class
"""
sum_AP = 0.0
ap_dictionary = {}
lamr_dictionary = {}
# open file to store the results
with open(results_files_path + "/results.txt", 'w') as results_file:
results_file.write("# AP and precision/recall per class\n")
count_true_positives = {}
for class_index, class_name in enumerate(gt_classes):
count_true_positives[class_name] = 0
"""
Load detection-results of that class
"""
dr_file = TEMP_FILES_PATH + "/" + class_name + "_dr.json"
dr_data = json.load(open(dr_file))
"""
Assign detection-results to ground-truth objects
"""
nd = len(dr_data)
tp = [0] * nd # creates an array of zeros of size nd
fp = [0] * nd
for idx, detection in enumerate(dr_data):
file_id = detection["file_id"]
if show_animation:
# find ground truth image
ground_truth_img = glob.glob1(IMG_PATH, file_id + ".*")
#tifCounter = len(glob.glob1(myPath,"*.tif"))
if len(ground_truth_img) == 0:
error("Error. Image not found with id: " + file_id)
elif len(ground_truth_img) > 1:
error("Error. Multiple image with id: " + file_id)
else: # found image
#print(IMG_PATH + "/" + ground_truth_img[0])
# Load image
img = cv2.imread(IMG_PATH + "/" + ground_truth_img[0])
# load image with draws of multiple detections
img_cumulative_path = results_files_path + "/images/" + ground_truth_img[0]
if os.path.isfile(img_cumulative_path):
img_cumulative = cv2.imread(img_cumulative_path)
else:
img_cumulative = img.copy()
# Add bottom border to image
bottom_border = 60
BLACK = [0, 0, 0]
img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
# assign detection-results to ground truth object if any
# open ground-truth with that file_id
gt_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json"
ground_truth_data = json.load(open(gt_file))
ovmax = -1
gt_match = -1
# load detected object bounding-box
bb = [ float(x) for x in detection["bbox"].split() ]
for obj in ground_truth_data:
# look for a class_name match
if obj["class_name"] == class_name:
bbgt = [ float(x) for x in obj["bbox"].split() ]
bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
+ 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
# assign detection as true positive/don't care/false positive
if show_animation:
status = "NO MATCH FOUND!" # status is only used in the animation
# set minimum overlap
min_overlap = MINOVERLAP
if specific_iou_flagged:
if class_name in specific_iou_classes:
index = specific_iou_classes.index(class_name)
min_overlap = float(iou_list[index])
if ovmax >= min_overlap:
if "difficult" not in gt_match:
if not bool(gt_match["used"]):
# true positive
tp[idx] = 1
gt_match["used"] = True
count_true_positives[class_name] += 1
# update the ".json" file
with open(gt_file, 'w') as f:
f.write(json.dumps(ground_truth_data))
if show_animation:
status = "MATCH!"
else:
# false positive (multiple detection)
fp[idx] = 1
if show_animation:
status = "REPEATED MATCH!"
else:
# false positive
fp[idx] = 1
if ovmax > 0:
status = "INSUFFICIENT OVERLAP"
"""
Draw image to show animation
"""
if show_animation:
                height, width = img.shape[:2]
# colors (OpenCV works with BGR)
white = (255,255,255)
light_blue = (255,200,100)
green = (0,255,0)
light_red = (30,30,255)
# 1st line
margin = 10
v_pos = int(height - margin - (bottom_border / 2.0))
text = "Image: " + ground_truth_img[0] + " "
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
text = "Class [" + str(class_index) + "/" + str(n_classes) + "]: " + class_name + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue, line_width)
if ovmax != -1:
color = light_red
if status == "INSUFFICIENT OVERLAP":
text = "IoU: {0:.2f}% ".format(ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
else:
text = "IoU: {0:.2f}% ".format(ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
color = green
img, _ = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
# 2nd line
v_pos += int(bottom_border / 2.0)
rank_pos = str(idx+1) # rank position (idx starts at 0)
text = "Detection #rank: " + rank_pos + " confidence: {0:.2f}% ".format(float(detection["confidence"])*100)
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
color = light_red
if status == "MATCH!":
color = green
text = "Result: " + status + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
font = cv2.FONT_HERSHEY_SIMPLEX
                if ovmax > 0: # if there are intersections between the bounding-boxes
bbgt = [ int(round(float(x))) for x in gt_match["bbox"].split() ]
cv2.rectangle(img,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.rectangle(img_cumulative,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.putText(img_cumulative, class_name, (bbgt[0],bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
bb = [int(i) for i in bb]
cv2.rectangle(img,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.rectangle(img_cumulative,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.putText(img_cumulative, class_name, (bb[0],bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
# show image
cv2.imshow("Animation", img)
cv2.waitKey(20) # show for 20 ms
# save image to results
output_img_path = results_files_path + "/images/detections_one_by_one/" + class_name + "_detection" + str(idx) + ".jpg"
cv2.imwrite(output_img_path, img)
# save the image with all the objects drawn to it
cv2.imwrite(img_cumulative_path, img_cumulative)
#print(tp)
# compute precision/recall
cumsum = 0
for idx, val in enumerate(fp):
fp[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(tp):
tp[idx] += cumsum
cumsum += val
#print(tp)
rec = tp[:]
for idx, val in enumerate(tp):
rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
#print(rec)
prec = tp[:]
for idx, val in enumerate(tp):
prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
#print(prec)
ap, mrec, mprec = voc_ap(rec[:], prec[:])
sum_AP += ap
text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100)
"""
Write to results.txt
"""
rounded_prec = [ '%.2f' % elem for elem in prec ]
rounded_rec = [ '%.2f' % elem for elem in rec ]
results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
if not args.quiet:
print(text)
ap_dictionary[class_name] = ap
n_images = counter_images_per_class[class_name]
lamr, mr, fppi = log_average_miss_rate(np.array(rec), np.array(fp), n_images)
lamr_dictionary[class_name] = lamr
"""
Draw plot
"""
if draw_plot:
plt.plot(rec, prec, '-o')
# add a new penultimate point to the list (mrec[-2], 0.0)
# since the last line segment (and respective area) do not affect the AP value
area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
# set window title
fig = plt.gcf() # gcf - get current figure
fig.canvas.set_window_title('AP ' + class_name)
# set plot title
plt.title('class: ' + text)
#plt.suptitle('This is a somewhat long figure title', fontsize=16)
# set axis titles
plt.xlabel('Recall')
plt.ylabel('Precision')
# optional - set axes
axes = plt.gca() # gca - get current axes
axes.set_xlim([0.0,1.0])
axes.set_ylim([0.0,1.05]) # .05 to give some extra space
# Alternative option -> wait for button to be pressed
#while not plt.waitforbuttonpress(): pass # wait for key display
# Alternative option -> normal display
#plt.show()
# save the plot
fig.savefig(results_files_path + "/classes/" + class_name + ".png")
plt.cla() # clear axes for next plot
if show_animation:
cv2.destroyAllWindows()
results_file.write("\n# mAP of all classes\n")
mAP = sum_AP / n_classes
text = "mAP = {0:.2f}%".format(mAP*100)
results_file.write(text + "\n")
print(text)
# remove the temp_files directory
shutil.rmtree(TEMP_FILES_PATH)
"""
Count total of detection-results
"""
# iterate through all the files
det_counter_per_class = {}
for txt_file in dr_files_list:
# get lines to list
lines_list = file_lines_to_list(txt_file)
for line in lines_list:
class_name = line.split()[0]
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
# count that object
if class_name in det_counter_per_class:
det_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
det_counter_per_class[class_name] = 1
#print(det_counter_per_class)
dr_classes = list(det_counter_per_class.keys())
"""
Plot the total number of occurrences of each class in the ground-truth
"""
if draw_plot:
window_title = "ground-truth-info"
plot_title = "ground-truth\n"
plot_title += "(" + str(len(ground_truth_files_list)) + " files and " + str(n_classes) + " classes)"
x_label = "Number of objects per class"
output_path = results_files_path + "/ground-truth-info.png"
to_show = False
plot_color = 'forestgreen'
draw_plot_func(
gt_counter_per_class,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
'',
)
"""
Write number of ground-truth objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
results_file.write("\n# Number of ground-truth objects per class\n")
for class_name in sorted(gt_counter_per_class):
results_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")
"""
Finish counting true positives
"""
for class_name in dr_classes:
# if class exists in detection-result but not in ground-truth then there are no true positives in that class
if class_name not in gt_classes:
count_true_positives[class_name] = 0
#print(count_true_positives)
"""
Plot the total number of occurrences of each class in the "detection-results" folder
"""
if draw_plot:
window_title = "detection-results-info"
# Plot title
plot_title = "detection-results\n"
plot_title += "(" + str(len(dr_files_list)) + " files and "
count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(det_counter_per_class.values()))
plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
# end Plot title
x_label = "Number of objects per class"
output_path = results_files_path + "/detection-results-info.png"
to_show = False
plot_color = 'forestgreen'
true_p_bar = count_true_positives
draw_plot_func(
det_counter_per_class,
len(det_counter_per_class),
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
true_p_bar
)
"""
Write number of detected objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
results_file.write("\n# Number of detected objects per class\n")
for class_name in sorted(dr_classes):
n_det = det_counter_per_class[class_name]
text = class_name + ": " + str(n_det)
text += " (tp:" + str(count_true_positives[class_name]) + ""
text += ", fp:" + str(n_det - count_true_positives[class_name]) + ")\n"
results_file.write(text)
"""
Draw log-average miss rate plot (Show lamr of all classes in decreasing order)
"""
if draw_plot:
window_title = "lamr"
plot_title = "log-average miss rate"
x_label = "log-average miss rate"
output_path = results_files_path + "/lamr.png"
to_show = False
plot_color = 'royalblue'
draw_plot_func(
lamr_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
"""
Draw mAP plot (Show AP's of all classes in decreasing order)
"""
if draw_plot:
window_title = "mAP"
plot_title = "mAP = {0:.2f}%".format(mAP*100)
x_label = "Average Precision"
output_path = results_files_path + "/mAP.png"
to_show = True
plot_color = 'royalblue'
draw_plot_func(
ap_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
| 38.864035
| 135
| 0.573948
|
f41444ad3c7e4f6b3650c8f73851eda4d02bee9e
| 411
|
py
|
Python
|
test/unittests/gui/views/widgets/__init__.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
test/unittests/gui/views/widgets/__init__.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
test/unittests/gui/views/widgets/__init__.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
'''
Unittests/Gui/Views/Widgets
___________________________
Test suite for widget definitions.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules/submodules
from . import header
# SUITE
# -----
def add_tests(suite):
'''Add tests to the unittest suite'''
header.add_tests(suite)
| 18.681818
| 69
| 0.693431
|
ee386b3557e992772f63f93cc1e43c4a0630bc93
| 371
|
py
|
Python
|
habari/celery.py
|
ppolle/habari
|
671b98c361ce593f708bc15f69dd3aa6fe72b128
|
[
"MIT"
] | 3
|
2020-06-08T08:39:06.000Z
|
2020-07-30T10:46:22.000Z
|
habari/celery.py
|
ppolle/habari
|
671b98c361ce593f708bc15f69dd3aa6fe72b128
|
[
"MIT"
] | 9
|
2021-03-19T11:18:58.000Z
|
2022-02-10T15:48:35.000Z
|
habari/celery.py
|
ppolle/habari
|
671b98c361ce593f708bc15f69dd3aa6fe72b128
|
[
"MIT"
] | 1
|
2021-09-22T07:23:03.000Z
|
2021-09-22T07:23:03.000Z
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'habari.settings')
app = Celery('habari')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
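# Illustrative invocation (requires a running broker; delay() is the standard
# Celery shortcut for apply_async):
#   debug_task.delay()  # the worker log then shows the request context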
| 24.733333
| 66
| 0.778976
|
51dad37e5119ecd3ceb561b5b78f7f78b073715c
| 6,868
|
py
|
Python
|
autofit/graphical/expectation_propagation/ep_mean_field.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | 39
|
2019-01-24T10:45:23.000Z
|
2022-03-18T09:37:59.000Z
|
autofit/graphical/expectation_propagation/ep_mean_field.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | 260
|
2018-11-27T12:56:33.000Z
|
2022-03-31T16:08:59.000Z
|
autofit/graphical/expectation_propagation/ep_mean_field.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | 13
|
2018-11-30T16:49:05.000Z
|
2022-01-21T17:39:29.000Z
|
import logging
from typing import (
Dict, Tuple, Optional, List
)
import numpy as np
from autofit.graphical.factor_graphs import (
Factor, FactorGraph
)
from autofit.graphical.mean_field import MeanField, FactorApproximation
from autofit.graphical.utils import Status
from autofit.mapper.variable import Variable
from autofit.messages.abstract import AbstractMessage
logger = logging.getLogger(
__name__
)
class EPMeanField(FactorGraph):
'''
    This class encodes the EP mean-field approximation to a factor graph.
Attributes
----------
factor_graph: FactorGraph
the base factor graph being approximated
factor_mean_field: Dict[Factor, MeanField]
the mean-field approximation for each factor in the factor graph
mean_field: MeanField
the mean-field approximation of the full factor graph
i.e. the product of the factor mean-field approximations
variables: Set[Variable]
the variables of the approximation
deterministic_variables: Set[Variable]
the deterministic variables
log_evidence: float
the approximate log evidence of the approximation
is_valid: bool
returns whether the factor mean-field approximations are all valid
Methods
-------
from_approx_dists(factor_graph, approx_dists)
create a EPMeanField object from the passed factor_graph
using approx_dists to initialise the factor mean-field approximations
factor_approximation(factor)
create the FactorApproximation for the factor
project_factor_approx(factor_approximation)
given the passed FactorApproximation, return a new `EPMeanField`
object encoding the updated mean-field approximation
'''
def __init__(
self,
factor_graph: FactorGraph,
factor_mean_field: Dict[Factor, MeanField]
):
self._factor_graph = factor_graph
self._factor_mean_field = factor_mean_field
super().__init__(self.factor_graph.factors)
@property
def name(self):
return f"EP_{self.factor_graph.name}"
@property
def variables(self):
return self.factor_graph.variables
@property
def deterministic_variables(self):
return self.factor_graph.deterministic_variables
@property
def factor_mean_field(self) -> Dict[Factor, MeanField]:
return self._factor_mean_field.copy()
@property
def factor_graph(self) -> FactorGraph:
return self._factor_graph
@classmethod
def from_approx_dists(
cls,
factor_graph: FactorGraph,
approx_dists: Dict[Variable, AbstractMessage],
) -> "EPMeanField":
factor_mean_field = {
factor: MeanField({
v: approx_dists[v] for v in factor.all_variables
})
for factor in factor_graph.factors
}
return cls(
factor_graph,
factor_mean_field
)
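    # Hypothetical usage sketch (the message class and graph variable are
    # assumed, not defined in this module):
    #   approx = EPMeanField.from_approx_dists(
    #       factor_graph=graph,
    #       approx_dists={v: NormalMessage(0., 1.) for v in graph.all_variables},
    #   )
    # which seeds every factor's mean-field with the same prior message.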
from_kws = from_approx_dists
def factor_approximation(self, factor: Factor) -> FactorApproximation:
"""
Create an approximation for one factor.
This comprises:
- The factor
- The factor's variable distributions
- The cavity distribution, which is the product of the distributions
for each variable for all other factors
- The model distribution, which is the product of the distributions
for each variable for all factors
Parameters
----------
factor
Some factor
Returns
-------
An object comprising distributions with a specific distribution excluding
that factor
"""
factor_mean_field = self._factor_mean_field.copy()
factor_dist = factor_mean_field.pop(factor)
cavity_dist = MeanField({
v: 1. for v
in factor_dist.all_variables
}).prod(
*factor_mean_field.values()
)
model_dist = factor_dist.prod(cavity_dist)
return FactorApproximation(
factor,
cavity_dist,
factor_dist,
model_dist
)
def project_factor_approx(
self, projection: FactorApproximation, status: Optional[Status] = None,
) -> Tuple["EPMeanField", Status]:
"""
"""
factor_mean_field = self.factor_mean_field
factor_mean_field[projection.factor] = projection.factor_dist
new_approx = type(self)(
factor_graph=self._factor_graph,
factor_mean_field=factor_mean_field)
return new_approx, status
project = project_factor_approx
@property
def mean_field(self) -> MeanField:
return MeanField({
v: 1. for v in self.all_variables
}).prod(
*self._factor_mean_field.values()
)
model_dist = mean_field
@property
def variable_messages(self) -> Dict[Variable, List[AbstractMessage]]:
variable_messages = {
v: [] for v in self.all_variables}
for meanfield in self.factor_mean_field.values():
for v, message in meanfield.items():
variable_messages[v].append(message)
return variable_messages
@property
def variable_evidence(self) -> Dict[Variable, np.ndarray]:
return {
v: AbstractMessage.log_normalisation(*ms)
for v, ms in self.variable_messages.items()
}
@property
def factor_evidence(self) -> Dict[Factor, np.ndarray]:
return {
factor: meanfield.log_norm
for factor, meanfield in self.factor_mean_field.items()
}
@property
def log_evidence(self):
"""
Calculates evidence for the EP approximation
Evidence for a variable, xᵢ,
Zᵢ = ∫ ∏ₐ m_{a → i} (xᵢ) dxᵢ
Evidence for a factor, f_a,
∫ ∏_{j ∈ a} m_{i → a} (xᵢ) fₐ(xₐ) dxₐ
Zₐ = -----------------------------------------
∏_{j ∈ a} Zⱼ
Evidence for model
Z = ∏ᵢ Zᵢ ∏ₐ Zₐ
"""
variable_evidence = {
v: np.sum(logz) for v, logz in self.variable_evidence.items()}
factor_evidence = sum(
np.sum(meanfield.log_norm)
- sum(variable_evidence[v] for v in factor.all_variables)
for factor, meanfield in self.factor_mean_field.items()
)
return factor_evidence + sum(variable_evidence.values())
def __repr__(self) -> str:
clsname = type(self).__name__
try:
log_evidence = self.log_evidence
except Exception as e:
logger.exception(e)
log_evidence = float("nan")
return (
f"{clsname}({self.factor_graph}, "
f"log_evidence={log_evidence})")
| 28.497925
| 83
| 0.622452
|
3ed7cbf6be384e29d18fdcfc8fd502e3ca5eff8c
| 9,001
|
py
|
Python
|
plugins/trezor/clientbase.py
|
johnlito123/electrum-xuez
|
4eb35889f95e31f0a08d5488082df9ab94b4c3ca
|
[
"MIT"
] | null | null | null |
plugins/trezor/clientbase.py
|
johnlito123/electrum-xuez
|
4eb35889f95e31f0a08d5488082df9ab94b4c3ca
|
[
"MIT"
] | null | null | null |
plugins/trezor/clientbase.py
|
johnlito123/electrum-xuez
|
4eb35889f95e31f0a08d5488082df9ab94b4c3ca
|
[
"MIT"
] | 4
|
2018-07-07T16:35:50.000Z
|
2018-12-25T16:02:52.000Z
|
import time
from struct import pack
from electrum_xuez.i18n import _
from electrum_xuez.util import PrintError, UserCancelled
from electrum_xuez.keystore import bip39_normalize_passphrase
from electrum_xuez.bitcoin import serialize_xpub
class GuiMixin(object):
# Requires: self.proto, self.device
messages = {
3: _("Confirm the transaction output on your %s device"),
4: _("Confirm internal entropy on your %s device to begin"),
5: _("Write down the seed word shown on your %s"),
6: _("Confirm on your %s that you want to wipe it clean"),
7: _("Confirm on your %s device the message to sign"),
8: _("Confirm the total amount spent and the transaction fee on your "
"%s device"),
10: _("Confirm wallet address on your %s device"),
'default': _("Check your %s device to continue"),
}
def callback_Failure(self, msg):
# BaseClient's unfortunate call() implementation forces us to
# raise exceptions on failure in order to unwind the stack.
# However, making the user acknowledge they cancelled
# gets old very quickly, so we suppress those. The NotInitialized
# one is misnamed and indicates a passphrase request was cancelled.
if msg.code in (self.types.Failure_PinCancelled,
self.types.Failure_ActionCancelled,
self.types.Failure_NotInitialized):
raise UserCancelled()
raise RuntimeError(msg.message)
def callback_ButtonRequest(self, msg):
message = self.msg
if not message:
message = self.messages.get(msg.code, self.messages['default'])
self.handler.show_message(message % self.device, self.cancel)
return self.proto.ButtonAck()
def callback_PinMatrixRequest(self, msg):
if msg.type == 2:
msg = _("Enter a new PIN for your %s:")
elif msg.type == 3:
msg = (_("Re-enter the new PIN for your %s.\n\n"
"NOTE: the positions of the numbers have changed!"))
else:
msg = _("Enter your current %s PIN:")
pin = self.handler.get_pin(msg % self.device)
if not pin:
return self.proto.Cancel()
return self.proto.PinMatrixAck(pin=pin)
def callback_PassphraseRequest(self, req):
if self.creating_wallet:
msg = _("Enter a passphrase to generate this wallet. Each time "
"you use this wallet your %s will prompt you for the "
"passphrase. If you forget the passphrase you cannot "
"access the Xuez coins in the wallet.") % self.device
else:
msg = _("Enter the passphrase to unlock this wallet:")
passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
if passphrase is None:
return self.proto.Cancel()
passphrase = bip39_normalize_passphrase(passphrase)
return self.proto.PassphraseAck(passphrase=passphrase)
def callback_WordRequest(self, msg):
self.step += 1
msg = _("Step %d/24. Enter seed word as explained on "
"your %s:") % (self.step, self.device)
word = self.handler.get_word(msg)
# Unfortunately the device can't handle self.proto.Cancel()
return self.proto.WordAck(word=word)
def callback_CharacterRequest(self, msg):
char_info = self.handler.get_char(msg)
if not char_info:
return self.proto.Cancel()
return self.proto.CharacterAck(**char_info)
class TrezorClientBase(GuiMixin, PrintError):
def __init__(self, handler, plugin, proto):
assert hasattr(self, 'tx_api') # ProtocolMixin already constructed?
self.proto = proto
self.device = plugin.device
self.handler = handler
self.tx_api = plugin
self.types = plugin.types
self.msg = None
self.creating_wallet = False
self.used()
def __str__(self):
return "%s/%s" % (self.label(), self.features.device_id)
def label(self):
'''The name given by the user to the device.'''
return self.features.label
def is_initialized(self):
'''True if initialized, False if wiped.'''
return self.features.initialized
def is_pairable(self):
return not self.features.bootloader_mode
def used(self):
self.last_operation = time.time()
def prevent_timeouts(self):
self.last_operation = float('inf')
def timeout(self, cutoff):
'''Time out the client if the last operation was before cutoff.'''
if self.last_operation < cutoff:
self.print_error("timed out")
self.clear_session()
@staticmethod
def expand_path(n):
'''Convert bip32 path to list of uint32 integers with prime flags
0/-1/1' -> [0, 0x80000001, 0x80000001]'''
        # This code is similar to code in trezorlib where it unfortunately
# is not declared as a staticmethod. Our n has an extra element.
PRIME_DERIVATION_FLAG = 0x80000000
path = []
for x in n.split('/')[1:]:
prime = 0
if x.endswith("'"):
x = x.replace('\'', '')
prime = PRIME_DERIVATION_FLAG
if x.startswith('-'):
prime = PRIME_DERIVATION_FLAG
path.append(abs(int(x)) | prime)
return path
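    # Worked example of the prime-flag encoding above (sketch):
    #   expand_path("m/44'/0'/0'") == [0x8000002C, 0x80000000, 0x80000000]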
def cancel(self):
'''Provided here as in keepkeylib but not trezorlib.'''
self.transport.write(self.proto.Cancel())
def i4b(self, x):
return pack('>I', x)
def get_xpub(self, bip32_path, xtype):
address_n = self.expand_path(bip32_path)
creating = False
node = self.get_public_node(address_n, creating).node
return serialize_xpub(xtype, node.chain_code, node.public_key, node.depth, self.i4b(node.fingerprint), self.i4b(node.child_num))
def toggle_passphrase(self):
if self.features.passphrase_protection:
self.msg = _("Confirm on your %s device to disable passphrases")
else:
self.msg = _("Confirm on your %s device to enable passphrases")
enabled = not self.features.passphrase_protection
self.apply_settings(use_passphrase=enabled)
def change_label(self, label):
self.msg = _("Confirm the new label on your %s device")
self.apply_settings(label=label)
def change_homescreen(self, homescreen):
self.msg = _("Confirm on your %s device to change your home screen")
self.apply_settings(homescreen=homescreen)
def set_pin(self, remove):
if remove:
self.msg = _("Confirm on your %s device to disable PIN protection")
elif self.features.pin_protection:
self.msg = _("Confirm on your %s device to change your PIN")
else:
self.msg = _("Confirm on your %s device to set a PIN")
self.change_pin(remove)
def clear_session(self):
'''Clear the session to force pin (and passphrase if enabled)
re-entry. Does not leak exceptions.'''
self.print_error("clear session:", self)
self.prevent_timeouts()
try:
super(TrezorClientBase, self).clear_session()
except BaseException as e:
# If the device was removed it has the same effect...
self.print_error("clear_session: ignoring error", str(e))
pass
def get_public_node(self, address_n, creating):
self.creating_wallet = creating
return super(TrezorClientBase, self).get_public_node(address_n)
def close(self):
        '''Called when our wallet was closed or the device removed.'''
self.print_error("closing client")
self.clear_session()
# Release the device
self.transport.close()
def firmware_version(self):
f = self.features
return (f.major_version, f.minor_version, f.patch_version)
def atleast_version(self, major, minor=0, patch=0):
return self.firmware_version() >= (major, minor, patch)
@staticmethod
def wrapper(func):
'''Wrap methods to clear any message box they opened.'''
def wrapped(self, *args, **kwargs):
try:
self.prevent_timeouts()
return func(self, *args, **kwargs)
finally:
self.used()
self.handler.finished()
self.creating_wallet = False
self.msg = None
return wrapped
@staticmethod
def wrap_methods(cls):
for method in ['apply_settings', 'change_pin',
'get_address', 'get_public_node',
'load_device_by_mnemonic', 'load_device_by_xprv',
'recovery_device', 'reset_device', 'sign_message',
'sign_tx', 'wipe_device']:
setattr(cls, method, cls.wrapper(getattr(cls, method)))
| 38.302128
| 136
| 0.616265
|
6fd0ccb98a789d98d42e1b7d5d0fb1466f529314
| 4,855
|
py
|
Python
|
tools/external_devel.py
|
chemoelectric/sortsmill
|
90b97a9296582211a133970bb577013c9c86ed81
|
[
"MIT"
] | 1
|
2021-10-14T20:56:30.000Z
|
2021-10-14T20:56:30.000Z
|
tools/external_devel.py
|
chemoelectric/sortsmill
|
90b97a9296582211a133970bb577013c9c86ed81
|
[
"MIT"
] | null | null | null |
tools/external_devel.py
|
chemoelectric/sortsmill
|
90b97a9296582211a133970bb577013c9c86ed81
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
A module for use with external development tools/methods, such as the
OCaml programming favored at the Sorts Mill.
Copyright (c) 2011 Barry Schwartz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cmath
import fontforge
import imp
import subprocess
#--------------------------------------------------------------------------
direction_tolerance = 1e-4
linear_tolerance = 1e-4
decimal_places = 4
def move_to_first_quadrant(c):
return complex(abs(c.real), abs(c.imag))
def caml_complex(c):
x = c.real
y = c.imag
if x < 0:
s_x = ("x'({0:." + str(decimal_places) + "f})").format(x)
elif x == 0:
s_x = None
else:
s_x = ("x' {0:." + str(decimal_places) + "f}").format(x)
if y < 0:
s_y = ("y'({0:." + str(decimal_places) + "f})").format(y)
elif y == 0:
s_y = None
else:
s_y = ("y' {0:." + str(decimal_places) + "f}").format(y)
if s_x is None and s_y is None:
s = "zero"
elif s_x is None:
s = s_y
elif s_y is None:
s = s_x
else:
s = s_x + " + " + s_y
return s
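# Worked examples for caml_complex with decimal_places = 4:
#   caml_complex(complex(1, -2)) -> "x' 1.0000 + y'(-2.0000)"
#   caml_complex(complex(0, 0))  -> "zero"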
def caml_path(contour):
"Returns OCaml code for a contour, in the notations of the Sorts Mill's Fontdesign module."
plist = list(contour)
if 3 <= len(plist) and not plist[-2].on_curve and not plist[-1].on_curve:
plist = plist[-1:] + plist[:-1]
plist2 = []
i = 0
while i < len(plist):
if plist[i].on_curve:
if i + 1 < len(plist) and not plist[i + 1].on_curve:
plist2 += [plist[i], plist[i], plist[i + 1]]
i += 2
else:
plist2 += [plist[i], plist[i], plist[i]]
i += 1
elif i + 2 < len(plist) and not plist[i + 2].on_curve:
plist2 += [plist[i], plist[i + 1], plist[i + 2]]
i += 3
else:
plist2 += [plist[i], plist[i + 1], plist[i + 1]]
i += 2
s = ""
i = 0
while i < len(plist2):
join = "|> put " if i != 0 else ""
inhandle_x = plist2[i].x - plist2[i + 1].x
inhandle_y = plist2[i].y - plist2[i + 1].y
oncurve_x = plist2[i + 1].x
oncurve_y = plist2[i + 1].y
outhandle_x = plist2[i + 2].x - plist2[i + 1].x
outhandle_y = plist2[i + 2].y - plist2[i + 1].y
c_inhandle = complex(inhandle_x, inhandle_y)
c_oncurve = complex(oncurve_x, oncurve_y)
c_outhandle = complex(outhandle_x, outhandle_y)
s += (join + "(make_node (" +
caml_complex(c_inhandle) + ") (" +
caml_complex(c_oncurve) + ") (" +
caml_complex(c_outhandle) + "))\n")
i += 3
s += "|> close"
return s
#--------------------------------------------------------------------------
def load_glyph_data_from_program(glyph, program):
module_name = 'program_generated_glyph_data'
pipe = subprocess.Popen([program, glyph.glyphname], stdout=subprocess.PIPE).stdout
module = imp.load_module(module_name, pipe, "<stdin>", ('', '', imp.PY_SOURCE))
def load_program_glyph_data(glyph):
program = './' + glyph.font.fontname + '_glyph_update'
load_glyph_data_from_program(glyph, program)
fontforge.registerMenuItem((lambda _, glyph: load_program_glyph_data(glyph)),
None, None, 'Glyph', 'None',
'Load glyph data from program')
#--------------------------------------------------------------------------
def print_caml_contours(glyph):
for c in glyph.layers[glyph.activeLayer]:
print(caml_path(c))
fontforge.registerMenuItem((lambda _, glyph: print_caml_contours(glyph)),
None, None, 'Glyph', 'None',
'Output OCaml contours')
#--------------------------------------------------------------------------
| 32.583893
| 95
| 0.568692
|
50e799a02a6dd5a8ff3983b4cf841850520d8f89
| 1,965
|
py
|
Python
|
setup.py
|
rll/cyres
|
c28f52130709e314914cc36c4702e6497d47994e
|
[
"BSD-2-Clause"
] | 43
|
2015-01-27T23:27:36.000Z
|
2021-08-22T14:27:11.000Z
|
setup.py
|
rll/cyres
|
c28f52130709e314914cc36c4702e6497d47994e
|
[
"BSD-2-Clause"
] | 4
|
2015-05-06T23:01:11.000Z
|
2017-04-10T13:48:21.000Z
|
setup.py
|
rll/cyres
|
c28f52130709e314914cc36c4702e6497d47994e
|
[
"BSD-2-Clause"
] | 17
|
2015-02-05T22:53:50.000Z
|
2019-08-13T03:53:46.000Z
|
from distutils.core import setup
from Cython.Distutils import Extension
from Cython.Distutils import build_ext
import numpy
import os, tempfile, subprocess, shutil
# see http://openmp.org/wp/openmp-compilers/
omp_test = r"""#include <omp.h>
#include <stdio.h>
int main() {
#pragma omp parallel
printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
}
"""
def has_openmp():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
filename = r'test.c'
    # Write the test program and close it before invoking the compiler so
    # the contents are flushed to disk.
    file = open(filename, 'w')
    file.write(omp_test)
    file.close()
    with open(os.devnull, 'w') as fnull:
        result = subprocess.call(['cc', '-fopenmp', filename], stdout=fnull,
                                 stderr=fnull)
os.chdir(curdir)
#clean up
shutil.rmtree(tmpdir)
return result == 0
ceres_include = "/usr/local/include/ceres/"
ceres_lib = "/usr/local/lib/"
gflags_lib = "/usr/local/lib/"
glog_lib = "/usr/local/lib/"
cholmod_lib = amd_lib = camd_lib = colamd_lib = "/usr/local/lib/"
cxsparse_lib = "/usr/local/lib/"
extra_compile_args = ['-O3']
extra_link_args = []
if has_openmp():
extra_compile_args = ['-fopenmp']
extra_link_args = ['-lgomp']
ext_modules = [
Extension(
"cyres",
["cyres/src/cyres.pyx", "cyres/src/cyres.pxd", "cyres/src/ceres.pxd"],
language="c++",
include_dirs=[ceres_include, numpy.get_include()],
libraries=['ceres', 'gflags', 'glog', "cholmod", "camd", "amd", "colamd", "cxsparse"],
library_dirs=[ceres_lib, gflags_lib, glog_lib, cholmod_lib, amd_lib, camd_lib, colamd_lib, cxsparse_lib],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
]
setup(
name = 'cyres',
version='0.0.1',
cmdclass = {'build_ext': build_ext},
ext_package = 'cyres',
ext_modules = ext_modules,
packages= ['cyres'],
package_data={'cyres': ['src/*.pxd']},
scripts=['scripts/cyresc']
)
| 26.554054
| 113
| 0.650891
|
031107620ab18dcd1346c565934bf0a13f111fbb
| 4,164
|
py
|
Python
|
util/xds.py
|
graeme-winter/dials
|
78a696a6591e224e73204846f39771ebac0c2668
|
[
"BSD-3-Clause"
] | 58
|
2015-10-15T09:28:20.000Z
|
2022-03-28T20:09:38.000Z
|
util/xds.py
|
graeme-winter/dials
|
78a696a6591e224e73204846f39771ebac0c2668
|
[
"BSD-3-Clause"
] | 1,741
|
2015-11-24T08:17:02.000Z
|
2022-03-31T15:46:42.000Z
|
util/xds.py
|
graeme-winter/dials
|
78a696a6591e224e73204846f39771ebac0c2668
|
[
"BSD-3-Clause"
] | 45
|
2015-10-14T13:44:16.000Z
|
2022-03-22T14:45:56.000Z
|
import logging
import os
from dxtbx.serialize import xds
from iotbx.xds import spot_xds
from scitbx import matrix
logger = logging.getLogger(__name__)
def dump(experiments, reflections, directory):
"""Dump the files in XDS format"""
if len(experiments) > 0:
for i, experiment in enumerate(experiments):
suffix = ""
if len(experiments) > 1:
suffix = "_%i" % (i + 1)
sub_dir = f"{directory}{suffix}"
if not os.path.isdir(sub_dir):
os.makedirs(sub_dir)
# XXX imageset is getting the experimental geometry from the image files
# rather than the input models.expt file
imageset = experiment.imageset
imageset.set_detector(experiment.detector)
imageset.set_beam(experiment.beam)
imageset.set_goniometer(experiment.goniometer)
imageset.set_scan(experiment.scan)
if experiment.crystal is None:
space_group_number = None
real_space_a = None
real_space_b = None
real_space_c = None
job_card = "XYCORR INIT COLSPOT IDXREF DEFPIX INTEGRATE CORRECT"
else:
crystal_model = experiment.crystal
crystal_model = crystal_model.change_basis(
crystal_model.get_space_group()
.info()
.change_of_basis_op_to_reference_setting()
)
space_group_number = crystal_model.get_space_group().type().number()
A = matrix.sqr(crystal_model.get_A())
A_inv = A.inverse()
real_space_a = A_inv.elems[:3]
real_space_b = A_inv.elems[3:6]
real_space_c = A_inv.elems[6:9]
job_card = ("XYCORR INIT DEFPIX INTEGRATE CORRECT",)
to_xds = xds.to_xds(imageset)
xds_inp = os.path.join(sub_dir, "XDS.INP")
xparm_xds = os.path.join(sub_dir, "XPARM.XDS")
logger.info("Exporting experiment to %s", xds_inp)
with open(xds_inp, "w") as f:
f.write(
to_xds.XDS_INP(
space_group_number=space_group_number,
real_space_a=real_space_a,
real_space_b=real_space_b,
real_space_c=real_space_c,
job_card=job_card,
)
)
if space_group_number:
logger.info("Exporting crystal model to %s", xparm_xds)
with open(xparm_xds, "w") as f:
f.write(
to_xds.xparm_xds(
real_space_a, real_space_b, real_space_c, space_group_number
)
)
if reflections is not None and len(reflections) > 0:
ref_cryst = reflections.select(reflections["id"] == i)
export_spot_xds(ref_cryst, os.path.join(sub_dir, "SPOT.XDS"))
else:
if not os.path.isdir(directory):
os.makedirs(directory)
export_spot_xds(reflections, os.path.join(directory, "SPOT.XDS"))
def export_spot_xds(reflections, filename):
if reflections is not None and len(reflections) > 0:
centroids = reflections["xyzobs.px.value"]
intensities = reflections["intensity.sum.value"]
miller_indices = None
if "miller_index" in reflections:
miller_indices = reflections["miller_index"]
selection = miller_indices != (0, 0, 0)
miller_indices = miller_indices.select(selection)
if len(miller_indices) == 0:
miller_indices = None
else:
centroids = centroids.select(selection)
intensities = intensities.select(selection)
xds_writer = spot_xds.writer(
centroids=centroids, intensities=intensities, miller_indices=miller_indices
)
logger.info("Exporting spot list as %s", filename)
xds_writer.write_file(filename=filename)
| 40.427184
| 88
| 0.563401
|
585838beafef5b1649e5a458fab61f9c3c1cf46d
| 19,289
|
py
|
Python
|
qiskit-runtime/qiskit_runtime/qka/qka.py
|
Avhijit-codeboy/Portfolio-Diversification
|
b71abd4365a21e4c9939bd8c52cbeb83fd496583
|
[
"Apache-2.0"
] | null | null | null |
qiskit-runtime/qiskit_runtime/qka/qka.py
|
Avhijit-codeboy/Portfolio-Diversification
|
b71abd4365a21e4c9939bd8c52cbeb83fd496583
|
[
"Apache-2.0"
] | null | null | null |
qiskit-runtime/qiskit_runtime/qka/qka.py
|
Avhijit-codeboy/Portfolio-Diversification
|
b71abd4365a21e4c9939bd8c52cbeb83fd496583
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of qiskit-runtime.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Source code for the QKA Qiskit Runtime program."""
# pylint: disable=invalid-name
import itertools
import json
import numpy as np
from numpy.random import RandomState
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.compiler import transpile
from cvxopt import matrix, solvers # pylint: disable=import-error
class FeatureMap:
"""Mapping data with the feature map."""
def __init__(self, feature_dimension, entangler_map=None):
"""
Args:
feature_dimension (int): number of features, twice the number
of qubits for this encoding
entangler_map (list[list]): connectivity of qubits with a list of [source, target],
or None for full entanglement. Note that the order in
the list is the order of applying the two-qubit gate.
Raises:
ValueError: If the value of ``feature_dimension`` is not an even integer.
"""
if isinstance(feature_dimension, int):
if feature_dimension % 2 == 0:
self._feature_dimension = feature_dimension
else:
raise ValueError("Feature dimension must be an even integer.")
else:
raise ValueError("Feature dimension must be an even integer.")
self._num_qubits = int(feature_dimension / 2)
if entangler_map is None:
self._entangler_map = [
[i, j]
for i in range(self._feature_dimension)
for j in range(i + 1, self._feature_dimension)
]
else:
self._entangler_map = entangler_map
self._num_parameters = self._num_qubits
def construct_circuit(self, x=None, parameters=None, q=None, inverse=False, name=None):
"""Construct the feature map circuit.
Args:
x (numpy.ndarray): data vector of size feature_dimension
parameters (numpy.ndarray): optional parameters in feature map
            q (QuantumRegister): the QuantumRegister object for the circuit
inverse (bool): whether or not to invert the circuit
name (str): name of circuit
Returns:
QuantumCircuit: a quantum circuit transforming data x
Raises:
ValueError: If the input parameters or vector are invalid
"""
if parameters is not None:
if isinstance(parameters, (int, float)):
raise ValueError("Parameters must be a list.")
if len(parameters) == 1:
parameters = parameters * np.ones(self._num_qubits)
else:
if len(parameters) != self._num_parameters:
raise ValueError(
"The number of feature map parameters must be {}.".format(
self._num_parameters
)
)
if len(x) != self._feature_dimension:
raise ValueError(
"The input vector must be of length {}.".format(self._feature_dimension)
)
if q is None:
q = QuantumRegister(self._num_qubits, name="q")
circuit = QuantumCircuit(q, name=name)
for i in range(self._num_qubits):
circuit.ry(-parameters[i], q[i])
for source, target in self._entangler_map:
circuit.cz(q[source], q[target])
for i in range(self._num_qubits):
circuit.rz(-2 * x[2 * i + 1], q[i])
circuit.rx(-2 * x[2 * i], q[i])
if inverse:
return circuit.inverse()
else:
return circuit
def to_json(self):
"""Return JSON representation of this object.
Returns:
str: JSON string representing this object.
"""
return json.dumps(
{"feature_dimension": self._feature_dimension, "entangler_map": self._entangler_map}
)
@classmethod
def from_json(cls, data):
"""Return an instance of this class from the JSON representation.
Args:
data (str): JSON string representing an object.
Returns:
cls: An instance of this class.
"""
return cls(**json.loads(data))
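# Minimal FeatureMap usage sketch (values assumed): four features on two qubits.
#   fm = FeatureMap(feature_dimension=4)
#   circ = fm.construct_circuit(x=np.ones(4), parameters=np.zeros(2))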
class KernelMatrix:
"""Build the kernel matrix from a quantum feature map."""
def __init__(self, feature_map, backend, initial_layout=None):
"""
Args:
feature_map: the feature map object
backend (Backend): the backend instance
initial_layout (list or dict): initial position of virtual
qubits on the physical qubits
of the quantum device
"""
self._feature_map = feature_map
self._feature_map_circuit = self._feature_map.construct_circuit
self._backend = backend
self._initial_layout = initial_layout
self.results = {}
def construct_kernel_matrix(self, x1_vec, x2_vec, parameters=None):
"""Create the kernel matrix for a given feature map and input data.
        With the qasm simulator or real backends, compute the order-n^2
        states Phi^dag(y) Phi(x) |0> for input vectors x and y.
Args:
x1_vec (numpy.ndarray): NxD array of training data or test data,
where N is the number of samples
and D is the feature dimension
x2_vec (numpy.ndarray): MxD array of training data or support
vectors, where M is the number of samples
and D is the feature dimension
parameters (numpy.ndarray): optional parameters in feature map
Returns:
numpy.ndarray: the kernel matrix
"""
is_identical = False
if np.array_equal(x1_vec, x2_vec):
is_identical = True
experiments = []
measurement_basis = "0" * self._feature_map._num_qubits
if is_identical:
my_product_list = list(
itertools.combinations(range(len(x1_vec)), 2)
) # all pairwise combos of datapoint indices
for index_1, index_2 in my_product_list:
circuit_1 = self._feature_map_circuit(
x=x1_vec[index_1], parameters=parameters, name="{}_{}".format(index_1, index_2)
)
circuit_2 = self._feature_map_circuit(
x=x1_vec[index_2], parameters=parameters, inverse=True
)
circuit = circuit_1.compose(circuit_2)
circuit.measure_all()
experiments.append(circuit)
experiments = transpile(
experiments, backend=self._backend, initial_layout=self._initial_layout
)
program_data = self._backend.run(experiments, shots=8192).result()
self.results["program_data"] = program_data
mat = np.eye(
len(x1_vec), len(x1_vec)
) # kernel matrix element on the diagonal is always 1
for experiment, [index_1, index_2] in enumerate(my_product_list):
counts = program_data.get_counts(experiment=experiment)
shots = sum(counts.values())
mat[index_1][index_2] = (
counts.get(measurement_basis, 0) / shots
) # kernel matrix element is the probability of measuring all 0s
mat[index_2][index_1] = mat[index_1][index_2] # kernel matrix is symmetric
return mat
else:
for index_1, point_1 in enumerate(x1_vec):
for index_2, point_2 in enumerate(x2_vec):
circuit_1 = self._feature_map_circuit(
x=point_1, parameters=parameters, name="{}_{}".format(index_1, index_2)
)
circuit_2 = self._feature_map_circuit(
x=point_2, parameters=parameters, inverse=True
)
circuit = circuit_1.compose(circuit_2)
circuit.measure_all()
experiments.append(circuit)
experiments = transpile(
experiments, backend=self._backend, initial_layout=self._initial_layout
)
program_data = self._backend.run(experiments, shots=8192).result()
self.results["program_data"] = program_data
mat = np.zeros((len(x1_vec), len(x2_vec)))
i = 0
for index_1, _ in enumerate(x1_vec):
for index_2, _ in enumerate(x2_vec):
counts = program_data.get_counts(experiment=i)
shots = sum(counts.values())
mat[index_1][index_2] = counts.get(measurement_basis, 0) / shots
i += 1
return mat
class QKA:
"""The quantum kernel alignment algorithm."""
def __init__(self, feature_map, backend, initial_layout=None, user_messenger=None):
"""Constructor.
Args:
feature_map (partial obj): the quantum feature map object
backend (Backend): the backend instance
initial_layout (list or dict): initial position of virtual qubits on
the physical qubits of the quantum device
user_messenger (UserMessenger): used to publish interim results.
"""
self.feature_map = feature_map
self.feature_map_circuit = self.feature_map.construct_circuit
self.backend = backend
self.initial_layout = initial_layout
self.num_parameters = self.feature_map._num_parameters
self._user_messenger = user_messenger
self.result = {}
self.kernel_matrix = KernelMatrix(
feature_map=self.feature_map, backend=self.backend, initial_layout=self.initial_layout
)
def spsa_parameters(self):
"""Return array of precomputed SPSA parameters.
        At the i-th optimization step, i >= 0, the parameters evolve as
            a_i = a / (i + 1 + A) ** alpha,
            c_i = c / (i + 1) ** gamma,
        for fixed coefficients a, c, alpha, gamma, A.
Returns:
numpy.ndarray: spsa parameters
"""
spsa_params = np.zeros((5))
spsa_params[0] = 0.05 # a
spsa_params[1] = 0.1 # c
spsa_params[2] = 0.602 # alpha
spsa_params[3] = 0.101 # gamma
spsa_params[4] = 0 # A
return spsa_params
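    # With these defaults the first-step gains are a_0 = 0.05 and c_0 = 0.1,
    # since (0 + 1 + A) ** alpha = 1 and (0 + 1) ** gamma = 1.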
def cvxopt_solver(self, K, y, C, max_iters=10000, show_progress=False):
"""Convex optimization of SVM objective using cvxopt.
Args:
K (numpy.ndarray): nxn kernel (Gram) matrix
y (numpy.ndarray): nx1 vector of labels +/-1
C (float): soft-margin penalty
max_iters (int): maximum iterations for the solver
show_progress (bool): print progress of solver
Returns:
dict: results from the solver
"""
if y.ndim == 1:
y = y[:, np.newaxis]
H = np.outer(y, y) * K
f = -np.ones(y.shape)
n = K.shape[1] # number of training points
y = y.astype("float")
P = matrix(H)
q = matrix(f)
G = matrix(np.vstack((-np.eye((n)), np.eye((n)))))
h = matrix(np.vstack((np.zeros((n, 1)), np.ones((n, 1)) * C)))
A = matrix(y, y.T.shape)
b = matrix(np.zeros(1), (1, 1))
solvers.options["maxiters"] = max_iters
solvers.options["show_progress"] = show_progress
ret = solvers.qp(P, q, G, h, A, b, kktsolver="ldl")
return ret
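    # Usage sketch (variable names assumed): the Lagrange multipliers live
    # under cvxopt's "x" key, e.g.
    #   ret = self.cvxopt_solver(K, y, C=1.0)
    #   alphas = np.array(ret["x"]).flatten()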
def spsa_step_one(self, lambdas, spsa_params, count):
"""Evaluate +/- perturbations of kernel parameters (lambdas).
Args:
lambdas (numpy.ndarray): kernel parameters at step 'count' in SPSA optimization loop
spsa_params (numpy.ndarray): SPSA parameters
count (int): the current step in the SPSA optimization loop
Returns:
numpy.ndarray: kernel parameters in + direction
numpy.ndarray: kernel parameters in - direction
numpy.ndarray: random vector with elements {-1,1}
"""
prng = RandomState(count)
c_spsa = float(spsa_params[1]) / np.power(count + 1, spsa_params[3])
delta = 2 * prng.randint(0, 2, size=np.shape(lambdas)[0]) - 1
lambda_plus = lambdas + c_spsa * delta
lambda_minus = lambdas - c_spsa * delta
return lambda_plus, lambda_minus, delta
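    # For example, at count = 0 with the default spsa_params, c_spsa = 0.1, so
    # each kernel parameter is nudged by +/-0.1 along a random +/-1 direction.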
def spsa_step_two(self, cost_plus, cost_minus, lambdas, spsa_params, delta, count):
"""Evaluate one iteration of SPSA on SVM objective function F and
return updated kernel parameters.
F(alpha, lambda) = 1^T * alpha - (1/2) * alpha^T * Y * K * Y * alpha
Args:
cost_plus (float): objective function F(alpha_+, lambda_+)
cost_minus (float): objective function F(alpha_-, lambda_-)
lambdas (numpy.ndarray): kernel parameters at step 'count' in SPSA optimization loop
spsa_params (numpy.ndarray): SPSA parameters
delta (numpy.ndarray): random vector with elements {-1,1}
count(int): the current step in the SPSA optimization loop
Returns:
float: estimate of updated SVM objective function F using average
of F(alpha_+, lambda_+) and F(alpha_-, lambda_-)
numpy.ndarray: updated values of the kernel parameters
after one SPSA optimization step
"""
a_spsa = float(spsa_params[0]) / np.power(count + 1 + spsa_params[4], spsa_params[2])
c_spsa = float(spsa_params[1]) / np.power(count + 1, spsa_params[3])
g_spsa = (cost_plus - cost_minus) * delta / (2.0 * c_spsa)
lambdas_new = lambdas - a_spsa * g_spsa
lambdas_new = lambdas_new.flatten()
cost_final = (cost_plus + cost_minus) / 2
return cost_final, lambdas_new
def align_kernel(self, data, labels, initial_kernel_parameters=None, maxiters=1, C=1):
"""Align the quantum kernel.
Uses SPSA for minimization over kernel parameters (lambdas) and
convex optimization for maximization over lagrange multipliers (alpha):
min_lambda max_alpha 1^T * alpha - (1/2) * alpha^T * Y * K_lambda * Y * alpha
Args:
data (numpy.ndarray): NxD array of training data, where N is the
number of samples and D is the feature dimension
labels (numpy.ndarray): Nx1 array of +/-1 labels of the N training samples
initial_kernel_parameters (numpy.ndarray): Initial parameters of the quantum kernel
maxiters (int): number of SPSA optimization steps
C (float): penalty parameter for the soft-margin support vector machine
Returns:
dict: the results of kernel alignment
"""
if initial_kernel_parameters is not None:
lambdas = initial_kernel_parameters
else:
lambdas = np.random.uniform(-1.0, 1.0, size=(self.num_parameters))
spsa_params = self.spsa_parameters()
lambda_save = []
cost_final_save = []
for count in range(maxiters):
lambda_plus, lambda_minus, delta = self.spsa_step_one(
lambdas=lambdas, spsa_params=spsa_params, count=count
)
kernel_plus = self.kernel_matrix.construct_kernel_matrix(
x1_vec=data, x2_vec=data, parameters=lambda_plus
)
kernel_minus = self.kernel_matrix.construct_kernel_matrix(
x1_vec=data, x2_vec=data, parameters=lambda_minus
)
ret_plus = self.cvxopt_solver(K=kernel_plus, y=labels, C=C)
cost_plus = -1 * ret_plus["primal objective"]
ret_minus = self.cvxopt_solver(K=kernel_minus, y=labels, C=C)
cost_minus = -1 * ret_minus["primal objective"]
cost_final, lambda_best = self.spsa_step_two(
cost_plus=cost_plus,
cost_minus=cost_minus,
lambdas=lambdas,
spsa_params=spsa_params,
delta=delta,
count=count,
)
lambdas = lambda_best
interim_result = {"cost": cost_final, "kernel_parameters": lambdas}
self._user_messenger.publish(interim_result)
lambda_save.append(lambdas)
cost_final_save.append(cost_final)
# Evaluate aligned kernel matrix with optimized set of
# parameters averaged over last 10% of SPSA steps:
num_last_lambdas = int(len(lambda_save) * 0.10)
if num_last_lambdas > 0:
last_lambdas = np.array(lambda_save)[-num_last_lambdas:, :]
lambdas = np.sum(last_lambdas, axis=0) / num_last_lambdas
else:
lambdas = np.array(lambda_save)[-1, :]
kernel_best = self.kernel_matrix.construct_kernel_matrix(
x1_vec=data, x2_vec=data, parameters=lambdas
)
self.result["aligned_kernel_parameters"] = lambdas
self.result["aligned_kernel_matrix"] = kernel_best
return self.result
def main(backend, user_messenger, **kwargs):
"""Entry function."""
# Reconstruct the feature map object.
feature_map = kwargs.get("feature_map")
fm = FeatureMap.from_json(feature_map)
data = kwargs.get("data")
labels = kwargs.get("labels")
initial_kernel_parameters = kwargs.get("initial_kernel_parameters", None)
maxiters = kwargs.get("maxiters", 1)
C = kwargs.get("C", 1)
initial_layout = kwargs.get("initial_layout", None)
qka = QKA(
feature_map=fm,
backend=backend,
initial_layout=initial_layout,
user_messenger=user_messenger,
)
qka_results = qka.align_kernel(
data=data,
labels=labels,
initial_kernel_parameters=initial_kernel_parameters,
maxiters=maxiters,
C=C,
)
user_messenger.publish(qka_results, final=True)
| 37.165703
| 100
| 0.574317
|
c698a83d19f69e3335398b295bba1bbcadbbe595
| 126
|
py
|
Python
|
Python/Project Euler/Maximum Path Sum 1/max_sum.py
|
ZacJoffe/competitive-programming
|
8150c9e12198500d8f57c6281f268d8027e7c318
|
[
"MIT"
] | null | null | null |
Python/Project Euler/Maximum Path Sum 1/max_sum.py
|
ZacJoffe/competitive-programming
|
8150c9e12198500d8f57c6281f268d8027e7c318
|
[
"MIT"
] | null | null | null |
Python/Project Euler/Maximum Path Sum 1/max_sum.py
|
ZacJoffe/competitive-programming
|
8150c9e12198500d8f57c6281f268d8027e7c318
|
[
"MIT"
] | null | null | null |
file = open("path.txt")
path = []
for line in file:
path.append(line)
print(maxSum(path))
def maxSum(path):
| 11.454545
| 24
| 0.587302
|
961561eeab63fce882b809aaaed19634b1d9e923
| 7,344
|
py
|
Python
|
databaseConn.py
|
justin-oxford/dee-dee-reddit
|
9e7b7f15327984ac65bbf93c8e3154de3dc8699b
|
[
"MIT"
] | null | null | null |
databaseConn.py
|
justin-oxford/dee-dee-reddit
|
9e7b7f15327984ac65bbf93c8e3154de3dc8699b
|
[
"MIT"
] | null | null | null |
databaseConn.py
|
justin-oxford/dee-dee-reddit
|
9e7b7f15327984ac65bbf93c8e3154de3dc8699b
|
[
"MIT"
] | null | null | null |
import datetime
from mongoengine import *
import timeit
DB_URI = "MONGO"
# DEPRECATED: high/low spreads over different time-frames
class SpreadField(EmbeddedDocument):
high = FloatField(min_value=0.0, default=0.0)
low = FloatField(min_value=0.0, default=9999.0)
# These are the individual points containing time-related data
class PointField(EmbeddedDocument):
price = DecimalField(min_value=0.0)
price_h = DecimalField(min_value=0.0)
price_l = DecimalField(min_value=0.0)
price_o = DecimalField(min_value=0.0)
price_pc = DecimalField(min_value=0.0)
vol_1 = DecimalField(min_value=0.0)
vol_2 = DecimalField(min_value=0.0)
vol_3 = DecimalField(min_value=0.0)
vol_4 = DecimalField(min_value=0.0)
hi_1 = DecimalField(min_value=0.0)
lo_1 = DecimalField(min_value=0.0)
hi_2 = DecimalField(min_value=0.0)
lo_2 = DecimalField(min_value=0.0)
hot_mentions = IntField(min_value=0)
new_mentions = IntField(min_value=0)
comment_mentions = IntField(min_value=0)
poll_time = DateTimeField(default=datetime.datetime.now)
# dee dee ML stats
class DeeDeeMLStats(EmbeddedDocument):
delta_up = DecimalField(default=0.0)
delta_dn = DecimalField(default=0.0)
bull_ahead = DecimalField(default=0.0)
bear_ahead = DecimalField(default=0.0)
delta_cl = DecimalField(default=0.0)
close_up = DecimalField(default=0.0)
close_dn = DecimalField(default=0.0)
delta_o = DecimalField(default=0.0)
open_up = DecimalField(default=0.0)
open_dn = DecimalField(default=0.0)
# Data-type Definitions
class DeeDeeData(Document):
symbol = StringField(required=True)
date_added = DateTimeField(default=datetime.datetime.now)
    daily_spread = EmbeddedDocumentField(SpreadField)  # DEPRECATED
    weekly_spread = EmbeddedDocumentField(SpreadField)  # DEPRECATED
    monthly_spread = EmbeddedDocumentField(SpreadField)  # DEPRECATED
points = EmbeddedDocumentListField(PointField)
is_active = BooleanField(required=True)
active_track = IntField(required=True)
r_index = IntField(required=True)
ml_stats = EmbeddedDocumentField(DeeDeeMLStats)
def db_connect():
connect(host=DB_URI)
def db_post(data):
db_connect()
for symbol in data:
# add if not in database
print(symbol)
start = timeit.default_timer()
R_INDEX = data[symbol]['point']['hot_mentions'] + data[symbol]['point']['new_mentions'] + data[symbol]['point'][
'comment_mentions']
if not DeeDeeData.objects(symbol=data[symbol]["symbol"]):
print(" Adding " + data[symbol]['symbol'] + " to the database.")
dep_spread = SpreadField( # initialize this to the price for the first pull
high=0.0,
low=0.0
)
point = PointField(
price=data[symbol]['point']['price'],
price_h=data[symbol]['point']['price_high'],
price_l=data[symbol]['point']['price_low'],
price_o=data[symbol]['point']['price_open'],
price_pc=data[symbol]['point']['price_pvcl'],
vol_1=data[symbol]['point']['volume_1'],
vol_2=data[symbol]['point']['volume_2'],
vol_3=data[symbol]['point']['volume_3'],
vol_4=data[symbol]['point']['volume_4'],
hi_1=data[symbol]['point']['high_1'],
lo_1=data[symbol]['point']['low_1'],
hi_2=data[symbol]['point']['high_2'],
lo_2=data[symbol]['point']['low_2'],
hot_mentions=data[symbol]['point']['hot_mentions'],
new_mentions=data[symbol]['point']['new_mentions'],
comment_mentions=data[symbol]['point']['comment_mentions'],
poll_time=datetime.datetime.now()
)
post_ml = DeeDeeMLStats(
delta_up=0.0,
delta_dn=0.0,
bull_ahead=0.0,
bear_ahead=0.0,
delta_cl=0.0,
close_up=0.0,
close_dn=0.0,
delta_o=0.0,
open_up=0.0,
open_dn=0.0
)
post = DeeDeeData(
symbol=data[symbol]['symbol'],
date_added=datetime.datetime.now(),
daily_spread=dep_spread,
weekly_spread=dep_spread,
monthly_spread=dep_spread,
points=[point],
is_active=True,
active_track=1,
r_index=R_INDEX,
ml_stats=post_ml
)
post.save()
stop = timeit.default_timer()
print(" ...Done (" + str(stop - start) + ")")
# update if is in database
else:
print(" Updating " + data[symbol]['symbol'] + "...")
point = PointField(
price=data[symbol]['point']['price'],
price_h=data[symbol]['point']['price_high'],
price_l=data[symbol]['point']['price_low'],
price_o=data[symbol]['point']['price_open'],
price_pc=data[symbol]['point']['price_pvcl'],
vol_1=data[symbol]['point']['volume_1'],
vol_2=data[symbol]['point']['volume_2'],
vol_3=data[symbol]['point']['volume_3'],
vol_4=data[symbol]['point']['volume_4'],
hi_1=data[symbol]['point']['high_1'],
lo_1=data[symbol]['point']['low_1'],
hi_2=data[symbol]['point']['high_2'],
lo_2=data[symbol]['point']['low_2'],
hot_mentions=data[symbol]['point']['hot_mentions'],
new_mentions=data[symbol]['point']['new_mentions'],
comment_mentions=data[symbol]['point']['comment_mentions'],
poll_time=datetime.datetime.now()
)
DeeDeeData.objects(symbol=data[symbol]['symbol']).update_one(push__points=point)
DeeDeeData.objects(symbol=data[symbol]['symbol']).update_one(is_active=True)
DeeDeeData.objects(symbol=data[symbol]['symbol']).update_one(active_track=1)
DeeDeeData.objects(symbol=data[symbol]['symbol']).update_one(r_index=R_INDEX)
stop = timeit.default_timer()
print(" ...Done (" + str(stop - start) + ")")
def db_post_ml_data(
symbol,
ml_data_delta_up,
ml_data_delta_dn,
ml_data_bull_ahead,
ml_data_bear_ahead,
ml_data_delta_cl,
ml_data_close_up,
ml_data_close_dn,
ml_data_delta_o,
ml_data_open_up,
ml_data_open_dn
):
post_ml = DeeDeeMLStats(
delta_up=ml_data_delta_up,
delta_dn=ml_data_delta_dn,
bull_ahead=ml_data_bull_ahead,
bear_ahead=ml_data_bear_ahead,
delta_cl=ml_data_delta_cl,
close_up=ml_data_close_up,
close_dn=ml_data_close_dn,
delta_o=ml_data_delta_o,
open_up=ml_data_open_up,
open_dn=ml_data_open_dn
)
DeeDeeData.objects(symbol=symbol).update_one(ml_stats=post_ml, upsert=True)
| 40.131148
| 121
| 0.580338
|
528c25064b8fcbb433350aca3e22f2b5c661e208
| 3,937
|
py
|
Python
|
src/Python/Images/BackgroundImage.py
|
sankhesh/vtk-examples
|
2d50e847ad62ce0eb71b66c029ad8abb302cd39f
|
[
"Apache-2.0"
] | null | null | null |
src/Python/Images/BackgroundImage.py
|
sankhesh/vtk-examples
|
2d50e847ad62ce0eb71b66c029ad8abb302cd39f
|
[
"Apache-2.0"
] | null | null | null |
src/Python/Images/BackgroundImage.py
|
sankhesh/vtk-examples
|
2d50e847ad62ce0eb71b66c029ad8abb302cd39f
|
[
"Apache-2.0"
] | 1
|
2022-02-16T08:20:41.000Z
|
2022-02-16T08:20:41.000Z
|
#!/usr/bin/env python
import vtk
def get_program_parameters():
import argparse
description = 'Add a background image at a render window.'
epilogue = '''
Add a background image to a render window.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue)
parser.add_argument('filename', default=None, type=str, nargs='?', help='A required filename.')
args = parser.parse_args()
return args.filename
def main():
colors = vtk.vtkNamedColors()
# Verify input arguments
fn = get_program_parameters()
if fn:
# Read the image
jpeg_reader = vtk.vtkJPEGReader()
if not jpeg_reader.CanReadFile(fn):
print('Error reading file:', fn)
return
jpeg_reader.SetFileName(fn)
jpeg_reader.Update()
image_data = jpeg_reader.GetOutput()
else:
canvas_source = vtk.vtkImageCanvasSource2D()
canvas_source.SetExtent(0, 100, 0, 100, 0, 0)
canvas_source.SetScalarTypeToUnsignedChar()
canvas_source.SetNumberOfScalarComponents(3)
canvas_source.SetDrawColor(colors.GetColor4ub('warm_grey'))
canvas_source.FillBox(0, 100, 0, 100)
canvas_source.SetDrawColor(colors.GetColor4ub('DarkCyan'))
canvas_source.FillTriangle(10, 10, 25, 10, 25, 25)
canvas_source.SetDrawColor(colors.GetColor4ub('LightCoral'))
canvas_source.FillTube(75, 75, 0, 75, 5.0)
canvas_source.Update()
image_data = canvas_source.GetOutput()
# Create an image actor to display the image
image_actor = vtk.vtkImageActor()
image_actor.SetInputData(image_data)
# Create a renderer to display the image in the background
background_renderer = vtk.vtkRenderer()
# Create a superquadric
superquadric_source = vtk.vtkSuperquadricSource()
superquadric_source.SetPhiRoundness(1.1)
superquadric_source.SetThetaRoundness(.2)
# Create a mapper and actor
superquadric_mapper = vtk.vtkPolyDataMapper()
superquadric_mapper.SetInputConnection(superquadric_source.GetOutputPort())
superquadric_actor = vtk.vtkActor()
superquadric_actor.SetMapper(superquadric_mapper)
superquadric_actor.GetProperty().SetColor(colors.GetColor3d('NavajoWhite'))
scene_renderer = vtk.vtkRenderer()
render_window = vtk.vtkRenderWindow()
# Set up the render window and renderers such that there is
# a background layer and a foreground layer
background_renderer.SetLayer(0)
background_renderer.InteractiveOff()
scene_renderer.SetLayer(1)
render_window.SetNumberOfLayers(2)
render_window.AddRenderer(background_renderer)
render_window.AddRenderer(scene_renderer)
render_window.SetWindowName('BackgroundImage')
render_window_interactor = vtk.vtkRenderWindowInteractor()
render_window_interactor.SetRenderWindow(render_window)
# Add actors to the renderers
scene_renderer.AddActor(superquadric_actor)
background_renderer.AddActor(image_actor)
# Render once to figure out where the background camera will be
render_window.Render()
# Set up the background camera to fill the renderer with the image
origin = image_data.GetOrigin()
spacing = image_data.GetSpacing()
extent = image_data.GetExtent()
camera = background_renderer.GetActiveCamera()
camera.ParallelProjectionOn()
xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]
yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]
# xd = (extent[1] - extent[0] + 1) * spacing[0]
yd = (extent[3] - extent[2] + 1) * spacing[1]
d = camera.GetDistance()
camera.SetParallelScale(0.5 * yd)
camera.SetFocalPoint(xc, yc, 0.0)
camera.SetPosition(xc, yc, d)
# Render again to set the correct view
render_window.Render()
# Interact with the window
render_window_interactor.Start()
if __name__ == '__main__':
main()
| 33.939655
| 99
| 0.705359
|
025f9a29b17e1bd7ddc17157570d887c020aeea8
| 794
|
py
|
Python
|
challenge_2/python/lepmets/src/challenge_2.py
|
rchicoli/2017-challenges
|
44f0b672e5dea34de1dde131b6df837d462f8e29
|
[
"Apache-2.0"
] | 271
|
2017-01-01T22:58:36.000Z
|
2021-11-28T23:05:29.000Z
|
challenge_2/python/lepmets/src/challenge_2.py
|
AakashOfficial/2017Challenges
|
a8f556f1d5b43c099a0394384c8bc2d826f9d287
|
[
"Apache-2.0"
] | 283
|
2017-01-01T23:26:05.000Z
|
2018-03-23T00:48:55.000Z
|
challenge_2/python/lepmets/src/challenge_2.py
|
AakashOfficial/2017Challenges
|
a8f556f1d5b43c099a0394384c8bc2d826f9d287
|
[
"Apache-2.0"
] | 311
|
2017-01-01T22:59:23.000Z
|
2021-09-23T00:29:12.000Z
|
#! /usr/bin/python3
"""Challenge #2 in Python."""
# coding: utf-8
list_with_chars = [2, 'a', 'l', 3, 'l', 4, 'k', 2, 3, 4, 'a', 6,
'c', 4, 'm', 6, 'm', 'k', 9, 10, 9, 8, 7, 8, 10, 7]
list_with_numbers = [2, 3, 4, 2, 3, 5, 4, 6, 4, 6, 9, 10, 9, 8, 7, 8, 10, 7]
def search_list(list_provided):
"""Search list provided for characters that are represented only once."""
    for i in range(len(list_provided)):
        # If the same value is not present before or after position i in the
        # list, it occurs exactly once, so return it.
        if list_provided[i] not in list_provided[i + 1:] and list_provided[i] not in list_provided[:i]:
            return str(list_provided[i])
print(search_list(list_with_numbers) + ', ' + search_list(list_with_chars))
| 34.521739
| 103
| 0.579345
|
9fd8b989c310122879a79e00ce4f11a7ad6fe8af
| 2,593
|
py
|
Python
|
SprityBird/spritybird/python3.5/lib/python3.5/site-packages/ggplot/__init__.py
|
MobileAnalytics/iPython-Framework
|
da0e598308c067cd5c5290a6364b3ffaf2d2418f
|
[
"MIT"
] | 4
|
2018-07-04T17:20:12.000Z
|
2019-07-14T18:07:25.000Z
|
SprityBird/spritybird/python3.5/lib/python3.5/site-packages/ggplot/__init__.py
|
MobileAnalytics/iPython-Framework
|
da0e598308c067cd5c5290a6364b3ffaf2d2418f
|
[
"MIT"
] | null | null | null |
SprityBird/spritybird/python3.5/lib/python3.5/site-packages/ggplot/__init__.py
|
MobileAnalytics/iPython-Framework
|
da0e598308c067cd5c5290a6364b3ffaf2d2418f
|
[
"MIT"
] | 1
|
2018-09-03T03:02:06.000Z
|
2018-09-03T03:02:06.000Z
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__version__ = '0.11.1'
# For testing purposes we might need to set mpl backend before any
# other import of matplotlib.
def _set_mpl_backend():
import os
import matplotlib as mpl
env_backend = os.environ.get('MATPLOTLIB_BACKEND')
if env_backend:
# we were instructed
mpl.use(env_backend)
_set_mpl_backend()
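# For instance, a headless test run can pin the backend before pyplot loads:
#   MATPLOTLIB_BACKEND=Agg python -c "import ggplot"
# (Agg is one safe non-interactive choice; any installed backend works.)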
from .geoms import geom_area, geom_blank, geom_boxplot, geom_line, geom_point, geom_jitter, geom_histogram, geom_density, geom_hline, geom_vline, geom_bar, geom_abline, geom_tile, geom_rect, geom_bin2d, geom_step, geom_text, geom_path, geom_ribbon, geom_now_its_art, geom_violin, geom_errorbar, geom_polygon
from .stats import stat_smooth, stat_density
from .facets import facet_wrap, facet_grid, Facet
from .chart_components import ggtitle, xlim, ylim, xlab, ylab, labs
from .ggplot import ggplot
from .qplot import qplot
from .aes import aes
from .coords.coords import coord_polar, coord_equal, coord_flip
from .datasets import chopsticks, diamonds, mtcars, meat, pageviews, pigeons, movies, mpg, salmon, load_world
from .scales.scale_color_brewer import scale_color_brewer
from .scales.scale_color_crayon import scale_color_crayon
from .scales.scale_color_funfetti import scale_color_funfetti
from .scales.scale_color_manual import scale_color_manual
from .scales.scale_color_gradient import scale_color_gradient
from .scales.scale_color_yhat import scale_color_yhat
from .scales.scale_fill_brewer import scale_fill_brewer
from .scales.scale_fill_crayon import scale_fill_crayon
from .scales.scale_fill_funfetti import scale_fill_funfetti
from .scales.scale_fill_manual import scale_fill_manual
from .scales.scale_fill_yhat import scale_fill_yhat
from .scales.scale_identity import scale_identity, scale_alpha_identity, scale_color_identity, scale_fill_identity, scale_linetype_identity, scale_shape_identity, scale_size_identity
from .scales.scale_log import scale_x_log, scale_y_log
from .scales.scale_reverse import scale_x_reverse, scale_y_reverse
from .scales.scale_x_continuous import scale_x_continuous
from .scales.scale_y_continuous import scale_y_continuous
from .scales.scale_x_discrete import scale_x_discrete
from .scales.scale_y_discrete import scale_y_discrete
from .scales.scale_x_date import scale_x_date
from .scales.scale_y_date import scale_y_date
from .scales.date_utils import date_format, date_breaks
from .themes import theme, theme_538, theme_gray, theme_bw, theme_xkcd
from .themes import element_text
| 43.216667
| 307
| 0.831469
|
4cd101cf9b2dce2b225ac9ea266afde530ec3f1e
| 1,846
|
py
|
Python
|
amadeus/namespaces/_shopping.py
|
siddydutta/amadeus-python
|
7e8b399e22c9ff4c3e4557843b52ea14844f135a
|
[
"MIT"
] | null | null | null |
amadeus/namespaces/_shopping.py
|
siddydutta/amadeus-python
|
7e8b399e22c9ff4c3e4557843b52ea14844f135a
|
[
"MIT"
] | null | null | null |
amadeus/namespaces/_shopping.py
|
siddydutta/amadeus-python
|
7e8b399e22c9ff4c3e4557843b52ea14844f135a
|
[
"MIT"
] | null | null | null |
from amadeus.client.decorator import Decorator
from amadeus.shopping._flight_dates import FlightDates
from amadeus.shopping._flight_destinations import FlightDestinations
from amadeus.shopping._flight_offers import FlightOffers
from amadeus.shopping._flight_offers_search import FlightOffersSearch
from amadeus.shopping._hotel_offers import HotelOffers
from amadeus.shopping._hotel_offers_by_hotel import HotelOffersByHotel
from amadeus.shopping._hotel_offer import HotelOffer
from amadeus.shopping._seatmaps import Seatmaps
from amadeus.shopping._activities import Activities
from amadeus.shopping._activity import Activity
from amadeus.shopping._availability import Availability
from amadeus.shopping._hotel_offer_search import HotelOfferSearch
from amadeus.shopping._hotel_offers_search import HotelOffersSearch
class Shopping(Decorator, object):
def __init__(self, client):
Decorator.__init__(self, client)
self.flight_dates = FlightDates(client)
self.flight_destinations = FlightDestinations(client)
self.flight_offers = FlightOffers(client)
self.hotel_offers = HotelOffers(client)
self.hotel_offers_by_hotel = HotelOffersByHotel(client)
self.flight_offers_search = FlightOffersSearch(client)
self.seatmaps = Seatmaps(client)
self.activities = Activities(client)
self.availability = Availability(client)
self.hotel_offers_search = HotelOffersSearch(client)
def hotel_offer(self, offer_id):
return HotelOffer(self.client, offer_id)
def hotel_offer_search(self, offer_id):
return HotelOfferSearch(self.client, offer_id)
def activity(self, activity_id):
return Activity(self.client, activity_id)
__all__ = ['FlightDates', 'FlightDestinations', 'FlightOffers',
'FlightOffersSearch', 'Availability']
| 42.930233
| 70
| 0.790899
|
9c3d73b43b55fad51a99616d9377303bbc21ec24
| 16,586
|
py
|
Python
|
homeassistant/components/tplink/light.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 6
|
2020-07-18T16:33:25.000Z
|
2021-09-26T09:52:04.000Z
|
homeassistant/components/tplink/light.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 47
|
2020-07-23T07:14:33.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/tplink/light.py
|
klauern/home-assistant-core
|
c18ba6aec0627e6afb6442c678edb5ff2bb17db6
|
[
"Apache-2.0"
] | 5
|
2020-03-29T00:29:13.000Z
|
2021-09-06T20:58:40.000Z
|
"""Support for TPLink lights."""
from datetime import timedelta
import logging
import time
from typing import Any, Dict, NamedTuple, Tuple, cast
from pyHS100 import SmartBulb, SmartDeviceException
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
import homeassistant.util.dt as dt_util
from . import CONF_LIGHT, DOMAIN as TPLINK_DOMAIN
from .common import async_add_entities_retry
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=5)
CURRENT_POWER_UPDATE_INTERVAL = timedelta(seconds=60)
HISTORICAL_POWER_UPDATE_INTERVAL = timedelta(minutes=60)
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_POWER_W = "current_power_w"
ATTR_DAILY_ENERGY_KWH = "daily_energy_kwh"
ATTR_MONTHLY_ENERGY_KWH = "monthly_energy_kwh"
LIGHT_STATE_DFT_ON = "dft_on_state"
LIGHT_STATE_ON_OFF = "on_off"
LIGHT_STATE_RELAY_STATE = "relay_state"
LIGHT_STATE_BRIGHTNESS = "brightness"
LIGHT_STATE_COLOR_TEMP = "color_temp"
LIGHT_STATE_HUE = "hue"
LIGHT_STATE_SATURATION = "saturation"
LIGHT_STATE_ERROR_MSG = "err_msg"
LIGHT_SYSINFO_MAC = "mac"
LIGHT_SYSINFO_ALIAS = "alias"
LIGHT_SYSINFO_MODEL = "model"
LIGHT_SYSINFO_IS_DIMMABLE = "is_dimmable"
LIGHT_SYSINFO_IS_VARIABLE_COLOR_TEMP = "is_variable_color_temp"
LIGHT_SYSINFO_IS_COLOR = "is_color"
async def async_setup_entry(hass: HomeAssistantType, config_entry, async_add_entities):
"""Set up switches."""
await async_add_entities_retry(
hass, async_add_entities, hass.data[TPLINK_DOMAIN][CONF_LIGHT], add_entity
)
return True
def add_entity(device: SmartBulb, async_add_entities):
"""Check if device is online and add the entity."""
# Attempt to get the sysinfo. If it fails, it will raise an
# exception that is caught by async_add_entities_retry which
# will try again later.
device.get_sysinfo()
async_add_entities([TPLinkSmartBulb(device)], update_before_add=True)
def brightness_to_percentage(byt):
"""Convert brightness from absolute 0..255 to percentage."""
return round((byt * 100.0) / 255.0)
def brightness_from_percentage(percent):
"""Convert percentage to absolute value 0..255."""
return round((percent * 255.0) / 100.0)
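# Round-trip sanity check for the two helpers above:
#   brightness_to_percentage(255) -> 100, brightness_from_percentage(100) -> 255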
class LightState(NamedTuple):
"""Light state."""
state: bool
brightness: int
color_temp: float
hs: Tuple[int, int]
def to_param(self):
"""Return a version that we can send to the bulb."""
if self.color_temp:
color_temp = mired_to_kelvin(self.color_temp)
else:
color_temp = None
return {
LIGHT_STATE_ON_OFF: 1 if self.state else 0,
LIGHT_STATE_BRIGHTNESS: brightness_to_percentage(self.brightness),
LIGHT_STATE_COLOR_TEMP: color_temp,
LIGHT_STATE_HUE: self.hs[0] if self.hs else 0,
LIGHT_STATE_SATURATION: self.hs[1] if self.hs else 0,
}
class LightFeatures(NamedTuple):
"""Light features."""
sysinfo: Dict[str, Any]
mac: str
alias: str
model: str
supported_features: int
min_mireds: float
max_mireds: float
has_emeter: bool
class TPLinkSmartBulb(LightEntity):
"""Representation of a TPLink Smart Bulb."""
def __init__(self, smartbulb: SmartBulb) -> None:
"""Initialize the bulb."""
self.smartbulb = smartbulb
self._light_features = cast(LightFeatures, None)
self._light_state = cast(LightState, None)
self._is_available = True
self._is_setting_light_state = False
self._last_current_power_update = None
self._last_historical_power_update = None
self._emeter_params = {}
@property
def unique_id(self):
"""Return a unique ID."""
return self._light_features.mac
@property
def name(self):
"""Return the name of the Smart Bulb."""
return self._light_features.alias
@property
def device_info(self):
"""Return information about the device."""
return {
"name": self._light_features.alias,
"model": self._light_features.model,
"manufacturer": "TP-Link",
"connections": {(dr.CONNECTION_NETWORK_MAC, self._light_features.mac)},
"sw_version": self._light_features.sysinfo["sw_ver"],
}
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self._is_available
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._emeter_params
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = int(kwargs[ATTR_BRIGHTNESS])
elif self._light_state.brightness is not None:
brightness = self._light_state.brightness
else:
brightness = 255
if ATTR_COLOR_TEMP in kwargs:
color_tmp = int(kwargs[ATTR_COLOR_TEMP])
else:
color_tmp = self._light_state.color_temp
if ATTR_HS_COLOR in kwargs:
# TP-Link requires integers.
hue_sat = tuple(int(val) for val in kwargs[ATTR_HS_COLOR])
# TP-Link cannot have both color temp and hue_sat
color_tmp = 0
else:
hue_sat = self._light_state.hs
await self._async_set_light_state_retry(
self._light_state,
self._light_state._replace(
state=True, brightness=brightness, color_temp=color_tmp, hs=hue_sat,
),
)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._async_set_light_state_retry(
self._light_state, self._light_state._replace(state=False),
)
@property
def min_mireds(self):
"""Return minimum supported color temperature."""
return self._light_features.min_mireds
@property
def max_mireds(self):
"""Return maximum supported color temperature."""
return self._light_features.max_mireds
@property
def color_temp(self):
"""Return the color temperature of this light in mireds for HA."""
return self._light_state.color_temp
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._light_state.brightness
@property
def hs_color(self):
"""Return the color."""
return self._light_state.hs
@property
def is_on(self):
"""Return True if device is on."""
return self._light_state.state
def update(self):
"""Update the TP-Link Bulb's state."""
# State is currently being set, ignore.
if self._is_setting_light_state:
return
try:
# Update light features only once.
if not self._light_features:
self._light_features = self._get_light_features_retry()
self._light_state = self._get_light_state_retry()
self._is_available = True
except (SmartDeviceException, OSError) as ex:
if self._is_available:
_LOGGER.warning(
"Could not read data for %s: %s", self.smartbulb.host, ex
)
self._is_available = False
@property
def supported_features(self):
"""Flag supported features."""
return self._light_features.supported_features
def _get_light_features_retry(self) -> LightFeatures:
"""Retry the retrieval of the supported features."""
try:
return self._get_light_features()
except (SmartDeviceException, OSError):
pass
_LOGGER.debug("Retrying getting light features")
return self._get_light_features()
def _get_light_features(self):
"""Determine all supported features in one go."""
sysinfo = self.smartbulb.sys_info
supported_features = 0
# Calling api here as it reformats
mac = self.smartbulb.mac
alias = sysinfo[LIGHT_SYSINFO_ALIAS]
model = sysinfo[LIGHT_SYSINFO_MODEL]
min_mireds = None
max_mireds = None
has_emeter = self.smartbulb.has_emeter
if sysinfo.get(LIGHT_SYSINFO_IS_DIMMABLE) or LIGHT_STATE_BRIGHTNESS in sysinfo:
supported_features += SUPPORT_BRIGHTNESS
if sysinfo.get(LIGHT_SYSINFO_IS_VARIABLE_COLOR_TEMP):
supported_features += SUPPORT_COLOR_TEMP
# Have to make another api request here in
# order to not re-implement pyHS100 here
max_range, min_range = self.smartbulb.valid_temperature_range
min_mireds = kelvin_to_mired(min_range)
max_mireds = kelvin_to_mired(max_range)
if sysinfo.get(LIGHT_SYSINFO_IS_COLOR):
supported_features += SUPPORT_COLOR
return LightFeatures(
sysinfo=sysinfo,
mac=mac,
alias=alias,
model=model,
supported_features=supported_features,
min_mireds=min_mireds,
max_mireds=max_mireds,
has_emeter=has_emeter,
)
def _get_light_state_retry(self) -> LightState:
"""Retry the retrieval of getting light states."""
try:
return self._get_light_state()
except (SmartDeviceException, OSError):
pass
_LOGGER.debug("Retrying getting light state")
return self._get_light_state()
def _light_state_from_params(self, light_state_params) -> LightState:
brightness = None
color_temp = None
hue_saturation = None
light_features = self._light_features
state = bool(light_state_params[LIGHT_STATE_ON_OFF])
if not state and LIGHT_STATE_DFT_ON in light_state_params:
light_state_params = light_state_params[LIGHT_STATE_DFT_ON]
if light_features.supported_features & SUPPORT_BRIGHTNESS:
brightness = brightness_from_percentage(
light_state_params[LIGHT_STATE_BRIGHTNESS]
)
if light_features.supported_features & SUPPORT_COLOR_TEMP:
if (
light_state_params.get(LIGHT_STATE_COLOR_TEMP) is not None
and light_state_params[LIGHT_STATE_COLOR_TEMP] != 0
):
color_temp = kelvin_to_mired(light_state_params[LIGHT_STATE_COLOR_TEMP])
if light_features.supported_features & SUPPORT_COLOR:
hue_saturation = (
light_state_params[LIGHT_STATE_HUE],
light_state_params[LIGHT_STATE_SATURATION],
)
return LightState(
state=state,
brightness=brightness,
color_temp=color_temp,
hs=hue_saturation,
)
def _get_light_state(self) -> LightState:
"""Get the light state."""
self._update_emeter()
return self._light_state_from_params(self._get_device_state())
def _update_emeter(self):
if not self._light_features.has_emeter:
return
now = dt_util.utcnow()
if (
not self._last_current_power_update
or self._last_current_power_update + CURRENT_POWER_UPDATE_INTERVAL < now
):
self._last_current_power_update = now
self._emeter_params[ATTR_CURRENT_POWER_W] = "{:.1f}".format(
self.smartbulb.current_consumption()
)
if (
not self._last_historical_power_update
or self._last_historical_power_update + HISTORICAL_POWER_UPDATE_INTERVAL
< now
):
self._last_historical_power_update = now
daily_statistics = self.smartbulb.get_emeter_daily()
monthly_statistics = self.smartbulb.get_emeter_monthly()
try:
self._emeter_params[ATTR_DAILY_ENERGY_KWH] = "{:.3f}".format(
daily_statistics[int(time.strftime("%d"))]
)
self._emeter_params[ATTR_MONTHLY_ENERGY_KWH] = "{:.3f}".format(
monthly_statistics[int(time.strftime("%m"))]
)
except KeyError:
# device returned no daily/monthly history
pass
async def _async_set_light_state_retry(
self, old_light_state: LightState, new_light_state: LightState
) -> None:
"""Set the light state with retry."""
# Tell the device to set the states.
if not _light_state_diff(old_light_state, new_light_state):
# Nothing to do, avoid the executor
return
self._is_setting_light_state = True
try:
light_state_params = await self.hass.async_add_executor_job(
self._set_light_state, old_light_state, new_light_state
)
self._is_available = True
self._is_setting_light_state = False
if LIGHT_STATE_ERROR_MSG in light_state_params:
raise HomeAssistantError(light_state_params[LIGHT_STATE_ERROR_MSG])
self._light_state = self._light_state_from_params(light_state_params)
return
except (SmartDeviceException, OSError):
pass
try:
_LOGGER.debug("Retrying setting light state")
light_state_params = await self.hass.async_add_executor_job(
self._set_light_state, old_light_state, new_light_state
)
self._is_available = True
if LIGHT_STATE_ERROR_MSG in light_state_params:
raise HomeAssistantError(light_state_params[LIGHT_STATE_ERROR_MSG])
self._light_state = self._light_state_from_params(light_state_params)
except (SmartDeviceException, OSError) as ex:
self._is_available = False
_LOGGER.warning("Could not set data for %s: %s", self.smartbulb.host, ex)
self._is_setting_light_state = False
def _set_light_state(
self, old_light_state: LightState, new_light_state: LightState
) -> None:
"""Set the light state."""
diff = _light_state_diff(old_light_state, new_light_state)
if not diff:
return
return self._set_device_state(diff)
def _get_device_state(self):
"""State of the bulb or smart dimmer switch."""
if isinstance(self.smartbulb, SmartBulb):
return self.smartbulb.get_light_state()
sysinfo = self.smartbulb.sys_info
        # It's not really a bulb; it's a dimmable SmartPlug (aka Wall Switch)
return {
LIGHT_STATE_ON_OFF: sysinfo[LIGHT_STATE_RELAY_STATE],
LIGHT_STATE_BRIGHTNESS: sysinfo.get(LIGHT_STATE_BRIGHTNESS, 0),
LIGHT_STATE_COLOR_TEMP: 0,
LIGHT_STATE_HUE: 0,
LIGHT_STATE_SATURATION: 0,
}
def _set_device_state(self, state):
"""Set state of the bulb or smart dimmer switch."""
if isinstance(self.smartbulb, SmartBulb):
return self.smartbulb.set_light_state(state)
        # It's not really a bulb, it's a dimmable SmartPlug (aka Wall Switch)
if LIGHT_STATE_BRIGHTNESS in state:
# Brightness of 0 is accepted by the
# device but the underlying library rejects it
# so we turn off instead.
if state[LIGHT_STATE_BRIGHTNESS]:
self.smartbulb.brightness = state[LIGHT_STATE_BRIGHTNESS]
else:
self.smartbulb.state = self.smartbulb.SWITCH_STATE_OFF
elif LIGHT_STATE_ON_OFF in state:
if state[LIGHT_STATE_ON_OFF]:
self.smartbulb.state = self.smartbulb.SWITCH_STATE_ON
else:
self.smartbulb.state = self.smartbulb.SWITCH_STATE_OFF
return self._get_device_state()
def _light_state_diff(old_light_state: LightState, new_light_state: LightState):
old_state_param = old_light_state.to_param()
new_state_param = new_light_state.to_param()
return {
key: value
for key, value in new_state_param.items()
if new_state_param.get(key) != old_state_param.get(key)
}
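# Illustrative example (not in the original source): if only brightness changed
# between the old and new state, e.g. old params {"brightness": 50} vs. new
# params {"brightness": 80}, the diff is {"brightness": 80}, so only changed
# keys are sent to the device.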
| 34.339545
| 88
| 0.652659
|
5ac529cff782c815d32adafb4c68001df84c1cd4
| 3,377
|
py
|
Python
|
smtk/attribute/testing/python/vectorExpressionTest.py
|
jcfr/SMTK
|
0069ea37f8f71a440b8f10a157b84a56ca004551
|
[
"BSD-3-Clause-Clear"
] | 40
|
2015-02-21T19:55:54.000Z
|
2022-01-06T13:13:05.000Z
|
smtk/attribute/testing/python/vectorExpressionTest.py
|
jcfr/SMTK
|
0069ea37f8f71a440b8f10a157b84a56ca004551
|
[
"BSD-3-Clause-Clear"
] | 127
|
2015-01-15T20:55:45.000Z
|
2021-08-19T17:34:15.000Z
|
smtk/attribute/testing/python/vectorExpressionTest.py
|
jcfr/SMTK
|
0069ea37f8f71a440b8f10a157b84a56ca004551
|
[
"BSD-3-Clause-Clear"
] | 27
|
2015-03-04T14:17:51.000Z
|
2021-12-23T01:05:42.000Z
|
# =============================================================================
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See LICENSE.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
# =============================================================================
"""Test vector expression."""
import smtk
from smtk import attribute
from smtk import io
from smtk import operation
import smtk.testing
import os
class TestVectorExpression(smtk.testing.TestCase):
def load_template(self):
resource = smtk.attribute.Resource.create()
logger = smtk.io.Logger()
reader = smtk.io.AttributeReader()
filename = os.path.join(smtk.testing.DATA_DIR, 'attribute',
'attribute_collection', 'VectorExpressionExample.sbt')
status = reader.read(resource, filename, logger)
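        # AttributeReader.read() reports errors via its return value, so a
        # truthy status means the template could not be read.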
print(
'\n'.join([logger.record(i).message for i in range(logger.numberOfRecords())]))
self.assertFalse(status, 'Could not read {}'.format(filename))
return resource
def build_resource(self, resource, smtk_path):
# Create expression att
exp_def = resource.findDefinition('vector-function')
exp_att = resource.createAttribute('velocity_function1', exp_def)
# Create bc att
bc_def = resource.findDefinition('boundary-condition')
bc_att = resource.createAttribute('velocity_bc', bc_def)
velocity_item = bc_att.findDouble('velocity')
self.assertTrue(velocity_item.setExpression(exp_att))
self.assertTrue(velocity_item.isSet())
# Write resource
resource.setLocation(smtk_path)
writer = smtk.attribute.Write.create()
writer.parameters().associate(resource)
self.assertTrue(writer.ableToOperate())
result = writer.operate()
outcome = result.findInt('outcome').value()
self.assertEqual(outcome, int(
smtk.operation.Operation.Outcome.SUCCEEDED))
print('Wrote', smtk_path)
def check_resource(self, smtk_path):
reader = smtk.attribute.Read.create()
reader.parameters().findFile('filename').setValue(smtk_path)
result = reader.operate()
outcome = result.findInt('outcome').value()
self.assertEqual(outcome, int(
smtk.operation.Operation.Outcome.SUCCEEDED))
input_resource = result.findResource('resource').value()
self.assertIsNotNone(input_resource)
bc_att = input_resource.findAttribute('velocity_bc')
velocity_item = bc_att.findDouble('velocity')
self.assertTrue(velocity_item.isSet())
self.assertTrue(velocity_item.isExpression())
def test_expression(self):
if smtk.testing.DATA_DIR == '':
self.skipTest('SMTK test-data directory not provided')
resource = self.load_template()
smtk_path = os.path.join(smtk.testing.TEMP_DIR, 'vector-example.smtk')
self.build_resource(resource, smtk_path)
resource = None
self.check_resource(smtk_path)
# Remove file
os.remove(smtk_path)
if __name__ == '__main__':
smtk.testing.process_arguments()
smtk.testing.main()
| 34.111111
| 91
| 0.639917
|
644136217ef0ae71e60380a51df2172878784762
| 1,897
|
py
|
Python
|
project/api/migrations/0002_auto_20160114_2106.py
|
rishatsharafiev/crm
|
e2c2070bb1875ca2a87ba320456f96832fd475a3
|
[
"MIT"
] | null | null | null |
project/api/migrations/0002_auto_20160114_2106.py
|
rishatsharafiev/crm
|
e2c2070bb1875ca2a87ba320456f96832fd475a3
|
[
"MIT"
] | null | null | null |
project/api/migrations/0002_auto_20160114_2106.py
|
rishatsharafiev/crm
|
e2c2070bb1875ca2a87ba320456f96832fd475a3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-14 18:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='employee',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u0421\u043e\u0442\u0440\u0443\u0434\u043d\u0438\u043a'),
),
migrations.AlterField(
model_name='project',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u0412\u043b\u0430\u0434\u0435\u043b\u0435\u0446'),
),
migrations.AlterField(
model_name='subdivision',
name='manager',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='manager', to=settings.AUTH_USER_MODEL, verbose_name='\u0420\u0443\u043a\u043e\u0432\u043e\u0434\u0438\u0442\u0435\u043b\u044c'),
),
migrations.AlterField(
model_name='task',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owner', to=settings.AUTH_USER_MODEL, verbose_name='\u041f\u043e\u0441\u0442\u0430\u043d\u043e\u0432\u0449\u0438\u043a'),
),
migrations.AlterField(
model_name='task',
name='responsible',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responsible', to=settings.AUTH_USER_MODEL, verbose_name='\u041e\u0442\u0432\u0435\u0442\u0441\u0442\u0432\u0435\u043d\u043d\u044b\u0439'),
),
]
| 44.116279
| 233
| 0.675804
|
f8355de182f03873a127065422a29f5c3be77cd8
| 7,188
|
py
|
Python
|
models/tcn.py
|
dwromero/ckconv
|
d44c6441a98792477d6259368c210089bb33fe7a
|
[
"MIT"
] | 74
|
2021-02-04T14:28:49.000Z
|
2022-03-23T16:12:18.000Z
|
models/tcn.py
|
dwromero/ckconv
|
d44c6441a98792477d6259368c210089bb33fe7a
|
[
"MIT"
] | 7
|
2021-02-28T03:29:12.000Z
|
2022-02-16T14:33:06.000Z
|
models/tcn.py
|
dwromero/ckconv
|
d44c6441a98792477d6259368c210089bb33fe7a
|
[
"MIT"
] | 6
|
2021-02-12T14:43:15.000Z
|
2021-08-11T02:42:31.000Z
|
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
import ckconv
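# Chomp1d trims the extra trailing timesteps introduced by the symmetric
# padding of nn.Conv1d. Each TemporalBlock pads by (kernel_size - 1) * dilation
# and then chomps the same amount, which keeps the convolution causal: the
# output at time t depends only on inputs at times <= t.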
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, : -self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
def __init__(
self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2
):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(
nn.Conv1d(
n_inputs,
n_outputs,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
)
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(
nn.Conv1d(
n_outputs,
n_outputs,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
)
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(
self.conv1,
self.chomp1,
self.relu1,
self.dropout1,
self.conv2,
self.chomp2,
self.relu2,
self.dropout2,
)
# ### LayerNorm instead of WeightNorm (Used for experiments in appendix).
# super(TemporalBlock, self).__init__()
#
# self.conv1 = nn.Conv1d(
# n_inputs,
# n_outputs,
# kernel_size,
# stride=stride,
# padding=padding,
# dilation=dilation,
# )
#
# self.chomp1 = Chomp1d(padding)
# self.norm1 = ckconv.nn.LayerNorm(n_outputs)
# self.relu1 = nn.ReLU()
# self.dropout1 = nn.Dropout(dropout)
#
# self.conv2 = nn.Conv1d(
# n_outputs,
# n_outputs,
# kernel_size,
# stride=stride,
# padding=padding,
# dilation=dilation,
# )
# self.chomp2 = Chomp1d(padding)
# self.norm2 = ckconv.nn.LayerNorm(n_outputs)
# self.relu2 = nn.ReLU()
# self.dropout2 = nn.Dropout(dropout)
#
# self.net = nn.Sequential(
# self.conv1,
# self.chomp1,
# self.norm1,
# self.relu1,
# self.dropout1,
# self.conv2,
# self.chomp2,
# self.norm2,
# self.relu2,
# self.dropout2,
# )
self.downsample = (
nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
)
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
super(TemporalConvNet, self).__init__()
layers = []
num_levels = len(num_channels)
for i in range(num_levels):
dilation_size = 2 ** i
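            # Dilation doubles at every level, so the receptive field grows
            # exponentially with network depth.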
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
layers += [
TemporalBlock(
in_channels,
out_channels,
kernel_size,
stride=1,
dilation=dilation_size,
padding=(kernel_size - 1) * dilation_size,
dropout=dropout,
)
]
self.network = nn.Sequential(*layers)
def forward(self, x):
return self.network(x)
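# Minimal usage sketch (illustrative, not part of the original source): inputs
# are shaped (batch, channels, length) and the sequence length is preserved.
#   tcn = TemporalConvNet(num_inputs=1, num_channels=[25, 25, 25])
#   out = tcn(torch.randn(8, 1, 100))  # -> shape (8, 25, 100)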
class AddProblem_TCN(nn.Module):
def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
super(AddProblem_TCN, self).__init__()
self.tcn = TemporalConvNet(
input_size, num_channels, kernel_size=kernel_size, dropout=dropout
)
self.linear = nn.Linear(num_channels[-1], output_size)
self.init_weights()
def init_weights(self):
self.linear.weight.data.normal_(0, 0.01)
def forward(self, x):
y1 = self.tcn(x)
return self.linear(y1[:, :, -1])
class CopyMemory_TCN(nn.Module):
def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
super(CopyMemory_TCN, self).__init__()
self.tcn = TemporalConvNet(
input_size, num_channels, kernel_size=kernel_size, dropout=dropout
)
self.linear = nn.Linear(num_channels[-1], output_size)
self.init_weights()
def init_weights(self):
self.linear.weight.data.normal_(0, 0.01)
def forward(self, x):
y1 = self.tcn(x)
return self.linear(y1.transpose(1, 2))
class MNIST_TCN(nn.Module):
def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
super(MNIST_TCN, self).__init__()
self.tcn = TemporalConvNet(
input_size, num_channels, kernel_size=kernel_size, dropout=dropout
)
self.linear = nn.Linear(num_channels[-1], output_size)
self.init_weights()
def init_weights(self):
self.linear.weight.data.normal_(0, 0.01)
def forward(self, x):
y1 = self.tcn(x)
return self.linear(y1[:, :, -1])
class PTB_TCN(nn.Module):
def __init__(self, input_size,
output_size,
num_channels,
kernel_size,
dropout,
emb_dropout=0.1,
tied_weights=True):
super(PTB_TCN, self).__init__()
self.encoder = nn.Embedding(output_size, input_size)
self.tcn = TemporalConvNet(input_size, num_channels, kernel_size, dropout=dropout)
self.linear = nn.Linear(num_channels[-1], output_size)
if tied_weights:
if num_channels[-1] != input_size:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.linear.weight = self.encoder.weight
print("Weight tied")
self.drop = nn.Dropout(emb_dropout)
self.init_weights()
def init_weights(self):
self.encoder.weight.data.normal_(0, 0.01)
self.linear.weight.data.normal_(0, 0.01)
self.linear.bias.data.fill_(0)
def forward(self, x, return_emb=False):
emb = self.drop(self.encoder(x)).transpose(1, 2) # MB x emb_size x seq_len
y1 = self.tcn(emb) # MB x n_ch x seq_len
out = self.linear(y1.transpose(1, 2)) # MB x seq_len x voc_size
if return_emb:
return out, y1.transpose(1, 2)
return out
| 30.982759
| 90
| 0.553144
|
4a0244a22d6a301d1db6de1d2e15b3f1f36be1d4
| 1,341
|
py
|
Python
|
migrations/versions/27a721daab2e_renamed_email_to_login_in_user_table.py
|
pombredanne/vulncode-db
|
bffd1467df54d98e5271ec977330365d5879b60d
|
[
"Apache-2.0"
] | 592
|
2019-03-05T13:39:57.000Z
|
2022-03-31T14:52:58.000Z
|
migrations/versions/27a721daab2e_renamed_email_to_login_in_user_table.py
|
pombredanne/vulncode-db
|
bffd1467df54d98e5271ec977330365d5879b60d
|
[
"Apache-2.0"
] | 91
|
2019-04-05T20:45:26.000Z
|
2021-12-24T02:10:50.000Z
|
migrations/versions/27a721daab2e_renamed_email_to_login_in_user_table.py
|
pombredanne/vulncode-db
|
bffd1467df54d98e5271ec977330365d5879b60d
|
[
"Apache-2.0"
] | 84
|
2019-03-31T03:55:56.000Z
|
2022-01-03T13:33:44.000Z
|
"""renamed email to login in user table
Revision ID: 27a721daab2e
Revises: 9d370f33f1a0
Create Date: 2020-12-04 16:14:19.390278
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql  # noqa: F401 (used by the commented-out ops)
# revision identifiers, used by Alembic.
revision = "27a721daab2e"
down_revision = "9d370f33f1a0"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"user",
"email",
new_column_name="login",
existing_type=sa.String(256),
existing_nullable=False,
)
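    # On MySQL this rename is emitted as CHANGE COLUMN, which must restate the
    # full column definition; hence existing_type/existing_nullable above.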
# op.drop_index("email", table_name="user")
# op.create_unique_constraint("user_login_uniq", "user", ["login"])
# op.drop_column("user", "email")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"user",
"login",
new_column_name="email",
existing_type=sa.String(256),
existing_nullable=False,
)
# op.add_column("user", sa.Column("email", mysql.VARCHAR(length=256), nullable=False))
# op.drop_constraint("user_login_uniq", "user", type_="unique")
# op.create_index("email", "user", ["email"], unique=True)
# op.drop_column("user", "login")
# ### end Alembic commands ###
| 27.9375
| 90
| 0.651752
|
26bbb3eda5251e1d632ccf63035e09516199f4fd
| 930
|
py
|
Python
|
backend/app_api/views/register_views.py
|
harter123/test_dev05
|
9378f558f961bbf069b541ef146b7658219ebb8b
|
[
"Apache-2.0"
] | null | null | null |
backend/app_api/views/register_views.py
|
harter123/test_dev05
|
9378f558f961bbf069b541ef146b7658219ebb8b
|
[
"Apache-2.0"
] | null | null | null |
backend/app_api/views/register_views.py
|
harter123/test_dev05
|
9378f558f961bbf069b541ef146b7658219ebb8b
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.auth.models import User
from rest_framework.views import APIView
from app_common.utils.response import response_success, Error
class RegisterView(APIView):
    # Calls to this endpoint must not require authentication.
authentication_classes = []
def post(self, request):
"""
        Register a new account.
"""
username = request.data.get("username", "")
password1 = request.data.get("password1", "")
password2 = request.data.get("password2", "")
if username == '' or password1 == '' or password2 == '':
return response_success(error=Error.USER_OR_PAWD_NULL)
elif password1 != password2:
return response_success(error=Error.PAWD_ERROR)
else:
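            # create_user() hashes the password via set_password() before
            # saving, unlike a plain User.objects.create().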
User.objects.create_user(username=username, password=password1)
return response_success()
| 28.181818
| 75
| 0.667742
|
9a142c4f22915582edecae25e9417a6ab9a76638
| 1,368
|
py
|
Python
|
src/lexicon/managing_negation.py
|
alexandrabenamar/Who-Wins
|
23df54f98286e67aab39e92ac746bccf6916c231
|
[
"MIT"
] | 3
|
2018-04-10T21:52:57.000Z
|
2018-08-22T15:41:58.000Z
|
src/lexicon/managing_negation.py
|
alexandrabenamar/Who-Wins
|
23df54f98286e67aab39e92ac746bccf6916c231
|
[
"MIT"
] | null | null | null |
src/lexicon/managing_negation.py
|
alexandrabenamar/Who-Wins
|
23df54f98286e67aab39e92ac746bccf6916c231
|
[
"MIT"
] | 1
|
2020-05-18T15:46:23.000Z
|
2020-05-18T15:46:23.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 3 22:34:08 2018
@author: mira
"""
import re
# function not yet finished
# I assume the text has already been split into "sentences", using punctuation
# as the separator; the resulting list of words is passed in as the parameter.
# For now I assume all tokens start out positive, so the lookup of each word in
# the lexicon still has to be integrated into the function.
# The function therefore inverts the polarity of the words that follow a negation.
def negation(tokens):
    nb_tokens = len(tokens)
    pos_neg = []
    i = 0
    while i < nb_tokens:
        if tokens[i].lower() in ["no", "not", "anti"] or re.match(r"(.)*n't", tokens[i]):
            pos_neg.append(0)
            i = i + 1
            # skip over an optional intensifier right after the negation
            if i < nb_tokens and tokens[i].lower() in ["even", "really", "very", "exactly"]:
                pos_neg.append(0)
                i = i + 1
            # every remaining token gets its polarity inverted
            for j in range(i, nb_tokens):
                pos_neg.append(-1)
                i = i + 1
        else:
            pos_neg.append(1)
            i = i + 1
    return pos_neg
# test
tokens = ["i" , "don't" , "really" , "agree" , "with" , "what" , "you" , "said" , "about" , "it"]
print(negation(tokens))
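# With the bounds-checked version above this prints:
# [1, 0, 0, -1, -1, -1, -1, -1, -1, -1]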
| 27.918367
| 97
| 0.542398
|
98eca949db471ec4734445ebd004e22fefff3d76
| 3,502
|
py
|
Python
|
sdk/luminesce/models/data_type.py
|
finbourne/luminesce-sdk-python-preview
|
7af198cfa9c0fbd619272fb90601162fb7db0a67
|
[
"MIT"
] | null | null | null |
sdk/luminesce/models/data_type.py
|
finbourne/luminesce-sdk-python-preview
|
7af198cfa9c0fbd619272fb90601162fb7db0a67
|
[
"MIT"
] | null | null | null |
sdk/luminesce/models/data_type.py
|
finbourne/luminesce-sdk-python-preview
|
7af198cfa9c0fbd619272fb90601162fb7db0a67
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
FINBOURNE Honeycomb Web API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 1.9.129
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from luminesce.configuration import Configuration
class DataType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
BOOLEAN = "Boolean"
INT = "Int"
BIGINT = "BigInt"
DOUBLE = "Double"
DECIMAL = "Decimal"
TEXT = "Text"
DATE = "Date"
DATETIME = "DateTime"
TABLE = "Table"
allowable_values = [BOOLEAN, INT, BIGINT, DOUBLE, DECIMAL, TEXT, DATE, DATETIME, TABLE] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
}
attribute_map = {
}
required_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""DataType - a model defined in OpenAPI"
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DataType):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, DataType):
return True
return self.to_dict() != other.to_dict()
| 27.359375
| 105
| 0.567105
|
94d92d3a7ea432c3bf9405e29268ee8f4658af90
| 13,970
|
py
|
Python
|
neutron/scheduler/dhcp_agent_scheduler.py
|
1pintbeer/neutron
|
f5a827c2be06f24a1f8025f120f16c12eb1b1f55
|
[
"Apache-2.0"
] | null | null | null |
neutron/scheduler/dhcp_agent_scheduler.py
|
1pintbeer/neutron
|
f5a827c2be06f24a1f8025f120f16c12eb1b1f55
|
[
"Apache-2.0"
] | null | null | null |
neutron/scheduler/dhcp_agent_scheduler.py
|
1pintbeer/neutron
|
f5a827c2be06f24a1f8025f120f16c12eb1b1f55
|
[
"Apache-2.0"
] | 1
|
2017-01-10T19:07:55.000Z
|
2017-01-10T19:07:55.000Z
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from operator import itemgetter
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib.objects import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.common import utils as agent_utils
from neutron.objects import agent as agent_obj
from neutron.objects import network
from neutron.scheduler import base_resource_filter
from neutron.scheduler import base_scheduler
LOG = logging.getLogger(__name__)
class AutoScheduler(object):
def auto_schedule_networks(self, plugin, context, host):
"""Schedule non-hosted networks to the DHCP agent on the specified
host.
"""
agents_per_network = cfg.CONF.dhcp_agents_per_network
# a list of (agent, net_ids) tuples
bindings_to_add = []
# NOTE(ralonsoh) use writer manager to call get_network. See
# https://review.opendev.org/#/c/483518/. Must be changed to READER.
with db_api.CONTEXT_WRITER.using(context):
fields = ['network_id', 'enable_dhcp', 'segment_id']
subnets = plugin.get_subnets(context, fields=fields)
net_ids = {}
net_segment_ids = collections.defaultdict(set)
for s in subnets:
if s['enable_dhcp']:
net_segment_ids[s['network_id']].add(s.get('segment_id'))
for network_id, segment_ids in net_segment_ids.items():
is_routed_network = any(segment_ids)
net_ids[network_id] = is_routed_network
if not net_ids:
LOG.debug('No non-hosted networks')
return False
dhcp_agents = agent_obj.Agent.get_objects(
context, agent_type=constants.AGENT_TYPE_DHCP,
host=host, admin_state_up=True)
segment_host_mapping = network.SegmentHostMapping.get_objects(
context, host=host)
segments_on_host = {s.segment_id for s in segment_host_mapping}
for dhcp_agent in dhcp_agents:
if agent_utils.is_agent_down(dhcp_agent.heartbeat_timestamp):
LOG.warning('DHCP agent %s is not active', dhcp_agent.id)
continue
for net_id, is_routed_network in net_ids.items():
agents = plugin.get_dhcp_agents_hosting_networks(
context, [net_id])
segments_on_network = net_segment_ids[net_id]
if is_routed_network:
if len(segments_on_network & segments_on_host) == 0:
continue
else:
if len(agents) >= agents_per_network:
continue
if any(dhcp_agent.id == agent.id for agent in agents):
continue
net = plugin.get_network(context, net_id)
az_hints = (net.get(az_def.AZ_HINTS) or
cfg.CONF.default_availability_zones)
if (az_hints and
dhcp_agent['availability_zone'] not in az_hints):
continue
bindings_to_add.append((dhcp_agent, net_id))
        # do this outside the transaction so one scheduling result failing
        # doesn't make the others fail
debug_data = []
for agent, net_id in bindings_to_add:
self.resource_filter.bind(context, [agent], net_id)
debug_data.append('(%s, %s, %s)' % (agent['agent_type'],
agent['host'], net_id))
LOG.debug('Resources bound (agent type, host, resource id): %s',
', '.join(debug_data))
return True
class ChanceScheduler(base_scheduler.BaseChanceScheduler, AutoScheduler):
def __init__(self):
super(ChanceScheduler, self).__init__(DhcpFilter())
class WeightScheduler(base_scheduler.BaseWeightScheduler, AutoScheduler):
def __init__(self):
super(WeightScheduler, self).__init__(DhcpFilter())
class AZAwareWeightScheduler(WeightScheduler):
def select(self, plugin, context, resource_hostable_agents,
resource_hosted_agents, num_agents_needed):
"""AZ aware scheduling
If the network has multiple AZs, agents are scheduled as
follows:
        - select the AZ with the fewest agents scheduled for the network
        - among AZs with the same number of scheduled agents, the AZ
          containing the lowest-weight agent is used first
        - choose the agent within that AZ with WeightScheduler
"""
        # Dict recording the agents in each AZ; each record is sorted by agent
        # weight so that the agent with the lowest weight is used first.
hostable_az_agents = collections.defaultdict(list)
        # Dict recording the number of agents in each AZ. When every AZ has the
        # same number of agents and num_agents_needed is less than the number
        # of AZs, we want to pick the lower-weight agents. Using an OrderedDict
        # means that, in that case, the AZ whose agent has the lowest weight is
        # recorded first and is therefore used first.
num_az_agents = collections.OrderedDict()
# resource_hostable_agents should be a list with agents in the order of
# their weight.
resource_hostable_agents = (
super(AZAwareWeightScheduler, self).select(
plugin, context, resource_hostable_agents,
resource_hosted_agents, len(resource_hostable_agents)))
for agent in resource_hostable_agents:
az_agent = agent['availability_zone']
hostable_az_agents[az_agent].append(agent)
if az_agent not in num_az_agents:
num_az_agents[az_agent] = 0
if num_agents_needed <= 0:
return []
for agent in resource_hosted_agents:
az_agent = agent['availability_zone']
if az_agent in num_az_agents:
num_az_agents[az_agent] += 1
chosen_agents = []
while num_agents_needed > 0:
# 'min' will stably output the first min value in the list.
select_az = min(num_az_agents.items(), key=itemgetter(1))[0]
# Select the agent in AZ with least weight.
select_agent = hostable_az_agents[select_az][0]
chosen_agents.append(select_agent)
# Update the AZ-agents records.
del hostable_az_agents[select_az][0]
if not hostable_az_agents[select_az]:
del num_az_agents[select_az]
else:
num_az_agents[select_az] += 1
num_agents_needed -= 1
return chosen_agents
class DhcpFilter(base_resource_filter.BaseResourceFilter):
def bind(self, context, agents, network_id):
"""Bind the network to the agents."""
# customize the bind logic
bound_agents = agents[:]
for agent in agents:
# saving agent_id to use it after rollback to avoid
# DetachedInstanceError
agent_id = agent.id
try:
network.NetworkDhcpAgentBinding(
context, dhcp_agent_id=agent_id,
network_id=network_id).create()
except exceptions.NeutronDbObjectDuplicateEntry:
# it's totally ok, someone just did our job!
bound_agents.remove(agent)
LOG.info('Agent %s already present', agent_id)
LOG.debug('Network %(network_id)s is scheduled to be '
'hosted by DHCP agent %(agent_id)s',
{'network_id': network_id,
'agent_id': agent_id})
super(DhcpFilter, self).bind(context, bound_agents, network_id)
def filter_agents(self, plugin, context, network):
"""Return the agents that can host the network.
This function returns a dictionary which has 3 keys.
        n_agents: The number of agents that should be scheduled. If n_agents=0,
all networks are already scheduled or no more agent can host the
network.
hostable_agents: A list of agents which can host the network.
hosted_agents: A list of agents which already hosts the network.
"""
agents_dict = self._get_network_hostable_dhcp_agents(
plugin, context, network)
if not agents_dict['hostable_agents'] or agents_dict['n_agents'] <= 0:
return {'n_agents': 0, 'hostable_agents': [],
'hosted_agents': agents_dict['hosted_agents']}
return agents_dict
def _filter_agents_with_network_access(self, plugin, context,
network, hostable_agents):
if 'candidate_hosts' in network:
hostable_dhcp_hosts = network['candidate_hosts']
else:
hostable_dhcp_hosts = plugin.filter_hosts_with_network_access(
context, network['id'],
[agent['host'] for agent in hostable_agents])
reachable_agents = [agent for agent in hostable_agents
if agent['host'] in hostable_dhcp_hosts]
return reachable_agents
def _get_dhcp_agents_hosting_network(self, plugin, context, network):
"""Return dhcp agents hosting the given network or None if a given
        network is already hosted by a sufficient number of agents.
"""
agents_per_network = cfg.CONF.dhcp_agents_per_network
# TODO(gongysh) don't schedule the networks with only
# subnets whose enable_dhcp is false
with db_api.CONTEXT_READER.using(context):
network_hosted_agents = plugin.get_dhcp_agents_hosting_networks(
context, [network['id']], hosts=network.get('candidate_hosts'))
if len(network_hosted_agents) >= agents_per_network:
LOG.debug('Network %s is already hosted by enough agents.',
network['id'])
return
return network_hosted_agents
def _get_active_agents(self, plugin, context, az_hints):
"""Return a list of active dhcp agents."""
with db_api.CONTEXT_READER.using(context):
filters = {'agent_type': [constants.AGENT_TYPE_DHCP],
'admin_state_up': [True]}
if az_hints:
filters['availability_zone'] = az_hints
active_dhcp_agents = plugin.get_agent_objects(
context, filters=filters)
if not active_dhcp_agents:
LOG.warning('No more DHCP agents')
return []
return active_dhcp_agents
def _get_network_hostable_dhcp_agents(self, plugin, context, network):
"""Provide information on hostable DHCP agents for network.
The returned value includes the number of agents that will actually
host the given network, a list of DHCP agents that can host the given
network, and a list of DHCP agents currently hosting the network.
"""
hosted_agents = self._get_dhcp_agents_hosting_network(plugin,
context, network)
if hosted_agents is None:
return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': []}
n_agents = cfg.CONF.dhcp_agents_per_network - len(hosted_agents)
az_hints = (network.get(az_def.AZ_HINTS) or
cfg.CONF.default_availability_zones)
active_dhcp_agents = self._get_active_agents(plugin, context, az_hints)
hosted_agent_ids = [agent['id'] for agent in hosted_agents]
if not active_dhcp_agents:
return {'n_agents': 0, 'hostable_agents': [],
'hosted_agents': hosted_agents}
hostable_dhcp_agents = [
agent for agent in active_dhcp_agents
if agent.id not in hosted_agent_ids and plugin.is_eligible_agent(
context, True, agent)]
hostable_dhcp_agents = self._filter_agents_with_network_access(
plugin, context, network, hostable_dhcp_agents)
if not hostable_dhcp_agents:
result = {'n_agents': 0, 'hostable_agents': [],
'hosted_agents': hosted_agents}
else:
result = {'n_agents': min(len(hostable_dhcp_agents), n_agents),
'hostable_agents': hostable_dhcp_agents,
'hosted_agents': hosted_agents}
hostable_agents_ids = [a['id'] for a in result['hostable_agents']]
hosted_agents_ids = [a['id'] for a in result['hosted_agents']]
LOG.debug('Network hostable DHCP agents. Network: %(network)s, '
'hostable agents: %(hostable_agents)s, hosted agents: '
'%(hosted_agents)s', {'network': network['id'],
'hostable_agents': hostable_agents_ids,
'hosted_agents': hosted_agents_ids})
return result
| 46.722408
| 79
| 0.618468
|